# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth'
pretrained = 'pretrained/mae_pretrain_vit_base.pth'
model = dict(
backbone=dict(
_delete_=True,
type='ViTAdapter',
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
drop_path_rate=0.2,
conv_inplane=64,
n_points=4,
deform_num_heads=12,
cffn_ratio=0.25,
deform_ratio=0.5,
use_extra_extractor=False,
layer_scale=False,
interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
window_attn=[True, True, False, True, True, False,
True, True, False, True, True, False],
window_size=[14, 14, None, 14, 14, None,
14, 14, None, 14, 14, None],
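# The 12 ViT blocks are split by interaction_indexes into 4 groups of 3 for
# the adapter interactions; within each group, the first two blocks use
# 14x14 windowed attention and the third (window_size=None) attends globally.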
pretrained=pretrained),
neck=dict(
type='FPN',
in_channels=[768, 768, 768, 768],
out_channels=256,
num_outs=5,
norm_cfg=dict(type='MMSyncBN', requires_grad=True)),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(type='Shared4Conv1FCBBoxHead',
norm_cfg=dict(type='MMSyncBN', requires_grad=True)),
mask_head=dict(norm_cfg=dict(type='MMSyncBN', requires_grad=True)),
))
# data pipeline and optimizer settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=(1024, 1024),
ratio_range=(0.1, 2.0),
multiscale_mode='range',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(1024, 1024),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(1024, 1024)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
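# The pipeline above is the large-scale jitter (LSJ) recipe: random rescaling
# of the image by 0.1-2.0x relative to 1024x1024, a 1024x1024 random crop,
# removal of boxes that become degenerate, and padding to a fixed 1024x1024 canvas.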
data = dict(train=dict(pipeline=train_pipeline))
lr_config = dict(
_delete_=True,
policy='CosineAnnealing',
min_lr_ratio=0.01,
warmup='linear',
warmup_iters=2000,
warmup_ratio=0.001)
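# Cosine schedule: linear warmup for 2000 iters, then the lr is annealed down
# to 1% of its base value (min_lr_ratio=0.01) over the 50-epoch run set by the
# runner below.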
runner = dict(type='EpochBasedRunner', max_epochs=50)
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'level_embed': dict(decay_mult=0.),
'pos_embed': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'bias': dict(decay_mult=0.)
}))
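# The custom_keys above set decay_mult=0 so that level/positional embeddings,
# normalization parameters, and biases are excluded from weight decay, a
# common choice when fine-tuning ViT backbones with AdamW.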
optimizer_config = dict(grad_clip=None)
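# Mixed-precision training; the loss_scale dict below sets the gradient
# scaler's initial scale to 512.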
fp16 = dict(loss_scale=dict(init_scale=512))
checkpoint_config = dict(
interval=1,
max_keep_ckpts=3,
save_last=True,
)
# =============================================================================
# End of config: ViT-Adapter-main / detection/configs/upgraded_mask_rcnn/mask_rcnn_mae_adapter_base_lsj_fpn_50ep_coco.py
# =============================================================================

# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
drop_path_rate = 0.4
model = dict(
type='HybridTaskCascadeAug',
backbone=dict(
type='ViTAdapter',
img_size=384,
pretrain_size=384,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
drop_path_rate=drop_path_rate,
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
with_cp=True,
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
window_attn=[True, True, True, True, True, False,
True, True, True, True, True, False,
True, True, True, True, True, False,
True, True, True, True, True, False],
window_size=[14, 14, 14, 14, 14, None,
14, 14, 14, 14, 14, None,
14, 14, 14, 14, 14, None,
14, 14, 14, 14, 14, None],
pretrained=None),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
ffn_ratio=4.0,
drop_path=drop_path_rate,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5),
aug=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=1000,
scale_ranges=[['l'], ['l'], ['m', 'l'],
['s', 'm'], ['s', 'm'], ['s', 'm']],
)
))
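# HybridTaskCascadeAug is the repo's multi-scale test-time-augmentation
# variant of HTC; each entry of scale_ranges above lines up with one of the
# six test scales in test_pipeline below and limits which object sizes
# ('s'/'m'/'l') are kept from that scale before the soft-NMS merge.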
# data pipeline and optimizer settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
# file_client_args = dict(backend='petrel')
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=[(1600, 600), (1600, 800), (1600, 1000),
(1600, 1200), (1600, 1400), (1600, 1600)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(samples_per_gpu=1,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.90))
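# LayerDecayOptimizerConstructor applies layer-wise lr decay: each earlier
# transformer block has its lr scaled by a further factor of 0.90, so with
# 24 layers the earliest blocks see roughly 0.9**24 ~ 0.08x the base lr while
# the detection heads keep the full 1e-4 (the exact indexing depends on the
# constructor implementation).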
optimizer_config = dict(grad_clip=None)
checkpoint_config = dict(
interval=1,
max_keep_ckpts=2,
save_last=True,
)
# fp16 = dict(loss_scale=dict(init_scale=512))
# =============================================================================
# End of config: ViT-Adapter-main / detection/configs/htc++/htc++_augreg_adapter_large_fpn_3x_coco_ms.py
# =============================================================================

# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth'
pretrained = 'pretrained/beitv2_large_patch16_224_pt1k_ft21k.pth'
model = dict(
type='HybridTaskCascade',
backbone=dict(
type='BEiTAdapter',
img_size=224,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.4,
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
window_attn=[True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True],
window_size=[14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56],
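# All 24 BEiT blocks use windowed attention here: five out of every six
# blocks use a 14x14 window and every sixth block a larger 56x56 window.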
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
pretrained=pretrained),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
ffn_ratio=4.0,
drop_path=0.3,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
# data pipeline and optimizer settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
# file_client_args = dict(backend='petrel')
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='MultiScaleFlipAug',
img_scale=(1600, 1400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(samples_per_gpu=1,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.90))
optimizer_config = dict(grad_clip=None)
checkpoint_config = dict(
interval=1,
max_keep_ckpts=2,
save_last=True,
)
# fp16 = dict(loss_scale=dict(init_scale=512))
# find_unused_parameters=True
# =============================================================================
# End of config: ViT-Adapter-main / detection/configs/htc++/htc++_beitv2_adapter_large_fpn_3x_coco.py
# =============================================================================

_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
NUM_CLASSES = 80
drop_path_rate = 0.3  # reduced from 0.4 (pre-training) to 0.3 for fine-tuning
# https://github.com/czczup/ViT-Adapter/releases/download/v0.3.1/htc++_beitv2_adapter_large_fpn_o365.pth
load_from = 'pretrained/htc++_beitv2_adapter_large_fpn_o365.pth'
model = dict(
type='HybridTaskCascade',
backbone=dict(
type='BEiTAdapter',
img_size=224,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=drop_path_rate,
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
with_cp=True,
window_attn=[True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True],
window_size=[14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56],
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
pretrained=None),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
with_cp=True,
ffn_ratio=4.0,
drop_path=drop_path_rate,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
norm_cfg=dict(type='GN', num_groups=32),
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
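# Unlike the Smooth-L1 heads in the other HTC++ configs, the three cascade
# heads below regress decoded boxes (reg_decoded_bbox=True) with a GIoU loss
# and add SyncBN to the shared convs.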
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=NUM_CLASSES,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), # use GIoU loss
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=NUM_CLASSES,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), # use GIoU loss
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=NUM_CLASSES,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), # use GIoU loss
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))
),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# train_pipeline; NOTE: the img_scale and the Pad's size_divisor differ from
# the default settings in mmdet.
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(2000, 600), (2000, 1600)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=(2000, 1000),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# 4 nodes x 8 GPUs = 32 GPUs; total batch size = 2 samples/GPU x 32 GPUs = 64
data = dict(samples_per_gpu=2,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.80))
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[])
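# step=[] means no lr drops: after the 500-iter linear warmup the lr stays
# constant for the rest of the 20k-iter schedule.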
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=20000)
checkpoint_config = dict(interval=500, max_keep_ckpts=3)
evaluation = dict(interval=500, save_best='auto')
custom_hooks = [
dict(
type='ExpMomentumEMAHook',
resume_from=None,
momentum=0.0001,
priority=49)
]
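# ExpMomentumEMAHook maintains an exponential moving average of the model
# weights (momentum 0.0001, i.e. a slowly-moving average), which is typically
# used in place of the raw weights for checkpointing and evaluation.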
# =============================================================================
# End of config: ViT-Adapter-main / detection/configs/htc++/htc++_beitv2_adapter_large_fpn_o365_coco.py
# =============================================================================

_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
NUM_CLASSES = 80
drop_path_rate = 0.3  # reduced from 0.4 (pre-training) to 0.3 for fine-tuning
model = dict(
type='HybridTaskCascadeAug',
backbone=dict(
type='BEiTAdapter',
img_size=224,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=drop_path_rate,
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
with_cp=True,
window_attn=[True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True],
window_size=[14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56],
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
pretrained=None),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
with_cp=True,
ffn_ratio=4.0,
drop_path=drop_path_rate,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
norm_cfg=dict(type='GN', num_groups=32),
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=NUM_CLASSES,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), # use GIoU loss
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=NUM_CLASSES,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), # use GIoU loss
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=NUM_CLASSES,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), # use GIoU loss
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))
),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5),
aug=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=1000,
scale_ranges=[['l'], ['l'], ['m', 'l'],
['s', 'm'], ['s', 'm'], ['s', 'm']],
)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# train_pipeline; NOTE: the img_scale and the Pad's size_divisor differ from
# the default settings in mmdet.
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(2000, 600), (2000, 1600)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=[(3000, 600), (3000, 800), (3000, 1000),
(3000, 1200), (3000, 1400), (3000, 1600)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# 4 nodes x 8 GPUs = 32 GPUs; total batch size = 2 samples/GPU x 32 GPUs = 64
data = dict(samples_per_gpu=2,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.80))
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[])
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=20000)
checkpoint_config = dict(interval=500, max_keep_ckpts=3)
evaluation = dict(interval=500, save_best='auto')
custom_hooks = [
dict(
type='ExpMomentumEMAHook',
resume_from=None,
momentum=0.0001,
priority=49)
]
# =============================================================================
# End of config: ViT-Adapter-main / detection/configs/htc++/htc++_beitv2_adapter_large_fpn_o365_coco_ms.py
# =============================================================================

# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
model = dict(
type='HybridTaskCascadeAug',
backbone=dict(
type='BEiTAdapter',
img_size=224,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.4,
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
window_attn=[True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True],
window_size=[14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56],
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
pretrained=None),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
ffn_ratio=4.0,
drop_path=0.3,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5),
aug=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=1000,
scale_ranges=[['l'], ['l'], ['m', 'l'],
['s', 'm'], ['s', 'm'], ['s', 'm']],
)
))
# data pipeline and optimizer settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
# file_client_args = dict(backend='petrel')
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=[(3000, 600), (3000, 800), (3000, 1000),
(3000, 1200), (3000, 1400), (3000, 1600)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(samples_per_gpu=1,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.90))
optimizer_config = dict(grad_clip=None)
checkpoint_config = dict(
interval=1,
max_keep_ckpts=2,
save_last=True,
)
# fp16 = dict(loss_scale=dict(init_scale=512))
# =============================================================================
# End of config: ViT-Adapter-main / detection/configs/htc++/htc++_beit_adapter_large_fpn_3x_coco_ms.py
# =============================================================================

# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth'
pretrained = 'pretrained/beit_large_patch16_224_pt22k_ft22k.pth'
model = dict(
type='HybridTaskCascade',
backbone=dict(
type='BEiTAdapter',
img_size=224,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.3, # maybe 0.4 is better
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
window_attn=[True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True],
window_size=[14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56],
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
pretrained=pretrained,
version='old'),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
ffn_ratio=4.0,
drop_path=0.3,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
# data pipeline and optimizer settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
# file_client_args = dict(backend='petrel')
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='MultiScaleFlipAug',
img_scale=(1600, 1400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(samples_per_gpu=1,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.90))
optimizer_config = dict(grad_clip=None)
checkpoint_config = dict(
interval=1,
max_keep_ckpts=2,
save_last=True,
)
# fp16 = dict(loss_scale=dict(init_scale=512))
# find_unused_parameters=True
# =============================================================================
# End of config: ViT-Adapter-main / detection/configs/htc++/htc++_beit_adapter_large_fpn_3x_coco_old.py
# =============================================================================

# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz'
# pretrained = 'https://github.com/czczup/ViT-Adapter/releases/download/v0.1.6/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.pth'
pretrained = 'pretrained/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.pth'
drop_path_rate = 0.4
model = dict(
type='HybridTaskCascade',
backbone=dict(
type='ViTAdapter',
img_size=384,
pretrain_size=384,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
drop_path_rate=drop_path_rate,
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
with_cp=True,
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
window_attn=[True, True, True, True, True, False,
True, True, True, True, True, False,
True, True, True, True, True, False,
True, True, True, True, True, False],
window_size=[14, 14, 14, 14, 14, None,
14, 14, 14, 14, 14, None,
14, 14, 14, 14, 14, None,
14, 14, 14, 14, 14, None],
pretrained=pretrained),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
ffn_ratio=4.0,
drop_path=drop_path_rate,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=300,
mask_thr_binary=0.5)))
# optimizer
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
# file_client_args = dict(backend='petrel')
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='MultiScaleFlipAug',
img_scale=(1600, 1400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(samples_per_gpu=1,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.90))
optimizer_config = dict(grad_clip=None)
checkpoint_config = dict(
interval=1,
max_keep_ckpts=2,
save_last=True,
)
# fp16 = dict(loss_scale=dict(init_scale=512))
# find_unused_parameters=True
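# Usage note (appended for illustration): this is a standard MMDetection config,
# so training would typically be launched with a distributed script such as
# `bash dist_train.sh <this config> <num_gpus>`; the exact launcher name and
# GPU count here are assumptions, not taken from this repository.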
|
ViT-Adapter-main
|
detection/configs/htc++/htc++_augreg_adapter_large_fpn_3x_coco.py
|
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
model = dict(
type='HybridTaskCascadeAug',
backbone=dict(
type='BEiTAdapter',
img_size=224,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.4,
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
window_attn=[True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True],
window_size=[14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56],
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
pretrained=None),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
ffn_ratio=4.0,
drop_path=0.3,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5),
aug=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=1000,
scale_ranges=[['l'], ['l'], ['m', 'l'],
['s', 'm'], ['s', 'm'], ['s', 'm']],
)
))
# optimizer
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
# file_client_args = dict(backend='petrel')
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=[(3000, 600), (3000, 800), (3000, 1000),
(3000, 1200), (3000, 1400), (3000, 1600)],
flip=True,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(samples_per_gpu=1,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.90))
optimizer_config = dict(grad_clip=None)
checkpoint_config = dict(
interval=1,
max_keep_ckpts=2,
save_last=True,
)
# fp16 = dict(loss_scale=dict(init_scale=512))
|
ViT-Adapter-main
|
detection/configs/htc++/htc++_beitv2_adapter_large_fpn_3x_coco_ms.py
|
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth'
pretrained = 'pretrained/beit_large_patch16_224_pt22k_ft22k.pth'
model = dict(
type='HybridTaskCascade',
backbone=dict(
type='BEiTAdapter',
img_size=224,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.4,
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
window_attn=[True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True,
True, True, True, True, True, True],
window_size=[14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56,
14, 14, 14, 14, 14, 56],
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
pretrained=pretrained),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
ffn_ratio=4.0,
drop_path=0.3,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
# optimizer
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
# file_client_args = dict(backend='petrel')
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='MultiScaleFlipAug',
img_scale=(1600, 1400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(samples_per_gpu=1,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.90))
optimizer_config = dict(grad_clip=None)
checkpoint_config = dict(
interval=1,
max_keep_ckpts=2,
save_last=True,
)
# fp16 = dict(loss_scale=dict(init_scale=512))
# find_unused_parameters=True
|
ViT-Adapter-main
|
detection/configs/htc++/htc++_beit_adapter_large_fpn_3x_coco.py
|
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
pretrained = 'pretrained/uni-perceiver-large-L24-H1024-224size-pretrained_converted.pth'
drop_path_rate = 0.4
model = dict(
type='HybridTaskCascade',
backbone=dict(
type='UniPerceiverAdapter',
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
drop_path_rate=drop_path_rate,
conv_inplane=64,
n_points=4,
deform_num_heads=16,
cffn_ratio=0.25,
deform_ratio=0.5,
with_cp=False,
interaction_indexes=[[0, 5], [6, 11], [12, 17], [18, 23]],
window_attn=[True, True, True, True, True, False,
True, True, True, True, True, False,
True, True, True, True, True, False,
True, True, True, True, True, False],
window_size=[14, 14, 14, 14, 14, None,
14, 14, 14, 14, 14, None,
14, 14, 14, 14, 14, None,
14, 14, 14, 14, 14, None],
pretrained=pretrained),
neck=[
dict(
type='ExtraAttention',
in_channels=[1024, 1024, 1024, 1024],
num_head=32,
with_ffn=True,
ffn_ratio=4.0,
drop_path=drop_path_rate,
),
dict(
type='FPN',
in_channels=[1024, 1024, 1024, 1024],
out_channels=256,
num_outs=5)],
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
# optimizer
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
# file_client_args = dict(backend='petrel')
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='MultiScaleFlipAug',
img_scale=(1600, 1400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(samples_per_gpu=1,
train=dict(seg_prefix='data/coco/stuffthingmaps/train2017/',
pipeline=train_pipeline),
test=dict(pipeline=test_pipeline),
val=dict(pipeline=test_pipeline))
optimizer = dict(_delete_=True,
type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.90))
optimizer_config = dict(grad_clip=None)
checkpoint_config = dict(
interval=1,
max_keep_ckpts=2,
save_last=True,
)
evaluation = dict(save_best='auto')
# fp16 = dict(loss_scale=dict(init_scale=512))
|
ViT-Adapter-main
|
detection/configs/htc++/htc++_uniperceiver_adapter_large_fpn_3x_coco.py
|
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'
pretrained = 'pretrained/deit_small_patch16_224-cd65a155.pth'
model = dict(
type='ATSS',
backbone=dict(
type='ViTAdapter',
patch_size=16,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4,
drop_path_rate=0.2,
conv_inplane=64,
n_points=4,
deform_num_heads=6,
cffn_ratio=0.25,
deform_ratio=1.0,
interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
window_attn=[True, True, False, True, True, False,
True, True, False, True, True, False],
window_size=[14, 14, None, 14, 14, None,
14, 14, None, 14, 14, None],
pretrained=pretrained),
neck=dict(
type='FPN',
in_channels=[384, 384, 384, 384],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(1024, 1024),
allow_negative_crop=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'level_embed': dict(decay_mult=0.),
'pos_embed': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'bias': dict(decay_mult=0.)
}))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
find_unused_parameters = True
checkpoint_config = dict(
interval=1,
max_keep_ckpts=3,
save_last=True,
)
|
ViT-Adapter-main
|
detection/configs/atss/atss_deit_adapter_small_fpn_3x_coco.py
|
# Copyright (c) Shanghai AI Lab. All rights reserved.
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_3x.py',
'../_base_/default_runtime.py'
]
# pretrained = 'https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'
pretrained = 'pretrained/deit_small_patch16_224-cd65a155.pth'
model = dict(
type='GFL',
backbone=dict(
type='ViTAdapter',
patch_size=16,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4,
drop_path_rate=0.2,
conv_inplane=64,
n_points=4,
deform_num_heads=6,
cffn_ratio=0.25,
deform_ratio=1.0,
interaction_indexes=[[0, 2], [3, 5], [6, 8], [9, 11]],
window_attn=[True, True, False, True, True, False,
True, True, False, True, True, False],
window_size=[14, 14, None, 14, 14, None,
14, 14, None, 14, 14, None],
pretrained=pretrained),
neck=dict(
type='FPN',
in_channels=[384, 384, 384, 384],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='GFLHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
reg_max=16,
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(1024, 1024),
allow_negative_crop=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'level_embed': dict(decay_mult=0.),
'pos_embed': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'bias': dict(decay_mult=0.)
}))
optimizer_config = dict(grad_clip=None)
fp16 = dict(loss_scale=dict(init_scale=512))
find_unused_parameters = True
checkpoint_config = dict(
interval=1,
max_keep_ckpts=3,
save_last=True,
)
|
ViT-Adapter-main
|
detection/configs/gfl/gfl_deit_adapter_small_fpn_3x_coco.py
|
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import torch
from functions.ms_deform_attn_func import (MSDeformAttnFunction,
ms_deform_attn_core_pytorch)
from torch.autograd import gradcheck
N, M, D = 1, 2, 2
Lq, L, P = 2, 2, 2
shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
level_start_index = torch.cat((shapes.new_zeros(
(1, )), shapes.prod(1).cumsum(0)[:-1]))
S = sum([(H * W).item() for H, W in shapes])
torch.manual_seed(3)
@torch.no_grad()
def check_forward_equal_with_pytorch_double():
value = torch.rand(N, S, M, D).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1,
keepdim=True).sum(-2,
keepdim=True)
im2col_step = 2
output_pytorch = ms_deform_attn_core_pytorch(
value.double(), shapes, sampling_locations.double(),
attention_weights.double()).detach().cpu()
output_cuda = MSDeformAttnFunction.apply(value.double(), shapes,
level_start_index,
sampling_locations.double(),
attention_weights.double(),
im2col_step).detach().cpu()
fwdok = torch.allclose(output_cuda, output_pytorch)
max_abs_err = (output_cuda - output_pytorch).abs().max()
max_rel_err = ((output_cuda - output_pytorch).abs() /
output_pytorch.abs()).max()
print(
f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}'
)
@torch.no_grad()
def check_forward_equal_with_pytorch_float():
value = torch.rand(N, S, M, D).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1,
keepdim=True).sum(-2,
keepdim=True)
im2col_step = 2
output_pytorch = ms_deform_attn_core_pytorch(
value, shapes, sampling_locations, attention_weights).detach().cpu()
output_cuda = MSDeformAttnFunction.apply(value, shapes, level_start_index,
sampling_locations,
attention_weights,
im2col_step).detach().cpu()
fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3)
max_abs_err = (output_cuda - output_pytorch).abs().max()
max_rel_err = ((output_cuda - output_pytorch).abs() /
output_pytorch.abs()).max()
print(
f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}'
)
def check_gradient_numerical(channels=4,
grad_value=True,
grad_sampling_loc=True,
grad_attn_weight=True):
value = torch.rand(N, S, M, channels).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1,
keepdim=True).sum(-2,
keepdim=True)
im2col_step = 2
func = MSDeformAttnFunction.apply
value.requires_grad = grad_value
sampling_locations.requires_grad = grad_sampling_loc
attention_weights.requires_grad = grad_attn_weight
gradok = gradcheck(
func,
(value.double(), shapes, level_start_index,
sampling_locations.double(), attention_weights.double(), im2col_step))
print(f'* {gradok} check_gradient_numerical(D={channels})')
if __name__ == '__main__':
check_forward_equal_with_pytorch_double()
check_forward_equal_with_pytorch_float()
for channels in [30, 32, 64, 71, 1025, 2048, 3096]:
check_gradient_numerical(channels, True, True, True)
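# Note (appended for illustration): these checks compare the compiled
# MultiScaleDeformableAttention CUDA kernel against the pure-PyTorch reference,
# so they assume the extension from setup.py has been built and a CUDA device
# is visible. gradcheck is run on double-precision inputs, which is why the
# tensors are cast with .double() above.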
|
ViT-Adapter-main
|
detection/ops/test.py
|
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
import glob
import os
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
requirements = ['torch', 'torchvision']
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, 'src')
main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {'cxx': []}
define_macros = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [('WITH_CUDA', None)]
extra_compile_args['nvcc'] = [
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
else:
        raise NotImplementedError('CUDA is not available')
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
'MultiScaleDeformableAttention',
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name='MultiScaleDeformableAttention',
version='1.0',
author='Weijie Su',
url='https://github.com/fundamentalvision/Deformable-DETR',
description=
'PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention',
packages=find_packages(exclude=(
'configs',
'tests',
)),
ext_modules=get_extensions(),
cmdclass={'build_ext': torch.utils.cpp_extension.BuildExtension},
)
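# Build note (appended for illustration, assumed usage): the extension is
# typically built from this directory with `python setup.py build install`.
# A CUDA toolkit is required, since get_extensions() raises NotImplementedError
# when CUDA is unavailable.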
|
ViT-Adapter-main
|
detection/ops/setup.py
|
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import MultiScaleDeformableAttention as MSDA
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.cuda.amp import custom_bwd, custom_fwd
class MSDeformAttnFunction(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx, value, value_spatial_shapes, value_level_start_index,
sampling_locations, attention_weights, im2col_step):
ctx.im2col_step = im2col_step
output = MSDA.ms_deform_attn_forward(value, value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
ctx.im2col_step)
ctx.save_for_backward(value, value_spatial_shapes,
value_level_start_index, sampling_locations,
attention_weights)
return output
@staticmethod
@once_differentiable
@custom_bwd
def backward(ctx, grad_output):
value, value_spatial_shapes, value_level_start_index, \
sampling_locations, attention_weights = ctx.saved_tensors
grad_value, grad_sampling_loc, grad_attn_weight = \
MSDA.ms_deform_attn_backward(
value, value_spatial_shapes, value_level_start_index,
sampling_locations, attention_weights, grad_output, ctx.im2col_step)
return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
def ms_deform_attn_core_pytorch(value, value_spatial_shapes,
sampling_locations, attention_weights):
# for debug and test only,
# need to use cuda version instead
N_, S_, M_, D_ = value.shape
_, Lq_, M_, L_, P_, _ = sampling_locations.shape
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for lid_, (H_, W_) in enumerate(value_spatial_shapes):
# N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_ * M_, D_, H_, W_)
# N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
# N_*M_, D_, Lq_, P_
sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_, mode='bilinear',
padding_mode='zeros', align_corners=False)
sampling_value_list.append(sampling_value_l_)
# (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
attention_weights = attention_weights.transpose(1, 2).reshape(N_ * M_, 1, Lq_, L_ * P_)
output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) *
attention_weights).sum(-1).view(N_, M_ * D_, Lq_)
return output.transpose(1, 2).contiguous()
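# --- Illustrative sketch (appended, not part of the original file) ---
# Minimal shape check for the pure-PyTorch reference above. The tensor layout
# follows the kernel's convention: N batch, S tokens summed over levels,
# M heads, D channels per head, Lq queries, L levels, P sampling points.
# Importing this module still requires the compiled MultiScaleDeformableAttention
# extension because of the MSDA import at the top of the file.
if __name__ == '__main__':
    N, M, D, Lq, L, P = 2, 4, 8, 3, 2, 4
    spatial_shapes = torch.as_tensor([(8, 8), (4, 4)], dtype=torch.long)
    S = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())
    value = torch.rand(N, S, M, D)
    sampling_locations = torch.rand(N, Lq, M, L, P, 2)
    attention_weights = torch.softmax(torch.rand(N, Lq, M, L * P), -1).view(N, Lq, M, L, P)
    out = ms_deform_attn_core_pytorch(value, spatial_shapes, sampling_locations, attention_weights)
    assert out.shape == (N, Lq, M * D)  # output is (N, Length_query, C)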
|
ViT-Adapter-main
|
detection/ops/functions/ms_deform_attn_func.py
|
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
from .ms_deform_attn_func import MSDeformAttnFunction
__all__ = ['MSDeformAttnFunction']
|
ViT-Adapter-main
|
detection/ops/functions/__init__.py
|
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import math
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.init import constant_, xavier_uniform_
from ..functions import MSDeformAttnFunction
def _is_power_of_2(n):
if (not isinstance(n, int)) or (n < 0):
raise ValueError('invalid input for _is_power_of_2: {} (type: {})'.format(n, type(n)))
return (n & (n - 1) == 0) and n != 0
class MSDeformAttn(nn.Module):
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4, ratio=1.0):
"""Multi-Scale Deformable Attention Module.
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, '
'but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# you'd better set _d_per_head to a power of 2
# which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn(
"You'd better set d_model in MSDeformAttn to make "
'the dimension of each attention head a power of 2 '
'which is more efficient in our CUDA implementation.')
self.im2col_step = 64
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.ratio = ratio
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, int(d_model * ratio))
self.output_proj = nn.Linear(int(d_model * ratio), d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(
self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(
self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, input_flatten, input_spatial_shapes,
input_level_start_index, input_padding_mask=None):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] *
input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads,
int(self.ratio * self.d_model) // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(
N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(
N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).\
view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack(
[input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] \
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'
.format(reference_points.shape[-1]))
output = MSDeformAttnFunction.apply(value, input_spatial_shapes, input_level_start_index,
sampling_locations, attention_weights, self.im2col_step)
output = self.output_proj(output)
return output
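# --- Illustrative sketch (appended, not part of the original file) ---
# Minimal example of calling the module above. It assumes the compiled
# MultiScaleDeformableAttention CUDA extension is installed and a GPU is
# available; all sizes are arbitrary and chosen only to satisfy the shape
# contract documented in forward().
if __name__ == '__main__' and torch.cuda.is_available():
    attn = MSDeformAttn(d_model=256, n_levels=2, n_heads=8, n_points=4).cuda()
    spatial_shapes = torch.as_tensor([(32, 32), (16, 16)], dtype=torch.long).cuda()
    level_start_index = torch.cat((spatial_shapes.new_zeros((1, )),
                                   spatial_shapes.prod(1).cumsum(0)[:-1]))
    len_in = int(spatial_shapes.prod(1).sum())
    query = torch.rand(2, 100, 256).cuda()              # (N, Length_query, C)
    input_flatten = torch.rand(2, len_in, 256).cuda()   # (N, sum_l H_l*W_l, C)
    reference_points = torch.rand(2, 100, 2, 2).cuda()  # (N, Length_query, n_levels, 2)
    output = attn(query, reference_points, input_flatten,
                  spatial_shapes, level_start_index)
    print(output.shape)  # torch.Size([2, 100, 256])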
|
ViT-Adapter-main
|
detection/ops/modules/ms_deform_attn.py
|
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
from .ms_deform_attn import MSDeformAttn
__all__ = ['MSDeformAttn']
|
ViT-Adapter-main
|
detection/ops/modules/__init__.py
|
import torch
from qwen.model import QwenVL
#usage
img = torch.randn(1, 3, 256, 256)
caption = torch.randint(0, 20000, (1, 1024))
model = QwenVL()
output = model(img, caption)
print(output.shape)
|
Qwen-VL-main
|
example.py
|
from qwen.inference import QwenVLChat
qwen_chat = QwenVLChat(model_name="Qwen/Qwen-VL-Chat", device_map="cuda")
response = qwen_chat.chat([
{"image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"},
{"text": "这是什么?"}
])
print(response)
response = qwen_chat.chat("框出图中击掌的位置")
print(response)
image = qwen_chat.draw_bbox_on_latest_picture(response)
if image:
image.save("1.jpg")
else:
print("no box")
# For Qwen-VL
qwen_vl = QwenVLChat(model_name="Qwen/Qwen-VL", device_map="cuda")
response = qwen_vl.chat([
{"image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"},
{"text": "Generate the caption in English with grounding:"}
])
print(response)
image = qwen_vl.draw_bbox_on_latest_picture(response)
if image:
image.save("2.jpg")
else:
print("no box")
|
Qwen-VL-main
|
inference.py
|
from qwen.model import QwenVL, QwenVLTokenizer
from qwen.train import CFG, Train
from qwen.inference import QwenVLChat
|
Qwen-VL-main
|
qwen/__init__.py
|
import torch
import torch.nn as nn
from transformers import AutoTokenizer, CLIPProcessor
from qwen.transformer import (
Decoder,
Encoder,
Transformer,
ViTransformerWrapper,
AutoregressiveWrapper
)
class QwenVLTokenizer:
def __init__(self):
try:
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
additional_special_tokens=["<img>", "</img>"],
eos_token ="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<img>", "</img>"])
except Exception as e:
print(f"Error init tokenizer: {e}")
def tokenize_texts(self, texts):
try:
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
except Exception as e:
print(f"Error tokenizing texts: {e}")
def tokenize_images(self, images):
try:
tokenized_images = self.processor(images=images, return_tensors="pt").pixel_values
print(f"Tokenized image: {tokenized_images.shape}")
return tokenized_images
except Exception as e:
print(f"Error tokenizing texts: {e}")
def tokenize(self, sample):
try:
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["img"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
except Exception as e:
print(f"Error during tokenization {e}")
class QwenVL(torch.nn.Module):
def __init__(self,
image_size=256,
patch_size=32,
encoder_dim=512,
encoder_depth=6,
encoder_heads=8,
num_tokens=20000,
max_seq_len=1024,
decoder_dim=512,
decoder_depth=6,
decoder_heads=8,
alibi_num_heads=4,
use_abs_pos_emb=False,
cross_attend=True,
alibi_pos_bias=True,
rotary_xpos=True,
attn_flash=True,
qk_norm=True):
super(QwenVL, self).__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads
)
)
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=decoder_dim,
depth=decoder_depth,
heads=decoder_heads,
cross_attend=cross_attend,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
qk_norm=qk_norm,
)
)
self.decoder = AutoregressiveWrapper(self.decoder)
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
raise
|
Qwen-VL-main
|
qwen/model.py
|
from functools import partial
from typing import Optional
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Optional[Tensor] = None
pre_softmax_attn: Optional[Tensor] = None
post_softmax_attn: Optional[Tensor] = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
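# Worked example (appended for illustration): for i = j = 3 the mask above is
#   [[False,  True,  True],
#    [False, False,  True],
#    [False, False, False]]
# i.e. True marks the future positions that get masked out; the ONNX variant
# below builds the same pattern without .triu(), which ONNX CPU export lacks.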
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
# in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
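# Editor's sketch (not part of the original file): hedged usage example for the Attend
# module above; the shapes and `_demo_` name are illustrative assumptions.
def _demo_attend():
    attend = Attend(causal = True, dropout = 0., flash = False)
    q = torch.randn(2, 8, 16, 64)  # (batch, heads, seq_len, dim_head)
    k = torch.randn(2, 8, 16, 64)
    v = torch.randn(2, 8, 16, 64)
    out, intermediates = attend(q, k, v)
    return out.shape  # torch.Size([2, 8, 16, 64])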
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
        # now loop through each head, with the output of the previous head summed into the next head's queries
# thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
return all_outs, aggregated_intermediates
|
Qwen-VL-main
|
qwen/attend.py
|
import torch
# This is the unfused version of StableAdamW. It is slower than the fused version (coming).
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
precision="amp_bfloat16",
custom_scalar=65536,
):
beta1, beta2 = betas[0], betas[1]
defaults = dict(lr=lr, weight_decay=weight_decay, beta1=beta1, beta2=beta2)
super(StableAdamWUnfused, self).__init__(params, defaults)
self.eps = eps
self.d = clip_thresh
# Set precision to "custom_fp16" if you want to use a fixed loss scalar, custom_scalar, which is divided out in the update step.
# If you do this, call (custom_scalar * loss).backward() instead of loss.backward().
self.precision = precision
self.custom_scaler = custom_scalar
for group in self.param_groups:
group["step"] = 1.0
print("Using StableAdamWUnfused-v1")
def __setstate__(self, state):
super(StableAdamWUnfused, self).__setstate__(state)
def step(self, closure=None):
if closure is not None:
closure()
for group in self.param_groups:
lr = group["lr"]
weight_decay = group["weight_decay"]
beta1 = group["beta1"]
beta2 = group["beta2"]
step = group["step"]
for p in group["params"]:
if p.grad is None:
continue
theta = p.data
param_state = self.state[p]
if self.precision == "custom_fp16":
g = p.grad.data / self.custom_scaler
if torch.any(torch.isnan(g) | torch.isinf(g)):
continue
else:
g = p.grad.data
if "exp_avg" not in param_state:
v = param_state["exp_avg"] = torch.zeros_like(theta)
u = param_state["exp_avg_sq"] = torch.zeros_like(theta)
else:
v = param_state["exp_avg"]
u = param_state["exp_avg_sq"]
beta1hat = beta1 * (1 - beta1 ** (step - 1)) / (1 - beta1**step)
beta2hat = beta2 * (1 - beta2 ** (step - 1)) / (1 - beta2**step)
v = v.mul_(beta1hat).add_(g, alpha=1.0 - beta1hat)
u = u.mul_(beta2hat).addcmul_(g, g, value=1.0 - beta2hat)
denominator = u.sqrt().add_(self.eps)
# StableAdamW = AdamW + update clipping (https://arxiv.org/abs/1804.04235) applied tensor-wise.
rms = (
torch.div(
g.pow(2), torch.maximum(u, (self.eps**2) * torch.ones_like(u))
)
.mean()
.sqrt()
.item()
)
theta = theta.mul_(1.0 - lr * weight_decay).addcdiv_(
v, denominator, value=-lr * (1.0 / max(1.0, rms / self.d))
)
# save current params
param_state["exp_avg"] = v
param_state["exp_avg_sq"] = u
group["step"] = step + 1
|
Qwen-VL-main
|
qwen/utils.py
|
import math
from dataclasses import dataclass
from functools import partial, wraps
from inspect import isfunction
# constants
from math import ceil
from random import random
from typing import Callable, List, Optional
import torch
import torch.nn.functional as F
from einops import pack, rearrange, reduce, repeat, unpack
from torch import Tensor, einsum, nn
from qwen.attend import Attend, Intermediates
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
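# Editor's sketch (not part of the original file): the toy logits below are illustrative,
# contrasting the three filtering helpers above.
def _demo_logit_filters():
    logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
    filtered_top_k = top_k(logits.clone(), thres = 0.9)  # keeps ceil(0.1 * 4) = 1 largest logit
    filtered_top_p = top_p(logits.clone(), thres = 0.9)  # keeps tokens until cumulative prob exceeds (1 - thres)
    filtered_top_a = top_a(logits.clone())               # keeps tokens above max_prob ** min_p_pow * min_p_ratio
    return filtered_top_k, filtered_top_p, filtered_top_a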
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, return_loss=True, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
if return_loss:
return logits, loss
return logits
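# Editor's sketch (not part of the original file): `_TinyNet` is a stand-in stub, not the
# project's real model; it only exists to show the wrapper's training and generation API.
class _TinyNet(nn.Module):
    def __init__(self, num_tokens = 32, dim = 16, max_seq_len = 64):
        super().__init__()
        self.max_seq_len = max_seq_len
        self.emb = nn.Embedding(num_tokens, dim)
        self.to_logits = nn.Linear(dim, num_tokens)
    def forward(self, x, **kwargs):
        return self.to_logits(self.emb(x))
def _demo_autoregressive_wrapper():
    wrapper = AutoregressiveWrapper(_TinyNet())
    seq = torch.randint(0, 32, (2, 10))
    logits, loss = wrapper(seq)                  # teacher-forced next-token loss
    generated = wrapper.generate(seq[:, :4], 8)  # samples 8 new tokens per sequence
    return loss.item(), generated.shape          # generated: torch.Size([2, 8])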
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: Optional[List[Tensor]] = None
attn_intermediates: Optional[List[Intermediates]] = None
layer_hiddens: Optional[List[Tensor]] = None
attn_z_loss: Optional[Tensor] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# auxiliary loss helpers
def calc_z_loss(
pre_softmax_attns: List[Tensor],
mask = None,
weight = 1.
):
# the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906
# in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects
# also used in PaLM as one of the measures
lse = 0.
for attn in pre_softmax_attns:
lse = lse + attn.logsumexp(dim = -1)
loss = torch.square(lse)
loss = reduce(loss, 'b h n -> b n', 'sum')
if not exists(mask):
return loss.mean() * weight
loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5)
return loss * weight
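# Editor's sketch (not part of the original file): the random attention logits below are
# illustrative; they stand in for per-layer pre-softmax attentions.
def _demo_calc_z_loss():
    pre_softmax_attns = [torch.randn(2, 8, 16, 16) for _ in range(3)]  # (b, h, n, n) per layer
    return calc_z_loss(pre_softmax_attns, weight = 1e-4)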
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
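# Editor's sketch (not part of the original file): shows how the prefix helpers above route
# keyword arguments; AttentionLayers uses the same mechanism for 'attn_' and 'ff_' prefixes.
def _demo_prefix_kwargs():
    kwargs = dict(attn_dim_head = 64, attn_flash = True, ff_mult = 4, depth = 6)
    attn_kwargs, rest = groupby_prefix_and_trim('attn_', kwargs)
    ff_kwargs, rest = groupby_prefix_and_trim('ff_', rest)
    return attn_kwargs, ff_kwargs, rest  # ({'dim_head': 64, 'flash': True}, {'mult': 4}, {'depth': 6})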
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
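# Editor's sketch (not part of the original file): hedged example of the structured sequence
# dropout above; the shapes and the 0.5 rate are illustrative.
def _demo_dropout_seq():
    seq = torch.randn(2, 10, 16)
    mask = torch.ones(2, 10, dtype = torch.bool)
    kept_seq, kept_mask = dropout_seq(seq, mask, dropout = 0.5)
    return kept_seq.shape, kept_mask.shape  # torch.Size([2, 5, 16]), torch.Size([2, 5])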
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert (dim % 2) == 0
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
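# Editor's sketch (not part of the original file): the head counts below are illustrative;
# heads beyond `heads` receive a zero (padded) bias, per the class above.
def _demo_alibi_bias():
    alibi = AlibiPositionalBias(heads = 4, total_heads = 8)
    bias = alibi(8, 8)  # (total_heads, i, j)
    return bias.shape   # torch.Size([8, 8, 8])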
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
interpolation_factor = 1.,
base = 10000,
base_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
base *= base_rescale_factor ** (dim / (dim - 2))
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
assert interpolation_factor >= 1.
self.interpolation_factor = interpolation_factor
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
t = t / self.interpolation_factor
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
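# Editor's sketch (not part of the original file): ties RotaryEmbedding and
# apply_rotary_pos_emb together; the shapes are illustrative assumptions.
def _demo_rotary():
    rotary = RotaryEmbedding(dim = 32)
    q = torch.randn(1, 8, 16, 32)             # (batch, heads, seq_len, dim_head)
    freqs, xpos_scale = rotary(16, q.device)  # xpos_scale is 1. when use_xpos = False
    q_rotated = apply_rotary_pos_emb(q, freqs, xpos_scale)
    return q_rotated.shape                    # torch.Size([1, 8, 16, 32])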
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
scale_fn = lambda t: t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.g
class SimpleRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(
self,
dim_in,
dim_out,
activation: Callable,
mult_bias = False
):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1.
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate) * self.mult_bias
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
glu_mult_bias = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
if glu:
project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias)
else:
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
)
self.ff = Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else None,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
cascading_heads = False,
add_zero_kv = False, # same as add_zero_attn in pytorch
onnxable = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
value_dim_head = default(value_dim_head, dim_head)
q_dim = k_dim = dim_head * heads
v_dim = out_dim = value_dim_head * heads
self.one_kv_head = one_kv_head
if one_kv_head:
k_dim = dim_head
v_dim = value_dim_head
out_dim = v_dim * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
sparse_topk = sparse_topk,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv = add_zero_kv,
flash = flash,
onnxable = onnxable
)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if not self.one_kv_head:
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
scale = self.qk_norm_scale
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = context_mask if has_context else mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
mask_value = max_neg_value(q)
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
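# Editor's sketch (not part of the original file): hedged usage example for the Attention
# block above; the dimensions are illustrative assumptions.
def _demo_attention_block():
    attn = Attention(dim = 64, dim_head = 16, heads = 4, causal = True)
    x = torch.randn(2, 12, 64)
    mask = torch.ones(2, 12, dtype = torch.bool)
    out, intermediates = attn(x, mask = mask)
    return out.shape  # torch.Size([2, 12, 64])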
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
use_simple_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
rotary_base_rescale_factor = 1.,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
            assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than or equal to the total number of heads'
self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm'
if use_scalenorm:
norm_class = ScaleNorm
elif use_rmsnorm:
norm_class = RMSNorm
elif use_simple_rmsnorm:
norm_class = SimpleRMSNorm
else:
norm_class = nn.LayerNorm
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
            assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be positive and at most the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
is_last_layer = ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
layer_hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x * self.resi_dual_scale
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
is_last = ind == (len(self.layers) - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
if return_hiddens:
layer_hiddens.append(x)
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm):
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = outer_residual + out * self.resi_dual_scale
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if return_hiddens:
layer_hiddens.append(x)
if self.resi_dual:
x = x + self.final_norm(outer_residual)
else:
x = self.final_norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates,
layer_hiddens = layer_hiddens
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1
attn_z_loss_weight = 1e-4
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
return_attn_z_loss = False,
attn_z_loss_weight = 1e-4,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
        # whether to prepend embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
            # auto-handle masking after prepending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_attn_z_loss:
pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates))
intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight)
return_intermediates = True
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
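# Editor's sketch (not part of the original file): an end-to-end example with deliberately
# tiny hyperparameters; they are illustrative, not the project's real configuration.
def _demo_transformer():
    model = Transformer(
        num_tokens = 256,
        max_seq_len = 128,
        attn_layers = Decoder(dim = 64, depth = 2, heads = 4)
    )
    tokens = torch.randint(0, 256, (2, 32))
    logits = model(tokens)
    return logits.shape  # torch.Size([2, 32, 256])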
|
Qwen-VL-main
|
qwen/transformer.py
|
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
########### SETUP CONFIG
import torch.distributed as dist
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.state import AcceleratorState
from accelerate.utils import DummyOptim, InitProcessGroupKwargs
from datasets import load_dataset
from lion_pytorch import Lion
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl,
apply_activation_checkpointing,
checkpoint_wrapper,
)
# import bitsandbytes as bnb
from torch.distributed.fsdp import (
BackwardPrefetch,
FullyShardedDataParallel,
MixedPrecision,
ShardingStrategy,
)
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torch.nn import LayerNorm
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AutoTokenizer,
default_data_collator,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup,
set_seed,
)
from qwen.model import QwenVL
from qwen.transformer import Transformer
from qwen.utils import StableAdamWUnfused
# state = AcceleratorState()
logger = get_logger(__name__, log_level="INFO")
class CFG:
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY: int = 1
SEED: int = 42
LEARNING_RATE: float = 1e-4 #3e-4 # 1e-4 for lion
WEIGHT_DECAY: float = 0.1
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
USE_DEEPSPEED: bool = True
USE_FSDP: bool = True
USE_PRETOKENIZED: bool = True
USE_ACTIVATION_CHECKPOINTING: bool = True
RESUME_FROM_CHECKPOINT: str = False
CHECKPOINTING_STEPS: int = 1000
OUTPUT_DIR: str = 'checkpoints/' # Folder
ENTITY_NAME: str = "Andromeda"
LOGGING_STEPS: int = 100
# helpers
def print_num_params(model, accelerator: Accelerator):
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
# activation checkpointing
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print("Using activation checkpointing")
def check_fn(submodule):
return isinstance(submodule, Transformer)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
# FSDP
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
Andromeda_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
Transformer,
},
)
else:
Andromeda_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
raise ValueError(
"Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=Andromeda_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
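# Usage sketch (illustrative; requires an initialized torch.distributed process group, e.g. via torchrun):
# model = fsdp(model, auto_wrap=True, mp="bf16", shard_strat="FULL_SHARD")
# FULL_SHARD shards parameters, gradients and optimizer state across ranks; SHARD_GRAD_OP shards
# only gradients and optimizer state; NO_SHARD behaves like conventional DDP.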
# learning rate scheduler
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
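# Usage sketch (illustrative values):
# lr_scheduler = get_lr_scheduler_with_warmup(
#     optimizer=optim,
#     scheduler_type="cosine",
#     num_warmup_steps=100,
#     max_train_steps=10_000,
#     grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
# )
# Note that this helper scales both the warmup and total step counts by grad_accumulate_every.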
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Build an optimizer with weight decay decoupled into separate parameter groups.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', 'deepspeed', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw', 'deepspeed' or 'stable_adamw'.
"""
accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
try:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
except KeyError:
# print(f"Parameter {param_name} does not exist in the model")
pass
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "deepspeed":
optimizer = DummyOptim(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
# elif optimizer_type=="Adam8bit":
# optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
# elif optimizer_type=="Lion8Bit":
# optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
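# Usage sketch (illustrative; `model` is any torch.nn.Module, `accelerator` an Accelerator):
# optim = decoupled_optimizer(
#     model=model,
#     learning_rate=3e-4,
#     weight_decay=0.1,
#     beta_1=0.9,
#     beta_2=0.95,
#     optimizer_type="adamw",
#     use_fsdp=False,
#     accelerator=accelerator,
# )
# Linear weights land in the weight-decay group; LayerNorm and embedding weights are excluded from decay.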
# dataloaders
def build_dataloaders():
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=CFG.NUM_CPU,
)
return train_dataset
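# Worked example of the chunking above (illustrative only): with block_size = 4 and a tokenized
# batch {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}, group_texts concatenates the lists to
# [1, 2, 3, 4, 5, 6, 7, 8] and returns {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]]};
# any remainder shorter than block_size is dropped.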
#switch to falconwebdataset
def build_pre_tokenized():
d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train[:10]")
# d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
# d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
# d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
# d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
# train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return d0
def Train():
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
state = AcceleratorState()
state.deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = CFG.BATCH_SIZE  # DeepSpeed needs an explicit per-GPU micro batch size
accelerator.init_trackers(
project_name="Andromeda",
config={
"batch_size": CFG.BATCH_SIZE,
"gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY,
"learning_rate": CFG.LEARNING_RATE,
"seq_len": CFG.SEQ_LEN,
},
# init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(CFG.SEED)
model = QwenVL()
print_num_params(model, accelerator)
if CFG.USE_FSDP:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if CFG.USE_ACTIVATION_CHECKPOINTING:
activation_checkpointing(model, accelerator)
model = accelerator.prepare(model)
# dataloaders
if CFG.USE_PRETOKENIZED:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator,
)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=CFG.LEARNING_RATE,
weight_decay=CFG.WEIGHT_DECAY,
beta_1=0.90,
beta_2=0.95,
optimizer_type='lion',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
# if False: # if CFG.USE_DEEPSPEED:
# lr_scheduler = DummyScheduler(
# optim,
# total_num_steps=max_train_steps * accelerator.num_processes,
# warmup_num_steps=NUM_WARMUP_STEPS
# )
# else:
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
# Recalculate max_train_steps: accelerator.prepare() shards the dataloader across processes, so its length can change
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if CFG.RESUME_FROM_CHECKPOINT:
if CFG.RESUME_FROM_CHECKPOINT is not None and CFG.RESUME_FROM_CHECKPOINT != "":
accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}")
accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT)
path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* CFG.GRADIENT_ACCUMULATE_EVERY
)
if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(CFG.CHECKPOINTING_STEPS, int):
if completed_steps % CFG.CHECKPOINTING_STEPS == 0:
output_dir = f"step_{completed_steps }"
if CFG.OUTPUT_DIR is not None:
output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
# log every CFG.LOGGING_STEPS steps
if CFG.LOGGING_STEPS > 0 and step % CFG.LOGGING_STEPS == 0:
logger.info(
f"Step: {completed_steps}/{max_train_steps}, Loss: {loss.item():.5f}"
)
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}")
if CFG.OUTPUT_DIR is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt"
)
def train():
os.environ['MASTER_ADDR'] #'localhost'
os.environ['MASTER_PORT'] #= '9994'
# # [CRITICAL] Pay attention to this when scaling to multiple GPUs and clusters
# # Pay attention to this, use "accelerate config"
os.environ['RANK'] #= str(0) # Number of nodes (servers)
os.environ['WORLD_SIZE'] # = str(torch.cuda.device_count())
dist.init_process_group(backend='nccl') #init_method="env://")
Train()
if __name__ == '__main__':
train()
|
Qwen-VL-main
|
qwen/train.py
|
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import torch
class QwenVLChat:
def __init__(self,
model_name,
device_map="cuda",
trust_remote_code=True,
bf16=False,
fp16=False,
cpu=False,
seed=1234):
torch.manual_seed(seed)
self.tokenizer = AutoTokenizer.from_pretrained(model_name,
trust_remote_code=trust_remote_code)
if bf16:
self.model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map,
trust_remote_code=trust_remote_code, bf16=True).eval()
elif fp16:
self.model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map,
trust_remote_code=trust_remote_code, fp16=True).eval()
elif cpu:
self.model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cpu", trust_remote_code=trust_remote_code).eval()
else:
self.model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map, trust_remote_code=trust_remote_code).eval()
self.model.generation_config = GenerationConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
self.history = None
def chat(self, text):
query = self.tokenizer.from_list_format(text)
response, self.history = self.model.chat(self.tokenizer, query=query, history=self.history)
return response
def draw_box(self, response):
image = self.tokenizer.draw_bbox_on_latest_picture(response, self.history)
return image
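# Usage sketch (illustrative; assumes a Qwen-VL chat checkpoint such as "Qwen/Qwen-VL-Chat" and that
# from_list_format accepts a list of {"image": ...} / {"text": ...} dicts, as in the upstream examples):
# bot = QwenVLChat("Qwen/Qwen-VL-Chat", bf16=True)
# response = bot.chat([
#     {"image": "https://example.com/demo.jpeg"},
#     {"text": "Describe this image."},
# ])
# boxed = bot.draw_box(response)  # returns an image if the response contains boxes, otherwise None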
|
Qwen-VL-main
|
qwen/inference.py
|
import torch
from cm3.model import CM3
#usage
img = torch.randn(1, 3, 256, 256)
caption = torch.randint(0, 20000, (1, 1024))
model = CM3()
output = model(img, caption)
print(output.shape) # (1, 1024, 20000)
|
CM3Leon-main
|
example.py
|
from cm3.model import CM3Tokenizer, CM3
|
CM3Leon-main
|
cm3/__init__.py
|
import logging
import torch
from torch import nn
from torch.nn import Module
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import AutoTokenizer, CLIPProcessor
from zeta.nn.architecture.transformer import Decoder, Encoder, Transformer, ViTransformerWrapper
from zeta.nn.architecture.auto_regressive_wrapper import AutoregressiveWrapper
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
# Implement classes with type hints and error handling
class CM3Tokenizer:
"""
A tokenizer class for the CM3LEON model
Attributes:
processor(CLIPProcessor): The processor to tokenize images
tokenizer: (AutoTokenizer): The tokenizer to tokenize text
im_idx: (int): The Index of the "<image>" token.
im_end_idx (int): The index of the "</image>" token.
break_idx (int): The index of the "<break>" token.
"""
def __init__(self):
try:
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
additional_special_tokens=["<image>", "</image>", "<break>"],
eos_token="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
self.image_tokenizer = Compose([
Resize((256, 256)),
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
except Exception as e:
logging.error(f"Failed to initialize AndromedaTokenizer: {e}")
raise
self.im_idx, self.im_end_idx, self.break_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>", "<break>"])
def tokenize_texts(self, texts: str):
"""
Tokenize given texts.
Args:
Texts (str): The Text to be tokenized
Returns:
A tuple containing the tokenized texts and only the text tokens.
"""
try:
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image tokens to text as "<s> <image> </image> <break> text </s>"
special_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.break_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], special_tokens, texts[:, 1:]], dim=1), texts
except Exception as e:
logging.error(f"Failed to tokenize texts: {e}")
raise
def tokenize_images(self, images):
"""
Tokenizes given images.
Args:
images: The images to be tokenized
Returns:
The tokenized images.
"""
try:
return self.processor(images=images, return_tensors="pt").pixel_values
except Exception as e:
logging.error(f"Failed to tokenize images: {e}")
raise
def tokenize(self, sample):
"""
Tokenizes given sample.
Args:
Sample: The sample to be tokenized
Returns:
A dictionary containing the tokenized text tokens, images, labels, and attention mask.
"""
try:
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
except Exception as e:
logging.error(f"Failed to tokenize sample: {e}")
raise
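# Usage sketch (hypothetical sample; assumes PIL is installed to load the image):
# from PIL import Image
# tokenizer = CM3Tokenizer()
# sample = {"target_text": "a photo of a cat", "image": Image.open("cat.jpg")}
# batch = tokenizer.tokenize(sample)  # dict with text_tokens, images, labels, attention_mask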
class CM3(Module):
"""
CM3 is a transformer-based model architecture. It initializes with
a Transformer and AutoregressiveWrapper with default or user-specified parameters.
Initialize the model with specified or default parameters.
Args:
- num_tokens: Number of tokens in the vocabulary
- max_seq_len: Maximum sequence length
- dim: Dimension of the model
- depth: Depth of the model
- dim_head: Dimension of the model head
- heads: Number of heads
- use_abs_pos_emb: Whether to use absolute position embedding
- alibi_pos_bias: Alibi position bias
- alibi_num_heads: Number of alibi heads
- rotary_xpos: Rotary position
- attn_flash: Attention flash
- deepnorm: Deep normalization
- shift_tokens: Number of tokens to shift
- attn_one_kv_head: Attention one key/value head
- qk_norm: Query-key normalization
- attn_qk_norm: Attention query-key normalization
- attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
"""
def __init__(
self,
num_tokens=50432,
max_seq_len=8192,
dim=2560,
depth=32,
dim_head=128,
heads=24,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=12,
rotary_xpos=True,
attn_flash=True,
image_size=256,
patch_size=32,
attn_one_kv_head=True, # multiquery attention
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
):
super().__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=dim,
depth=depth,
dim_head=dim_head,
heads=heads
)
)
self.transformer = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=dim,
depth=depth,
dim_head=dim_head,
heads=heads,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
# attn_one_kv_head=attn_one_kv_head,
# qk_norm=qk_norm,
# attn_qk_norm=attn_qk_norm,
# attn_qk_norm_dim_scale=attn_qk_norm_dim_scale,
cross_attend=True
)
)
self.decoder = AutoregressiveWrapper(self.transformer)
def mask_and_relocate(self, text_tokens):
#mask image span
text_tokens = text_tokens.masked_fill(text_tokens==self.im_idx, self.mask_token)
#relocate to end
image_span = text_tokens[text_tokens==self.im_end_idx].unsqueeze(1)
text_tokens = torch.cat([text_tokens, image_span], dim=1)
return text_tokens
def cm3_loss(self, log_probs, labels):
#cm3 loss prediction
loss = nn.NLLLoss()(log_probs, labels)
return loss
# def forward(self, text_tokens, img, **kwargs):
# try:
# encoded_img = self.encoder(img, return_embeddings=True)
# #mask and relocate image span in text tokens
# text_tokens = self.mask_and_relocate(text_tokens)
# #concat
# context = torch.cat([encoded_img, text_tokens], dim=1)
# #get log probs
# log_probs = self.decoder(context, **kwargs)
# #calculate cm3 loss
# loss = self.cm3_loss(log_probs, text_tokens)
# return loss
# # return self.decoder(text_tokens, context=encoded_img)
# except Exception as error:
# print(f"Failed in forward method: {error}")
# raise
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
raise
|
CM3Leon-main
|
cm3/model.py
|
CM3Leon-main
|
cm3/transformer.py
|
|
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
from torch.distributed.fsdp import (
FullyShardedDataParallel,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
)
from accelerate import Accelerator
from accelerate.utils import (DummyOptim, DummyScheduler,
InitProcessGroupKwargs)
from datasets import load_dataset
from lion_pytorch import Lion
from torch.nn import LayerNorm
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointImpl, apply_activation_checkpointing, checkpoint_wrapper)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy
)
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (AutoTokenizer, default_data_collator,
get_cosine_schedule_with_warmup,
get_linear_schedule_with_warmup, set_seed)
from cm3.utils.stable_adamw import StableAdamWUnfused
# import bitsandbytes as bnb
from cm3.model import CM3
########### SETUP CONFIG
import torch.distributed as dist
# dist.init_process_group(backend='nccl') #init_method="env://")
###############
class CFG:
BATCH_SIZE = 3
GRADIENT_ACCUMULATE_EVERY: int = 1
SEED: int = 42
LEARNING_RATE: float = 3e-4
WEIGHT_DECAY: float = 0.1
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
USE_DEEPSPEED: bool = True
USE_FSDP: bool = True
USE_PRETOKENIZED: bool = True
USE_ACTIVATION_CHECKPOINTING: bool = True
RESUME_FROM_CHECKPOINT: str = None  # path to a checkpoint directory, or None to start from scratch
CHECKPOINTING_STEPS: int = 1000
OUTPUT_DIR: str = 'checkpoints/' # Folder
ENTITY_NAME: str = "CM3LEON"
# helpers
def print_num_params(model, accelerator: Accelerator):
# n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
accelerator.print(f"Number of parameters in model: {n_params}")
# activation checkpointing
def activation_checkpointing(
model: torch.nn.Module,
offload_to_cpu: bool = False,
accelerator: Accelerator = None,
):
"""
Apply activation checkpointing to a model.
Args:
model (Module): The model to which to apply activation checkpointing.
offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
"""
if accelerator is not None:
accelerator.print("Using activation checkpointing")
def check_fn(submodule):
return isinstance(submodule, CM3)
non_reentrant_wrapper = partial(
checkpoint_wrapper,
offload_to_cpu=offload_to_cpu,
checkpoint_impl=CheckpointImpl.NO_REENTRANT,
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn
)
# FSDP
def fsdp(
model: torch.nn.Module,
auto_wrap: bool = False,
mp: str = "fp32",
shard_strat: str = "NO_SHARD",
):
"""
This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding.
Args:
model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP.
auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False.
mp (str, optional): The mixed precision mode to be used. Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'.
shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'.
Raises:
ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'.
ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'.
Returns:
torch.nn.Module: The input model wrapped with FSDP.
"""
if auto_wrap:
CM3LEON_auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
CM3,
},
)
else:
CM3LEON_auto_wrap_policy = None
if mp == "bf16":
mp_fsdp = MixedPrecision(
param_dtype=torch.bfloat16,
# Gradient communication precision.
reduce_dtype=torch.bfloat16,
# Buffer precision.
buffer_dtype=torch.bfloat16,
)
elif mp == "fp16":
mp_fsdp = MixedPrecision(
param_dtype=torch.float16,
# Gradient communication precision.
reduce_dtype=torch.float16,
# Buffer precision.
buffer_dtype=torch.float16,
)
elif mp == "fp32":
mp_fsdp = MixedPrecision(
param_dtype=torch.float32,
# Gradient communication precision.
reduce_dtype=torch.float32,
# Buffer precision.
buffer_dtype=torch.float32,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format(
mp
)
)
if shard_strat == "SHARD_GRAD":
sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP
elif shard_strat == "FULL_SHARD":
sharding_strat_fsdp = ShardingStrategy.FULL_SHARD
elif shard_strat == "NO_SHARD":
sharding_strat_fsdp = ShardingStrategy.NO_SHARD
else:
raise ValueError(
"Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format(
shard_strat
)
)
model = FullyShardedDataParallel(
model,
auto_wrap_policy=CM3LEON_auto_wrap_policy,
mixed_precision=mp_fsdp,
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
sharding_strategy=sharding_strat_fsdp,
forward_prefetch=True,
use_orig_params=True,
)
return model
# learning rate scheduler
def get_lr_scheduler_with_warmup(
optimizer: torch.optim.Optimizer,
scheduler_type: str,
num_warmup_steps: int,
max_train_steps: int,
grad_accumulate_every: int = 1,
accelerator: Accelerator = None,
):
"""
Get a learning rate scheduler with warmup.
Args:
optimizer (Optimizer): The optimizer for which to create the learning rate scheduler.
scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine".
num_warmup_steps (int): The number of warmup steps for the learning rate scheduler.
max_train_steps (int): The maximum number of training steps.
grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1.
accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None.
Returns:
The learning rate scheduler with warmup.
Raises:
ValueError: If scheduler_type is not "linear" or "cosine".
"""
NUM_WARMUP_STEPS = num_warmup_steps
GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every
if accelerator is not None:
accelerator.print(f"Using {scheduler_type} lr scheduler")
if scheduler_type == "linear":
return get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
elif scheduler_type == "cosine":
return get_cosine_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY,
num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY,
)
else:
raise ValueError(
"Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format(
scheduler_type
)
)
# optimizers
def decoupled_optimizer(
model: torch.nn.Module,
learning_rate: float,
weight_decay: float,
beta_1: float,
beta_2: float,
optimizer_type: str,
use_fsdp: bool = True,
accelerator: Accelerator = None,
):
"""
Build an optimizer with weight decay decoupled into separate parameter groups.
This function sets up the optimizer for the model by creating two groups of parameters:
one for weight decay and one without weight decay. Then, it initializes the optimizer
with these two groups of parameters.
Args:
model (Module): The model whose parameters are optimized.
learning_rate (float): The learning rate for the optimizer.
weight_decay (float): The weight decay for the optimizer.
beta_1 (float): The exponential decay rate for the 1st moment estimates.
beta_2 (float): The exponential decay rate for the 2nd moment estimates.
optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', 'deepspeed', or 'stable_adamw'.
use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True.
accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None.
Returns:
Optimizer: The initialized optimizer.
Raises:
ValueError: If the optimizer type is not 'lion', 'adamw', 'deepspeed' or 'stable_adamw'.
"""
accelerator.print(f"Using {optimizer_type} optimizer")
# Create an empty dictionary called param_dict to store the model's named parameters.
param_dict = {}
# Iterate over the model's named parameters and populate the param_dict with key-value pairs.
for param_name, param in model.named_parameters():
param_dict[param_name] = param
# Separate the model's named modules into two groups: decay and no_decay.
# Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay.
no_decay = []
if use_fsdp:
exclude_module = "_fsdp_wrapped_module.token_emb"
else:
exclude_module = "token_emb"
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding).
for ndim in [LayerNorm, torch.nn.Embedding]:
if isinstance(module, ndim):
# If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list.
if module_name == exclude_module:
no_decay.append(f"{module_name}.weight")
else:
# If the module is an instance of LayerNorm
no_decay.append(f"{module_name}.gamma")
# Exit the inner loop since the desired module has been found.
break
# Create an empty list to store the names of the Linear layer weights with weight decay.
decay = []
# Iterate through the named modules of the model.
for module_name, module in model.named_modules():
# Check if the current module is an instance of the desired type (torch.nn.Linear).
for ndim in [torch.nn.Linear]:
if isinstance(module, ndim):
# If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list.
decay.append(f"{module_name}.weight")
# Exit the inner loop since the desired module has been found.
break
# Create two separate lists of model parameters: decay_param and no_decay_param.
# The decay_param list contains the parameters that should have weight decay applied.
# The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter.
# Create an empty list called decay_param to store the parameters with weight decay.
decay_param = []
if use_fsdp:
exclude_param = "_fsdp_wrapped_module.to_logits.weight"
else:
exclude_param = "to_logits.weight"
# Iterate over the decay list, which contains the names of the parameters with weight decay.
for param in decay:
# Check if the current parameter is not 'to_logits.weight'.
# Append the corresponding parameter from param_dict to the decay_param list.
if param != exclude_param:
decay_param.append(param_dict[param])
# Create an empty list called no_decay_param to store the parameters without weight decay.
no_decay_param = []
# Iterate over the no_decay list, which contains the names of the parameters without weight decay.
for param in no_decay:
try:
# Append the corresponding parameter from param_dict to the no_decay_param list.
no_decay_param.append(param_dict[param])
except KeyError:
# Some collected names (e.g. LayerNorm ".gamma") may not exist in this model; skip them.
pass
# Create a list called grouped_params that contains two dictionaries.
# The first dictionary has the decay_param list and the corresponding weight_decay value.
# The second dictionary has the no_decay_param list and a weight_decay value of 0.0.
grouped_params = [
{"params": decay_param, "weight_decay": weight_decay},
{"params": no_decay_param, "weight_decay": 0.0},
]
# Create a variable called optimizer that stores an instance of the optimizer.
if optimizer_type == "lion":
optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "adamw":
optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "deepspeed":
optimizer = DummyOptim(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),)
elif optimizer_type == "stable_adamw":
optimizer = StableAdamWUnfused(
grouped_params, lr=learning_rate, betas=(beta_1, beta_2),
)
# elif optimizer_type=="Adam8bit":
# optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
# elif optimizer_type=="Lion8Bit":
# optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2))
else:
raise ValueError(
"Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format(
optimizer_type
)
)
# Return the optimizer.
return optimizer
# dataloaders
def build_dataloaders():
"""
Build data loaders for training.
This function performs the following steps:
1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model.
2. Load the "openwebtext" dataset.
3. Tokenize the dataset, adding the end-of-sentence token to each text.
4. Process the tokenized dataset into chunks of a specified block size.
Returns:
Dataset: The processed dataset ready for training.
"""
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
dataset = load_dataset("openwebtext", split="train")
tokenized_dataset = dataset.map(
lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]),
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_dataset = tokenized_dataset.map(
group_texts, batched=True, num_proc=CFG.NUM_CPU,
)
return train_dataset
#switch to falconwebdataset
def build_pre_tokenized():
d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train[:10]")
# d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train")
# d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train")
# d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train")
# d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train")
# train_dataset = concatenate_datasets([d0, d1, d2, d3, d4])
return d0
def Train():
# accelerator
timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000))
accelerator = Accelerator(
gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY,
mixed_precision="fp16",
log_with="wandb",
kwargs_handlers=[timeout],
)
# AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = 4 #??????
accelerator.init_trackers(
project_name="CM3LEON",
config={
"batch_size": CFG.BATCH_SIZE,
"gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY,
"learning_rate": CFG.LEARNING_RATE,
"seq_len": CFG.SEQ_LEN,
},
init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}},
)
accelerator.print(f"Total GPUS: {accelerator.num_processes}")
# set seed
set_seed(CFG.SEED)
model = CM3().to(accelerator.device)
print_num_params(model, accelerator)
if CFG.USE_FSDP:
model = fsdp(
model,
mp="fp16",
shard_strat="SHARD_GRAD"
)
if CFG.USE_ACTIVATION_CHECKPOINTING:
activation_checkpointing(model, accelerator)
# model = accelerator.prepare(model, train_dataloader)
# dataloaders
if CFG.USE_PRETOKENIZED:
train_dataset = build_pre_tokenized()
else:
train_dataset = build_dataloaders()
train_loader = DataLoader(
train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator,
)
model, train_loader = accelerator.prepare(model, train_loader)
# optimizer
optim = decoupled_optimizer(
model=model,
learning_rate=CFG.LEARNING_RATE,
weight_decay=CFG.WEIGHT_DECAY,
beta_1=0.90,
beta_2=0.95,
optimizer_type='stable_adamw',
use_fsdp=True,
accelerator=accelerator
)
# Determine number of training steps
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps: {max_train_steps}")
# lr scheduler
NUM_WARMUP_STEPS = int(max_train_steps * 0.01)
accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}")
if CFG.USE_DEEPSPEED:
lr_scheduler = DummyScheduler(
optim,
total_num_steps=max_train_steps * accelerator.num_processes,
warmup_num_steps=NUM_WARMUP_STEPS
)
else:
lr_scheduler = get_lr_scheduler_with_warmup(
optimizer=optim,
scheduler_type="cosine",
num_warmup_steps=NUM_WARMUP_STEPS,
max_train_steps=max_train_steps,
grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY,
)
# prepare
optim, train_loader, lr_scheduler = accelerator.prepare(
optim, train_loader, lr_scheduler
)
# checkpoint scheduler
accelerator.register_for_checkpointing(lr_scheduler)
# Recalculate max_train_steps: accelerator.prepare() shards the dataloader across processes, so its length can change
max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"Max train steps recalculated: {max_train_steps}")
# Total batch size for logging
total_batch_size = (
CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY
)
accelerator.print(f"Total batch size: {total_batch_size}")
# resume training
progress_bar = tqdm(
range(max_train_steps), disable=not accelerator.is_local_main_process
)
completed_steps = 0
if CFG.RESUME_FROM_CHECKPOINT:
if CFG.RESUME_FROM_CHECKPOINT is not None and CFG.RESUME_FROM_CHECKPOINT != "":
accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}")
accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT)
path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT)
training_difference = os.path.splitext(path)[0]
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = (
int(training_difference.replace("step_", ""))
* CFG.GRADIENT_ACCUMULATE_EVERY
)
if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None:
train_loader = accelerator.skip_first_batches(train_loader, resume_step)
completed_steps += resume_step
progress_bar.update(resume_step)
# training
model.train()
for step, batch in enumerate(train_loader):
with accelerator.accumulate(model):
inputs = batch["input_ids"].to(accelerator.device)
loss = model(inputs, return_loss=True)
accelerator.backward(loss)
accelerator.log({"loss": loss.item()}, step=step)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
lr_scheduler.step()
optim.zero_grad()
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(CFG.CHECKPOINTING_STEPS, int):
if completed_steps % CFG.CHECKPOINTING_STEPS == 0:
output_dir = f"step_{completed_steps }"
if CFG.OUTPUT_DIR is not None:
output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= max_train_steps:
break
# end training
# accelerator.print(f"Training Finished")
accelerator.end_training()
# save final model
# accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}")
if CFG.OUTPUT_DIR is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
with accelerator.main_process_first():
accelerator.save(
unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt"
)
def main():
os.environ['MASTER_ADDR'] #'localhost'
os.environ['MASTER_PORT'] #= '9994'
# # [CRITICAL] Pay attention to this when scaling to multiple GPUs and clusters
# # Pay attention to this, use "accelerate config"
os.environ['RANK'] #= str(0) # Number of nodes (servers)
os.environ['WORLD_SIZE'] # = str(torch.cuda.device_count())
dist.init_process_group(backend='nccl') #init_method="env://")
Train()
#
if __name__ == '__main__':
main()
|
CM3Leon-main
|
cm3/train.py
|
import multiprocessing
import argparse
from itertools import chain
from datasets import load_dataset
from cm3.model import CM3Tokenizer
class CFG:
SEED: int = 42
SEQ_LEN: int = 8192
NUM_CPU: int = multiprocessing.cpu_count()
HF_ACCOUNT_REPO: str = "YOUR-HF-USERNAME/YOUR-DATASET-REPO"  # Hub repo id to push the processed dataset to
TOKENIZER: str = "EleutherAI/gpt-neox-20b"  # assumed default; referenced by the CLI arguments below
DATASET_NAME: str = "HuggingFaceM4/VQAv2"
#perhaps will need finetuning
def built_dataset(args):
# tokenizer = AutoTokenizer.from_pretrained(CFG.TOKENIZER)
tokenizer = CM3Tokenizer().tokenizer  # use the underlying HF tokenizer (callable, has eos_token)
train_dataset = load_dataset(CFG.DATASET_NAME, split="train", streaming=True)
def tokenize_function(example):
return tokenizer([t + tokenizer.eos_token for t in example["text"]])
tokenized_dataset = train_dataset.map(
tokenize_function,
batched=True,
num_proc=CFG.NUM_CPU,
remove_columns=["text"],
)
block_size = CFG.SEQ_LEN
# main data processing function that will concatenate all texts from our dataset
def group_texts(examples):
#concatenate all texts
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# drop the small remainder; we could pad instead of dropping if the model supported it. Customize this to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
#split by chunks of max length
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
return result
train_tokenized_dataset = tokenized_dataset.map(
group_texts,
batched=True,
num_proc=CFG.NUM_CPU,
)
train_tokenized_dataset.push_to_hub(CFG.HF_ACCOUNT_REPO)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process and push dataset to Hugging Face Hub")
parser.add_argument("--seed", type=int, default=CFG.SEED, help="Random seed")
parser.add_argument("--seq_len", type=int, default=CFG.SEQ_LEN, help="Sequence length for processing")
parser.add_argument("--hf_account", type=str, default=CFG.HF_ACCOUNT_REPO, help="Hugging Face account name and repo")
parser.add_argument("--tokenizer", type=str, default=CFG.TOKENIZER, help="Tokenizer model to use")
parser.add_argument("--dataset_name", type=str, default=CFG.DATASET_NAME, help="Name of the dataset to process")
args = parser.parse_args()
built_dataset(args)
|
CM3Leon-main
|
cm3/tokenize.py
|
import torch
# This is the unfused version of StableAdamW. It is slower than the fused version (coming).
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
precision="amp_bfloat16",
custom_scalar=65536,
):
beta1, beta2 = betas[0], betas[1]
defaults = dict(lr=lr, weight_decay=weight_decay, beta1=beta1, beta2=beta2)
super(StableAdamWUnfused, self).__init__(params, defaults)
self.eps = eps
self.d = clip_thresh
# Set precision to "custom_fp16" if you want to use a fixed loss scalar, custom_scalar, which is divided out in the update step.
# If you do this, call (custom_scalar * loss).backward() instead of loss.backward().
self.precision = precision
self.custom_scaler = custom_scalar
for group in self.param_groups:
group["step"] = 1.0
print("Using StableAdamWUnfused-v1")
def __setstate__(self, state):
super(StableAdamWUnfused, self).__setstate__(state)
def step(self, closure=None):
if closure is not None:
closure()
for group in self.param_groups:
lr = group["lr"]
weight_decay = group["weight_decay"]
beta1 = group["beta1"]
beta2 = group["beta2"]
step = group["step"]
for p in group["params"]:
if p.grad is None:
continue
theta = p.data
param_state = self.state[p]
if self.precision == "custom_fp16":
g = p.grad.data / self.custom_scaler
if torch.any(torch.isnan(g) | torch.isinf(g)):
continue
else:
g = p.grad.data
if "exp_avg" not in param_state:
v = param_state["exp_avg"] = torch.zeros_like(theta)
u = param_state["exp_avg_sq"] = torch.zeros_like(theta)
else:
v = param_state["exp_avg"]
u = param_state["exp_avg_sq"]
beta1hat = beta1 * (1 - beta1 ** (step - 1)) / (1 - beta1**step)
beta2hat = beta2 * (1 - beta2 ** (step - 1)) / (1 - beta2**step)
v = v.mul_(beta1hat).add_(g, alpha=1.0 - beta1hat)
u = u.mul_(beta2hat).addcmul_(g, g, value=1.0 - beta2hat)
denominator = u.sqrt().add_(self.eps)
# StableAdamW = AdamW + update clipping (https://arxiv.org/abs/1804.04235) applied tensor-wise.
rms = (
torch.div(
g.pow(2), torch.maximum(u, (self.eps**2) * torch.ones_like(u))
)
.mean()
.sqrt()
.item()
)
theta = theta.mul_(1.0 - lr * weight_decay).addcdiv_(
v, denominator, value=-lr * (1.0 / max(1.0, rms / self.d))
)
# save current params
param_state["exp_avg"] = v
param_state["exp_avg_sq"] = u
group["step"] = step + 1
|
CM3Leon-main
|
cm3/utils/stable_adamw.py
|
CM3Leon-main
|
cm3/utils/__init__.py
|
|
import unittest
import torch
from softmax_one.softmax_one import ScaledDotProductAttention
class TestScaledDotProductAttention(unittest.TestCase):
def setUp(self):
self.module = ScaledDotProductAttention(dropout=0.1)
self.q = torch.rand(16, 10, 64) #16 batches 10 queries of size 64
self.k = torch.rand(16, 10, 64) #16 batches of 10 keys of size 64
self.v = torch.rand(16, 10, 64) #16 batches of 10 values each of size 64
def test_output_shape(self):
output, _ = self.module(self.q, self.k, self.v)
self.assertEqual(output.shape, (16, 10, 64))
if __name__ == '__main__':
unittest.main()
|
AttentionIsOFFByOne-main
|
test.py
|
import torch
from softmax_one.softmax_one import softmax_one
x = torch.randn(5)
y = softmax_one(x, dim=0)
print(y)
print(y.shape)
|
AttentionIsOFFByOne-main
|
example.py
|
import time
import torch
import argparse
import torch.nn.functional as F
import matplotlib.pyplot as plt
# from softmax_one.softmax_one_cupy import softmax_one_cupy as softmax_one
from softmax_one.softmax_one import softmax_one
import numpy as np
import logging
def benchmark(func, x, dim):
start = time.time()
for _ in range(1000):
func(x, dim)
end = time.time()
return end - start
if __name__ == "__main__":
# Set up logging
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='Benchmarking Softmax1.')
parser.add_argument('--no-chart', action='store_true',
help='Do not display chart')
args = parser.parse_args()
# Define the sizes to test
sizes = [(10, 10), (100, 100), (1000, 1000), (10000, 10000)]
# Arrays to store results
times_softmax = []
times_softmax_one = []
# Run the benchmark
for size in sizes:
logging.info(f'Running benchmark for tensor size {size}...')
x = torch.rand(size)
time_softmax = benchmark(F.softmax, x, dim=-1)
time_softmax_one = benchmark(softmax_one, x, dim=-1)
times_softmax.append(time_softmax)
times_softmax_one.append(time_softmax_one)
logging.info(f'F.softmax time: {time_softmax} s')
logging.info(f'softmax_one time: {time_softmax_one} s')
# Plot the results
if not args.no_chart:
plt.figure(figsize=(10, 6))
plt.plot([np.prod(s) for s in sizes], times_softmax, label='F.softmax')
plt.plot([np.prod(s) for s in sizes], times_softmax_one, label='softmax_one')
plt.legend()
plt.xlabel('Tensor Size')
plt.ylabel('Time (s)')
plt.title('Benchmarking Results')
plt.show()
else:
logging.info('Chart display is off.')
|
AttentionIsOFFByOne-main
|
tests/benchmark.py
|
from setuptools import setup, Extension
from torch.utils import cpp_extension
softmax_one_cpp = Extension(
name="softmax_one_cpp",
sources=["softmax_one/optimized/softmax_one.cpp", "softmax_one/optimized/binding.cpp"],
include_dirs=["softmax_one/include"],
extra_compile_args=["-std=c++14"]
)
setup(
name='softmax_one_cpp',
version='0.1',
ext_modules=[softmax_one_cpp],
cmdclass={'build_ext': cpp_extension.BuildExtension},
)
# # python setup.py install
# import softmax_one_cpp
# def test_softmax_one_cpp():
# x = torch.randn(10, 5)
# y = softmax_one_cpp.forward(x, dim=1)
# assert torch.allclose(y.sum(dim=1), torch.ones(10))
|
AttentionIsOFFByOne-main
|
tests/setup.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from softmax_one.softmax_one import softmax_one
# QuietAttention
class QuietAttention(nn.Module):
def __init__(self, dropout=0.0):
super().__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
# get the dimension of the key vectors (needed for scaling the dot product of Q and K)
d_k = k.size(-1)
# create a zero key/value token so the attention distribution can "attend to nothing"
zero_vector = torch.zeros(k.size(0), 1, d_k, device=q.device, dtype=q.dtype)
# prepend the zero vector to the keys and values along the sequence dimension
k = torch.cat((zero_vector, k), dim=1)
v = torch.cat((zero_vector, v), dim=1)
# compute the dot product of Q and K and scale by sqrt(d_k) for more stable gradients
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
#if a mask is provided, apply it to scores, (after extending the mask to account for the added zero vector)
if mask is not None:
mask = F.pad(mask, (1, 0))
scores = scores.masked_fill(mask == 0, -1e9)
#compute attention distribution using the modified softmax function
p_attn = softmax_one(scores, dim=-1)
#apply dropout to the attention distribution
p_attn = self.dropout(p_attn)
#compute the final output by multiplying the attention distribution with the value matrix v
return torch.matmul(p_attn, v), p_attn
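# Usage sketch (illustrative shapes: batch=2, seq_len=5, d=16):
# attn = QuietAttention(dropout=0.0)
# q, k, v = torch.rand(2, 5, 16), torch.rand(2, 5, 16), torch.rand(2, 5, 16)
# out, p = attn(q, k, v)
# out has shape (2, 5, 16); each row of p sums to less than 1 because of softmax_one and the zero key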
|
AttentionIsOFFByOne-main
|
softmax_one/attention.py
|
import math
import torch
import torch.nn.functional as F
# Define the softmax_one function, which adds one to the denominator. This lets the attention
# weights sum to less than one (so a head can effectively attend to nothing) and improves numerical stability.
def softmax_one(x, dim=None, _stacklevel=3, dtype=None):
#subtract the max for stability
x = x - x.max(dim=dim, keepdim=True).values
#compute exponentials
exp_x = torch.exp(x)
# compute softmax values, adding one in the denominator
return exp_x / (1 + exp_x.sum(dim=dim, keepdim=True))
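# Tiny numeric illustration: softmax_one(torch.tensor([0.0, 0.0]), dim=0) gives [1/3, 1/3],
# which sums to 2/3 < 1, whereas F.softmax would give [0.5, 0.5] summing to exactly 1.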
# Implement the scaled dot product attention with GhostSoftmax
class ScaledDotProductAttention(torch.nn.Module):
def __init__(self, dropout=0.0):
super().__init__()
self.dropout = torch.nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
d_k = k.size(-1)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = softmax_one(scores, dim=-1)
p_attn = self.dropout(p_attn)
return torch.matmul(p_attn, v), p_attn
|
AttentionIsOFFByOne-main
|
softmax_one/softmax_one.py
|
from softmax_one.softmax_one import softmax_one, ScaledDotProductAttention
|
AttentionIsOFFByOne-main
|
softmax_one/__init__.py
|
# CuPy runs NumPy-style array code on CUDA GPUs; this is a test implementation of softmax_one
import cupy as cp
#softmax
def softmax_one_cupy(x, axis=None):
# subtract the max for stability
x = x - cp.max(x, axis=axis, keepdims=True)
#compute exponentials
exp_x = cp.exp(x)
#compute the softmax values and add one in the denominator
return exp_x / (1 + cp.sum(exp_x, axis=axis, keepdims=True))
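# Usage sketch (requires a CUDA device and the cupy package):
# x = cp.random.randn(4, 8)
# y = softmax_one_cupy(x, axis=-1)
# y.sum(axis=-1)  # each row sums to slightly less than 1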
|
AttentionIsOFFByOne-main
|
optimized/softmax_one_cupy.py
|
import json
from datetime import datetime
import re
def dmy_to_ymd(d):
return datetime.strptime(d, '%d %b %Y').strftime('%Y-%m-%d')
with open('../README.md', 'r') as f:
lines = f.readlines()
# remove empty line
lines = [line.strip() for line in lines if line.strip()]
st = lines.index('# Resources')
end = lines.index('# Papers')
lines = lines[st:end]
print(lines)
# find index of line that start with #
indexs = [i for i, line in enumerate(lines) if line.startswith('#') and not line.startswith('##')]
db = {"resources": []}
# split lines by index
indexs += [len(lines)]
for i, idx in enumerate(indexs[:-1]):
field = lines[idx].strip('##').strip()
print(field)
content = lines[idx + 1:indexs[i + 1]]
second_indexs = [i for i, line in enumerate(content) if line.startswith('##')]
second_indexs += [len(content)]
for i, idx in enumerate(second_indexs[:-1]):
task = content[idx].strip('###').strip()
second_content = content[idx + 1:second_indexs[i + 1]]
print(task, len(second_content))
block_len = 4
if task == 'Tutorial and Jupyter Notebook':
block_len = 3
for l in range(0, len(second_content), block_len):
try:
item = second_content[l:l + block_len]
obj = {}
obj['title'] = item[0][2:-4]
obj['authors'] = item[1][1:-3]
linkstr = item[2].strip()
links = re.findall(r"\[\[([^\]]*)\]\(([^\)]+)\)\]", linkstr)  # matches [[Name](url)] entries
links2 = {}
for name, href in links:
links2[name] = href
obj['links'] = links2
if len(item) == 4:
obj['date'] = dmy_to_ymd(item[3].strip())
obj['field'] = field
obj['task'] = task
db['resources'].append(obj)
except Exception as e:
print(item)
# import ipdb; ipdb.set_trace()
raise e
with open('resource.json', 'w') as fout:
json.dump(db, fout)
|
EXA-1-master
|
exa/papers/Awesome-Diffusion-Models/website/convert_resource.py
|
import json
from datetime import datetime
def dmy_to_ymd(d):
return datetime.strptime(d, '%d %b %Y').strftime('%Y-%m-%d')
with open('../README.md', 'r') as f:
lines = f.readlines()
# remove empty line
lines = [line.strip() for line in lines if line.strip()]
idx = lines.index('# Papers')
lines = lines[idx:]
# find index of line that start with #
indexs = [i for i, line in enumerate(lines) if line.startswith('##') and not line.startswith('###')]
db = {"papers": []}
# split lines by index
indexs += [len(lines)]
for i, idx in enumerate(indexs[:-1]):
field = lines[idx].strip('##').strip()
print(field)
content = lines[idx + 1:indexs[i + 1]]
second_indexs = [i for i, line in enumerate(content) if line.startswith('###')]
second_indexs += [len(content)]
for i, idx in enumerate(second_indexs[:-1]):
task = content[idx].strip('###').strip()
second_content = content[idx + 1:second_indexs[i + 1]]
print(task, len(second_content))
for l in range(0, len(second_content), 4):
try:
item = second_content[l:l + 4]
obj = {}
obj['title'] = item[0][2:-4]
obj['authors'] = item[1][1:-3]
source, links = item[2][:-1].strip().split('.', maxsplit=1)
obj['source'] = source
links = links.strip().split(' ')
links = [link[1:-1] for link in links]
links2 = {}
for link in links:
if not link.strip():
continue
name, url = link.split(']')
name = name[1:].strip()
url = url[1:-1].strip()
links2[name] = url
obj['links'] = links2
obj['date'] = dmy_to_ymd(item[3].strip())
obj['field'] = field
obj['task'] = task
db['papers'].append(obj)
except Exception as e:
print(item)
# import ipdb; ipdb.set_trace()
raise e
with open('db.json', 'w') as fout:
json.dump(db, fout)
|
EXA-1-master
|
exa/papers/Awesome-Diffusion-Models/website/convert.py
|
import itertools
from jinja2 import Template
import json
DOC_DIR = '../docs'
class Link:
def __init__(self, name, href):
self.name = name
self.href = href
class Paper:
def __init__(self, data):
self.title = data['title']
self.authors = data['authors']
self.source = data.get('source', None)
self.date = data.get('date', None)
self.links = [Link(k, v) for k, v in data['links'].items()]
self.field = data['field']
self.task = data['task']
class Field:
def __init__(self, name, tasks):
self.name = name
self.tasks = [Task(k, v, self) for k, v in tasks.items()]
@property
def total(self):
return sum([len(task.papers) for task in self.tasks])
@property
def url_name(self):
return self.name.lower().replace(' ', '_')
@property
def papers(self):
papers = list(itertools.chain(*[task.papers for task in self.tasks]))
papers = sorted(papers, key=lambda x: x.date)
papers.reverse()
return papers
class Task:
def __init__(self, name, papers, field):
self.name = name
self._papers = papers
self.field = field
@property
def url_name(self):
return self.field.url_name + '_' + self.name.lower().replace(' ', '_')
@property
def papers(self):
papers = self._papers
try:
papers = sorted(papers, key=lambda x: x.date)
papers.reverse()
except:
pass
return papers
def find_all_fields(papers):
fields = {}
for paper in papers:
if paper.field not in fields:
fields[paper.field] = {}
if paper.task not in fields[paper.field]:
fields[paper.field][paper.task] = set()
fields[paper.field][paper.task].add(paper)
field_objs = []
for k, v in fields.items():
obj = Field(k, v)
field_objs.append(obj)
return field_objs
def build_paper():
with open('db.json', 'r') as f:
db = json.load(f)
papers = [Paper(paper) for paper in db['papers']]
fields = find_all_fields(papers)
with open(f'{DOC_DIR}/template.html', 'r') as fin:
template = Template(fin.read(), lstrip_blocks=True, trim_blocks=True)
first = True
for field in fields:
for task in field.tasks:
out = template.render(fields=fields, papers=task.papers, highlight_task=task, section='paper')
with open(f'{DOC_DIR}/{task.url_name}.html', 'w', encoding='utf-8') as fout:
fout.write(out)
def build_field():
with open('db.json', 'r') as f:
db = json.load(f)
papers = [Paper(paper) for paper in db['papers']]
fields = find_all_fields(papers)
with open(f'{DOC_DIR}/template.html', 'r') as fin:
template = Template(fin.read(), lstrip_blocks=True, trim_blocks=True)
first = True
for field in fields:
out = template.render(fields=fields, papers=field.papers, section='paper')
with open(f'{DOC_DIR}/{field.url_name}.html', 'w', encoding='utf-8') as fout:
fout.write(out)
if first:
with open(f'{DOC_DIR}/index.html', 'w', encoding='utf-8') as fout:
fout.write(out)
first = False
def build_resource():
with open('resource.json', 'r') as f:
db = json.load(f)
papers = [Paper(paper) for paper in db['resources']]
fields = find_all_fields(papers)
with open(f'{DOC_DIR}/template.html', 'r') as fin:
template = Template(fin.read(), lstrip_blocks=True, trim_blocks=True)
first = True
for field in fields:
for task in field.tasks:
out = template.render(fields=fields, papers=task.papers, highlight_task=task, section='resource')
with open(f'{DOC_DIR}/{task.url_name}.html', 'w') as fout:
fout.write(out)
if first:
with open(f'{DOC_DIR}/resource.html', 'w') as fout:
fout.write(out)
first = False
if __name__ == '__main__':
build_paper()
build_resource()
build_field()
|
EXA-1-master
|
exa/papers/Awesome-Diffusion-Models/website/main.py
|
# -*- coding: utf-8 -*-
import argparse
import logging
import pprint
from gensim.models import word2vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(description='gensim skip-gram with negative sampling')
parser.add_argument('--is_train', action='store_true',
                    help='specify train or evaluation')
args = parser.parse_args()
if __name__ == '__main__':
model_name = 'vectors.model'
if args.is_train:
# load up unzipped corpus from http://mattmahoney.net/dc/text8.zip
sentences = word2vec.Text8Corpus('text8')
# train the skip-gram model
model = word2vec.Word2Vec(sentences, sg=1, size=300, min_count=20, window=5, negative=25, workers=4)
# save trained model
model.save(model_name)
else:
# load trained model
model = word2vec.Word2Vec.load(model_name)
# analogy task evaluation with corpus from https://goo.gl/okpDj5
model.accuracy('questions-words.txt')
# execute analogy task like king - man + woman = queen
pprint.pprint(model.most_similar(positive=['woman', 'king'], negative=['man']))
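# Note (hedged): the calls above follow the pre-4.0 gensim API. With gensim >= 4.0 the
# rough equivalents would be
#   word2vec.Word2Vec(sentences, sg=1, vector_size=300, min_count=20, window=5,
#                     negative=25, workers=4)
#   model.wv.evaluate_word_analogies('questions-words.txt')
#   model.wv.most_similar(positive=['woman', 'king'], negative=['man'])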
|
EXA-1-master
|
exa/papers/awesome-embedding-models/examples/baseline.py
|
# -*- coding: utf-8 -*-
import os
import zipfile
from keras.utils.data_utils import get_file
def maybe_download(url):
"""
Download a file if not present.
"""
filename = url.split('/')[-1]
path = get_file(filename, url)
return path
def read_data(filename):
"""
Extract the first file enclosed in a zip file as a list of words.
"""
with zipfile.ZipFile(filename) as f:
data = f.read(f.namelist()[0]).split()
return data
def unzip(zip_filename):
"""
Extract a file from the zipfile
"""
with zipfile.ZipFile(zip_filename) as f:
for filename in f.namelist():
dirname = os.path.dirname(filename)
f.extract(filename, dirname)
return os.path.abspath(filename)
def read_analogies(filename, word2id):
"""
Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(filename, 'r') as analogy_f:
for line in analogy_f:
if line.startswith(':'): # Skip comments.
continue
words = line.strip().lower().split()
ids = [w in word2id for w in words]
if False in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(words)
print('Eval analogy file: {}'.format(filename))
print('Questions: {}'.format(len(questions)))
print('Skipped: {}'.format(questions_skipped))
return questions
if __name__ == '__main__':
url = 'http://mattmahoney.net/dc/text8.zip'
filename = maybe_download(url)
unzip(filename)
words = read_data(filename)
print('Data size', len(words))
url = 'http://download.tensorflow.org/data/questions-words.txt'
filename = maybe_download(url)
|
EXA-1-master
|
exa/papers/awesome-embedding-models/examples/utils.py
|
# -*- coding: utf-8 -*-
import pprint
from keras.utils.data_utils import get_file
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer, base_filter
from keras.preprocessing.sequence import skipgrams
from keras.models import Sequential
from keras.layers import Dense
from gensim.models.doc2vec import Word2Vec
path = get_file('alice.txt', origin='http://www.gutenberg.org/cache/epub/11/pg11.txt')
sentences = [line.strip() for line in open(path) if line != '\n']
tokenizer = Tokenizer(filters=base_filter() + "'")
tokenizer.fit_on_texts(sentences)
corpus = tokenizer.texts_to_sequences(sentences)
V = len(tokenizer.word_index) + 1
dim = 200
window_size = 5
model = Sequential()
model.add(Dense(input_dim=V, output_dim=dim))
model.add(Dense(input_dim=dim, output_dim=V, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.summary()
def generate_data(corpus, window_size, V):
for words in corpus:
couples, labels = skipgrams(words, V, window_size, negative_samples=0, shuffle=True)
if couples:
X, y = zip(*couples)
X = np_utils.to_categorical(X, V)
y = np_utils.to_categorical(y, V)
yield X, y
for epoch in range(10):
loss = 0.
for x, y in generate_data(corpus, window_size, V):
loss += model.train_on_batch(x, y)
print(epoch, loss)
with open('vectors.txt', 'w') as f:
f.write(' '.join([str(V-1), str(dim)]))
f.write('\n')
vectors = model.get_weights()[0]
for word, i in tokenizer.word_index.items():
f.write(word)
f.write(' ')
f.write(' '.join(map(str, list(vectors[i, :]))))
f.write('\n')
w2v = Word2Vec.load_word2vec_format('./vectors.txt', binary=False)
pprint.pprint(w2v.most_similar(positive=['king']))
pprint.pprint(w2v.most_similar(positive=['place']))
pprint.pprint(w2v.most_similar(positive=['woman', 'king'], negative=['man']))
|
EXA-1-master
|
exa/papers/awesome-embedding-models/examples/skip-gram.py
|
# -*- coding: utf-8 -*-
import argparse
import sys
import numpy as np
from gensim.models import word2vec
from gensim.models.doc2vec import Word2Vec
from keras.layers import Activation, Embedding, Merge, Reshape
from keras.models import Sequential
from keras.preprocessing.sequence import skipgrams, make_sampling_table
from keras.preprocessing.text import Tokenizer, base_filter
from utils import maybe_download, unzip, read_analogies
parser = argparse.ArgumentParser(description='Keras skip-gram with negative sampling')
parser.add_argument('--save_path', type=str, default='vectors.txt',
help='Directory to write the model.')
parser.add_argument('--eval_data', type=str, default=None,
help='Analogy questions. '
'See README.md for how to get questions-words.txt.')
parser.add_argument('--embedding_size', type=int, default=200,
help='The embedding dimension size.')
parser.add_argument('--epochs_to_train', type=int, default=5,
help='Number of epochs to train.'
'Each epoch processes the training data once completely.')
parser.add_argument('--num_neg_samples', type=int, default=5,
help='Negative samples per training example.')
parser.add_argument('--window_size', type=int, default=4,
help='The number of words to predict to the left and right '
'of the target word.')
parser.add_argument('--min_count', type=int, default=5,
help='The minimum number of word occurrences for it to be '
'included in the vocabulary.')
parser.add_argument('--sampling_factor', type=float, default=1e-3,
help='Subsample threshold for word occurrence. Words that appear '
'with higher frequency will be randomly down-sampled. Set '
'to 0 to disable.')
args = parser.parse_args()
zip_filename = maybe_download('http://mattmahoney.net/dc/text8.zip')
text_file = unzip(zip_filename)
sentences = word2vec.Text8Corpus(text_file)
sentences = [' '.join(sent) for sent in sentences]
tokenizer = Tokenizer(filters=base_filter() + "'")
tokenizer.fit_on_texts(sentences)
sentences = tokenizer.texts_to_sequences(sentences)
V = len(tokenizer.word_index) + 1
def build_model():
target_word = Sequential()
target_word.add(Embedding(V, args.embedding_size, input_length=1))
context = Sequential()
context.add(Embedding(V, args.embedding_size, input_length=1))
model = Sequential()
model.add(Merge([target_word, context], mode='dot', dot_axes=2))
model.add(Reshape((1,), input_shape=(1, 1)))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
return model
def train_model(model):
sampling_table = make_sampling_table(V, sampling_factor=args.sampling_factor)
for epoch in range(args.epochs_to_train):
loss = 0.
for i, sent in enumerate(sentences):
print('{}/{}'.format(i, len(sentences)))
couples, labels = skipgrams(sequence=sent, vocabulary_size=V, window_size=args.window_size,
negative_samples=args.num_neg_samples, sampling_table=sampling_table)
if couples:
words, contexts = zip(*couples)
words = np.array(words, dtype=np.int32)
contexts = np.array(contexts, dtype=np.int32)
y = np.array(labels, dtype=np.int32)
loss += model.train_on_batch([words, contexts], y)
print('num epoch: {} loss: {}'.format(epoch, loss))
return model
def save_model(model):
with open(args.save_path, 'w') as f:
f.write(' '.join([str(V - 1), str(args.embedding_size)]))
f.write('\n')
vectors = model.get_weights()[0]
for word, i in tokenizer.word_index.items():
f.write(word)
f.write(' ')
f.write(' '.join(map(str, list(vectors[i, :]))))
f.write('\n')
def eval_model():
w2v = Word2Vec.load_word2vec_format(args.save_path, binary=False)
word2id = dict([(w, i) for i, w in enumerate(w2v.index2word)])
analogy_questions = read_analogies(args.eval_data, word2id)
correct = 0
total = len(analogy_questions)
for question in analogy_questions:
a, b, c, d = question # E.g. [Athens, Greece, Baghdad, Iraq]
analogies = w2v.most_similar(positive=[b, c], negative=[a], topn=4)
for analogy in analogies:
word, _ = analogy
if d == word:
# Predicted Correctly!
correct += 1
break
print('Eval %4d/%d accuracy = %4.1f%%' % (correct, total, correct * 100.0 / total))
def main():
"""
Train a word2vec model.
"""
#if not args.train_data or not args.eval_data or not args.save_path:
if not args.save_path:
print('--train_data --eval_data and --save_path must be specified.')
sys.exit(1)
model = build_model()
model = train_model(model)
save_model(model)
eval_model()
if __name__ == '__main__':
main()
|
EXA-1-master
|
exa/papers/awesome-embedding-models/examples/skip-gram_with_ns.py
|
# -*- coding:utf-8 -*-
import re
from pprint import pprint
import requests
from bs4 import BeautifulSoup
def get_html(url):
    try:
        html = requests.get(url).text
    except Exception as e:
        print('web requests url error: {}\nlink: {}'.format(e, url))
        return None
    return html
class WebDownloader(object):
def __init__(self, base_url):
self.url = base_url
self.links = set()
def parse_html(self, verbose=False):
        html = get_html(self.url)
        if not html:
            return
        soup = BeautifulSoup(html, 'lxml')
for link in soup.findAll('a'):
if link.has_attr('href'):
href = str(link.get('href'))
if href.startswith('http'):
self.links.add(href)
if verbose:
print(link.get('href'))
def download(self):
for link in self.links:
link = str(link)
if link.endswith('.pdf'): # handle direct pdf url link
file_name = link.split('/')[-1]
try:
r = requests.get(link)
# with open(os.path.join(path, file_name), 'wb+') as f:
with open(file_name, 'wb+') as f:
f.write(r.content)
except Exception as e:
print('Downloading error:{}\nlink:{}'.format(e, link))
url = 'https://chenfeiyang.top/Awesome-Multimodal-Research/'
wd = WebDownloader(url)
wd.parse_html()
pprint(wd.links)
wd.download()
|
EXA-1-master
|
exa/papers/Awesome-Multimodal-Research-master/scripts/WebDownloader.py
|
# -*- coding:utf-8 -*-
import re
import requests
import urllib.request
import os
import argparse
parser = argparse.ArgumentParser(description="pull_paper")
parser.add_argument('--keyword', type=str, default='Multimodal') # Match the keywords we want to find the paper
args = parser.parse_args()
# get web context
r = requests.get('http://openaccess.thecvf.com/CVPR2019.py')
data = r.text
# find all pdf links
link_list = re.findall(r"(?<=href=\").+?pdf(?=\">pdf)|(?<=href=\').+?pdf(?=\">pdf)", data)
name_list = re.findall(r"(?<=href=\").+?2019_paper.html\">.+?</a>", data)
cnt = 1
num = len(link_list)
# your local path to download pdf files
localDir = './CVPR2019/{}/'.format(args.keyword)
if not os.path.exists(localDir):
os.makedirs(localDir)
while cnt < num:
url = link_list[cnt]
    # separate the file name from the url link
    file_name = name_list[cnt].split('<')[0].split('>')[1]
    # replace punctuation that is not allowed in file names
file_name = file_name.replace(':', '_')
file_name = file_name.replace('\"', '_')
file_name = file_name.replace('?', '_')
file_name = file_name.replace('/', '_')
file_name = file_name.replace(' ', '_')
search_list = file_name.split('_')
search_pattern = re.compile(r'{}'.format(args.keyword), re.IGNORECASE)
download_next_paper = True
# print([True for i in search_list if search_pattern.findall(i)])
if ([True for i in search_list if search_pattern.findall(i)]):
download_next_paper = False
if download_next_paper:
cnt = cnt + 1
continue
file_path = localDir + file_name + '.pdf'
if os.path.exists(file_path):
        print('File [{}.pdf] exists, skipping download.'.format(file_name))
cnt = cnt + 1
continue
else:
# download pdf files
print('[' + str(cnt) + '/' + str(num) + "] Downloading -> " + file_path)
try:
urllib.request.urlretrieve('http://openaccess.thecvf.com/' + url, file_path)
except:
cnt = cnt + 1
continue
cnt = cnt + 1
print("all download finished")
|
EXA-1-master
|
exa/papers/Awesome-Multimodal-Research-master/scripts/pull_paper.py
|
import os
import pandas as pd
from tqdm import tqdm
BASE_URL="https://archive.org/download/stackexchange/"
table = pd.read_html(BASE_URL)[0]
sources = [x.replace(" (View Contents)", "") for x in table['Name'].tolist()]
sources = [x for x in sources if x.endswith(".7z")]
for source in tqdm(sources):
# if ".meta." not in source:
print(f"source: {source}")
os.system("wget "+BASE_URL+source+" -O "+"./data/"+source)
os.system("7z x ./data/"+source+" -o./data/"+source[:-3])
os.system(f"mv ./data/{source[:-3]}/Posts.xml ./data/{source[:-3]}.xml")
os.system(f"rm -rf ./data/{source[:-3]}")
os.system(f"rm ./data/{source}")
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/download.py
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/__init__.py
|
|
import os
import json
LEMMA_DATA_DIR_SE_OUT = os.environ.get("LEMMA_DATA_DIR_SE_OUT", "./data/")
if __name__ == "__main__":
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT,"token_counts", "tokens.json"), "r") as f:
counts = json.load(f)
'''
print a table of the counts
'''
print("|Idx|Site|Token Count|")
print("|---|---|---|")
for idx, (site, count) in enumerate(counts.items()):
print(f"|{idx}|{site}|{count}|")
print(f"|{len(counts.values())}|Total|{sum(counts.values())}|")
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/print_stats.py
|
import os
import json
import tiktoken
from multiprocessing import Pool
from transformers import AutoTokenizer
# enc = tiktoken.get_encoding("r50k_base")
enc = AutoTokenizer.from_pretrained(
"EleutherAI/pythia-6.9b-deduped",
# "gpt2"
)
def get_token_count(qa_pair):
# return len(enc.encode(qa_pair['text']))
return len(enc.tokenize(qa_pair['text']))
LEMMA_DATA_DIR_SE_OUT = os.environ.get("LEMMA_DATA_DIR_SE_OUT", "./stackexchange/")
# if x is a file, not a dir
sites = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE_OUT)) if os.path.isfile(os.path.join(LEMMA_DATA_DIR_SE_OUT, x))]
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts"), exist_ok=True)
token_counts = {}
for site in sites:
print(f"[INFO] Processing {site}...")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, site), "r") as f:
qa_pairs = [json.loads(x) for x in f.readlines()]
print(f"[INFO] Got {len(qa_pairs)} QA pairs for {site}.")
# token count
token_count = 0
with Pool(24) as p:
token_count = sum(p.map(get_token_count, qa_pairs))
token_counts[site] = token_count
print(f"[INFO] Got {token_count} tokens for {site}.")
# write to file
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts", "tokens.json"), "w") as f:
json.dump(token_counts, f)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/token_count.py
|
import os
import json
import sys
import xml.etree.ElementTree as ET
from tqdm import tqdm
sys.path.append("./")
from src.stack_exchange.count import get_sites_count
LEMMA_DATA_DIR_SE = os.environ.get("LEMMA_DATA_DIR_SE", "./data/")
if os.path.exists(os.path.join(LEMMA_DATA_DIR_SE, "counts.json")):
with open(os.path.join(LEMMA_DATA_DIR_SE, "counts.json"), "r") as fp:
counts = json.load(fp)
else:
print("[INFO] Getting counts for sites...")
counts = get_sites_count(LEMMA_DATA_DIR_SE)
# write this to a file
with open(os.path.join(LEMMA_DATA_DIR_SE, "counts.json"), "w") as f:
json.dump(counts, f)
# take first 28
sites = list(counts.keys())[:28]
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE, "parents"), exist_ok=True)
def process_site(site):
parents = {}
qa_pairs = []
print(f"[INFO] Processing {site}...")
# first get the parents dump
if os.path.exists(os.path.join(LEMMA_DATA_DIR_SE, "parents", site)):
with open(os.path.join(LEMMA_DATA_DIR_SE, "parents", site), "r") as f:
parents = json.load(f)
else:
with open(os.path.join(LEMMA_DATA_DIR_SE, site), "r") as f:
for i, line in enumerate(tqdm(f, total=counts[site])):
                # the first two lines are the XML header and the last line is the
                # closing tag; e.g. if counts[site] == 2 the file has 5 lines and
                # the data rows are lines 2 and 3 (0-indexed)
if i>1 and i<=counts[site]+1:
root = ET.fromstring(line)
if "ParentId" in root.attrib:
# this is an answer
if root.attrib["ParentId"] not in parents:
parents[root.attrib["ParentId"]] = []
parents[root.attrib["ParentId"]].append({
"id": root.attrib["Id"],
"text": root.attrib["Body"],
"score": root.attrib["Score"]
})
# write parents to file
with open(os.path.join(LEMMA_DATA_DIR_SE, "parents", site), "w") as f:
json.dump(parents, f)
print(f"[INFO] Got {len(parents)} questions for {site}.")
# now we have the Q-A pairs
# now we need to get the texts
with open(os.path.join(LEMMA_DATA_DIR_SE, site), "r") as f:
for i, line in enumerate(tqdm(f, total=counts[site])):
if i>1 and i<=counts[site]+1:
root = ET.fromstring(line)
if "ParentId" not in root.attrib:
post_id = root.attrib["Id"]
if post_id in parents:
# this is a question
qa_pairs.append({
"question": {
"id": post_id,
"text": f"{root.attrib['Title']} {root.attrib['Body']}",
"score": root.attrib["Score"]
},
"answers": parents[post_id]
})
else:
if "Title" in root.attrib:
# if there's a title => then a valid question
body = root.attrib["Body"] if "Body" in root.attrib else ""
score = root.attrib["Score"] if "Score" in root.attrib else 0
qa_pairs.append({
"question": {
"id": post_id,
"text": f"{root.attrib['Title']} {body}",
"score": score
},
})
# write qa_pairs to file
print(f"[INFO] Writing {site} to file...")
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE, "qa_pairs"), exist_ok=True)
with open(os.path.join(LEMMA_DATA_DIR_SE, "qa_pairs", site.removesuffix(".xml")+".jsonl"), "w") as f:
for qa_pair in qa_pairs:
f.write(json.dumps(qa_pair)+"\n")
for each in sites:
process_site(each)
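# Output format (as produced by process_site above): each line of
# qa_pairs/<site>.jsonl is a JSON object like
#   {"question": {"id": ..., "text": "<Title> <Body>", "score": ...},
#    "answers": [{"id": ..., "text": ..., "score": ...}, ...]}
# where the "answers" key is absent for questions without any answers.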
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/filter.py
|
import re
import os
import sys
import json
import fasttext
from bs4 import BeautifulSoup
from multiprocessing import Pool
sys.path.append("./")
site_name = ""
CLEANR = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
def cleanhtml(raw_html):
raw_html = raw_html.replace("<li>", "\n*")
raw_html = raw_html.replace("</li>", "")
raw_html = raw_html.replace("<ol>", "\n*")
raw_html = raw_html.replace("</ol>", "")
soup = BeautifulSoup(raw_html, "lxml")
return soup.get_text()
class LanguageIdentification:
def __init__(self):
pretrained_lang_model = "data/lid.176.bin"
self.model = fasttext.load_model(pretrained_lang_model)
def predict_lang(self, text):
text = text.replace("\n", " ")
        predictions = self.model.predict(text, k=1)  # returns the single best-matching language
return predictions[0][0].replace("__label__", "")
lang_id = LanguageIdentification()
LEMMA_DATA_DIR_SE = os.environ.get("LEMMA_DATA_DIR_SE", "./data/")
LEMMA_DATA_DIR_SE_OUT = os.environ.get("LEMMA_DATA_DIR_SE_OUT", "./data/")
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE_OUT), exist_ok=True)
def process_qa_pair(pair):
# sort answers by score
if "answers" in pair:
pair["answers"] = sorted(pair["answers"], key=lambda x: x["score"], reverse=True)
answers = "\nA: ".join([ cleanhtml(x["text"]) for x in pair["answers"]])
text = f"Q: { cleanhtml(pair['question']['text'])}\nA: {answers}"
else:
text = f"Q: { cleanhtml(pair['question']['text'])}"
return {
"text": text,
"meta": {
"language": lang_id.predict_lang(text),
"url": f"https://{site_name}/questions/{pair['question']['id']}",
"timestamp": "2023-03-29",
"source": "stackexchange",
"question_score": pair["question"]["score"],
}
}
# load qa_pairs
sites = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE, "qa_pairs"))]
# if needed:
# sort sites such that stackoverflow is processed first - to understand the memory pressure
# if OOM -> split stackoverflow into multiple files
# this won't hurt the completeness of the data, as each line is self-contained
for site in sites:
print(f"Processing {site}")
results = []
site_name = site.removesuffix(".jsonl")
if "stackoverflow_part" in site_name:
site_name = "stackoverflow.com"
# load qa_pairs
with open(os.path.join(LEMMA_DATA_DIR_SE, "qa_pairs", site), "r") as f:
qa_pairs = [json.loads(x) for x in f.readlines()]
# process html to text
with Pool(24) as p:
results = p.map(process_qa_pair, qa_pairs)
print(f"Writing {len(results)} results to {os.path.join(LEMMA_DATA_DIR_SE_OUT, site)}")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, site), "w") as f:
for result in results:
f.write(json.dumps(result) + "\n")
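# Output format (as produced by process_qa_pair above): each output line is a JSON
# object like
#   {"text": "Q: <question>\nA: <best answer>\nA: <next answer> ...",
#    "meta": {"language": ..., "url": ..., "timestamp": "2023-03-29",
#             "source": "stackexchange", "question_score": ...}}
# with answers concatenated in order of decreasing score.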
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/post_processing.py
|
import os
import json
from tqdm import tqdm
import xml.etree.ElementTree as ET
LEMMA_DATA_DIR_SE = os.environ.get("LEMMA_DATA_DIR_SE", "./data/stack_exchange/")
def get_sites_count(path=LEMMA_DATA_DIR_SE):
sites = os.listdir(path)
sites = [x for x in sites if x.endswith(".xml")]
counts = {}
for site in tqdm(sites):
if site == ".DS_Store":
continue
# read the file
with open(os.path.join(path, site), "r") as f:
# read # lines
count = sum(1 for line in f)
        counts[site] = count - 3  # subtract the 2 header lines and the closing tag
# sort the counts
counts = {k: v for k, v in sorted(counts.items(), key=lambda item: item[1], reverse=True)}
return counts
if __name__ == "__main__":
counts = get_sites_count()
'''
print a table of the counts
'''
print("|Idx|Site|Count|")
print("|---|---|---|")
# take the first 28 sites
for idx, (site, count) in enumerate(counts.items()):
if idx < 28:
print(f"|{idx}|{site}|{count}|")
# write to file
with open(os.path.join(LEMMA_DATA_DIR_SE, "counts.json"), "w") as f:
json.dump(counts, f)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/stack_exchange/count.py
|
import argparse
from datasets import load_dataset
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default=None,
help="Path to the wikipedia data directory.")
args = parser.parse_args()
LANGUAGES = [
"bg", "ca", "cs", "da", "de", "en", "es", "fr", "hr", "hu",
"it", "nl", "pl", "pt", "ro", "ru", "sl", "sr", "sv", "uk"
]
DUMP_DATE = "20230320"
def get_data(lan, date, data_dir: pathlib.Path):
wiki_dataset = load_dataset(
"wikipedia", language=lan, date=date, beam_runner="DirectRunner"
)
for split, dataset in wiki_dataset.items():
tgt_fp = data_dir / f"wiki_{lan}_{date}_{split}.jsonl"
dataset.to_json(tgt_fp)
print("Finished Downloading %s %s. There are total %d pages." % (
lan, date, len(dataset["id"])))
if __name__ == "__main__":
if args.data_dir is None:
raise ValueError("missing arg --data_dir.")
for lang in LANGUAGES:
get_data(lang, DUMP_DATE, data_dir=pathlib.Path(args.data_dir))
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/wiki/download.py
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/wiki/__init__.py
|
|
import os
import json
from multiprocessing import Pool
from transformers import AutoTokenizer
print("start loading!")
enc = AutoTokenizer.from_pretrained(
"EleutherAI/pythia-6.9b-deduped",
)
print("end loading!")
def get_token_count(qa_pair):
return len(enc.tokenize(qa_pair['text']))
LEMMA_DATA_DIR_SE_OUT = "./data/wikipedia/"
sites = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE_OUT)) if os.path.isfile(os.path.join(LEMMA_DATA_DIR_SE_OUT, x))]
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts"), exist_ok=True)
token_counts = {}
for site in sites:
print(f"[INFO] Processing {site}...")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, site), "r") as f:
qa_pairs = [json.loads(x) for x in f.readlines()]
print(f"[INFO] Got {len(qa_pairs)} wikipedia pages for {site}.")
token_count = 0
with Pool(100) as p:
token_count = sum(p.map(get_token_count, qa_pairs))
token_counts[site] = token_count
print(f"[INFO] Got {token_count} tokens for {site}.")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts", site), "w") as f:
json.dump(token_counts, f)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/wiki/token_count.py
|
import os
import json
LEMMA_DATA_DIR_SE_OUT = "./data/wikipedia/"
LEMMA_DATA_SAVE_DIR = "./data/wikipedia/wiki-full.jsonl"
files = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE_OUT)) if os.path.isfile(os.path.join(LEMMA_DATA_DIR_SE_OUT, x))]
files.sort()
with open(LEMMA_DATA_SAVE_DIR, "w") as fw:
for file in files:
lan = file.split("_")[1]
date = file.split("_")[2]
print("Now proceeding %s"%file, lan, date)
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, file), "r") as f:
lines = f.readlines()
for line in lines:
now = json.loads(line)
new = {"text": now["text"], "meta": {"title": now["title"], "url": now["url"], "language": lan, "timestamp": date}}
fw.write(json.dumps(new) + "\n")
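# Output format: each line of wiki-full.jsonl is a JSON object like
#   {"text": ..., "meta": {"title": ..., "url": ..., "language": <lan>, "timestamp": <dump date>}}
# where <lan> and <dump date> are taken from the per-language input file names.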
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/wiki/convert_format.py
|
import argparse
import hashlib
import gzip
import json
import re
import uuid
from datetime import datetime
from typing import Dict, Union
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default=None)
parser.add_argument('--target_dir', type=str,
default="./data/github/processed")
args = parser.parse_args()
# Regex to strip repated copyright comment blocks
CPAT = re.compile("copyright", re.IGNORECASE)
PAT = re.compile("/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/")
def get_timestamp() -> str:
return datetime.now().isoformat()
def clean_copyright_comments(content: str):
r = PAT.search(content)
if r:
# found one, now see if it contains "copyright", if so strip it
span = r.span()
sub = content[span[0]:span[1]]
if CPAT.search(sub):
# cut it
content = content[: span[0]] + content[span[1]:]
return content
lines = content.split('\n')
skip = 0
# Greedy replace any file that begins with comment block, most
# are copyright headers
for k in range(len(lines)):
if (
lines[k].startswith("//") or
lines[k].startswith("#") or
lines[k].startswith("--") or
not lines[k]
):
skip = skip + 1
else:
break
if skip:
# we skipped, consume it
content = "\n".join(lines[skip:])
return content
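# Behavior sketch: the first C-style /* ... */ block found in the file is removed if it
# contains the word "copyright"; otherwise any leading run of lines starting with
# //, #, or -- (or blank lines) is stripped, since such headers are usually license text.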
def get_filecontent_stats(content: str) -> Dict[str, Union[int, str]]:
# split content into lines and get line lengths
line_lengths = list(map(len, content.splitlines()))
if len(line_lengths) == 0:
return {
"line_count": 0,
"max_line_length": 0,
"avg_line_length": 0,
"alnum_prop": 0,
}
# get max line length
max_length = max(line_lengths)
# get average line length
avg_length = len(content) / len(line_lengths)
# get proportion of alphanumeric characters
alnum_count = sum(map(lambda char: 1 if char.isalnum() else 0, content))
alnum_prop = alnum_count / len(content)
return {
"line_count": len(line_lengths),
"max_line_length": max_length,
"avg_line_length": avg_length,
"alnum_prop": alnum_prop,
}
def preprocess_source(source_fp: pathlib.Path, hash_table: dict):
chunk_stats = []
cleaned_records = []
with gzip.open(source_fp, mode="rt", encoding="utf-8") as in_file:
while True:
jstr = in_file.readline()
if not jstr:
break
result = json.loads(jstr)
            # skip public key certificates
if result['path'].endswith(".crt"):
continue
if result['path'] == "LICENSE":
continue
            # compute the hash of the content
digest = hashlib.md5(result['content'].encode('utf8')).hexdigest()
# skip if we've seen this before
if digest in hash_table:
continue
# add to hash table
hash_table[digest] = 1
# look for C style multi line comment blocks
try:
content = clean_copyright_comments(result['content'])
except Exception as e:
print(f"[{get_timestamp()}][ERROR] "
f"fp={source_fp}; "
f"Error cleaning copyright comments: {e}")
continue
# get file content stats (line count, max line length, avg line
# length)
try:
file_stats = get_filecontent_stats(content)
except Exception as e:
print(f"[{get_timestamp()}][ERROR] "
f"fp={source_fp}; "
f"Error getting file stats: {e}")
continue
# add hash to file stats for later deduplication
file_stats["content_hash"] = digest
file_stats["path"] = result.get('path', "")
chunk_stats.append(file_stats)
# bring result into the right format
record = {
"text": content,
"meta": {
"content_hash": digest,
"timestamp": "",
"source": "github",
"line_count": file_stats["line_count"],
"max_line_length": file_stats["max_line_length"],
"avg_line_length": file_stats["avg_line_length"],
"alnum_prop": file_stats["alnum_prop"],
**{
k: v for k, v in result.items() if k != "content"
}
}
}
cleaned_records.append(record)
return chunk_stats, cleaned_records
def main():
flush_every = 20
run_id = uuid.uuid4().hex
run_fp = pathlib.Path(args.target_dir) / f"run_{run_id}.jsonl"
stats_fp = pathlib.Path(args.target_dir) / f"stats_{run_id}.jsonl"
print(f"[{get_timestamp()}][INFO] Writing records to {run_fp}")
print(f"[{get_timestamp()}][INFO] Writing stats to {stats_fp}")
stats_file = open(stats_fp, "w")
records_file = open(run_fp, "w")
# process list of *.gz files in input_file
with open(args.input, "r") as input_file:
files_to_process = input_file.readlines()
total_files_to_process = len(files_to_process)
hash_table = {}
for file_num, fp in enumerate(files_to_process, start=1):
fp = fp.strip()
if not fp:
print(f"[{get_timestamp()}][WARNING]"
f"[{file_num}/{total_files_to_process}] "
f"Skipping empty line {fp}")
continue
if not fp.endswith(".gz"):
print(f"[{get_timestamp()}][WARNING]"
f"[{file_num}/{total_files_to_process}] "
f"Skipping {fp}")
continue
source_fp = pathlib.Path(fp)
print(f"[{get_timestamp()}][INFO]"
f"[{file_num}/{total_files_to_process}] "
f"Processing {fp}")
# get file stats and clean records
chunk_stats, cleaned_records = preprocess_source(
source_fp, hash_table
)
# write out stats
for stats in chunk_stats:
stats_file.write(json.dumps(stats) + "\n")
# write out cleaned records
for record in cleaned_records:
records_file.write(json.dumps(record) + "\n")
if file_num % flush_every == 0:
# make sure data is written to disk
print(f"[{get_timestamp()}][INFO] Flushing ...")
stats_file.flush()
records_file.flush()
stats_file.close()
records_file.close()
if __name__ == '__main__':
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/github/github_clean_dedup_local.py
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/github/__init__.py
|
|
import argparse
from datetime import datetime
import json
import multiprocessing as mp
import os
import gzip
from transformers import AutoTokenizer
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument('--data_file', type=str, default=None)
parser.add_argument('--target_dir', type=str, default=None)
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-6.9b-deduped")
extensions_whitelist = (".asm", ".bat", ".cmd", ".c", ".h", ".cs", ".cpp",
".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H",
".cmake", ".css", ".dockerfile", ".f90", ".f", ".f03",
".f08", ".f77", ".f95", ".for", ".fpp", ".go", ".hs",
".html", ".java", ".js", ".jl", ".lua", ".md",
".markdown", ".php", ".php3", ".php4", ".php5",
".phps", ".phpt", ".pl", ".pm", ".pod", ".perl",
".ps1", ".psd1", ".psm1", ".py", ".rb", ".rs", ".sql",
".scala", ".sh", ".bash", ".command", ".zsh", ".ts",
".tsx", ".tex", ".vb", "Dockerfile", "Makefile",
".xml", ".rst", ".m", ".smali")
def get_token_count(text):
token_count = len(tokenizer.tokenize(text))
return token_count
def get_timestamp() -> str:
return datetime.now().isoformat()
def discard_record(record):
""" return True if we discard the record """
text = record["text"]
metadata = record["meta"]
# discard empty records
if len(text) == 0:
return True
# discard all records that are not whitelisted
if not metadata["path"].endswith(extensions_whitelist):
return True
# discard files whose maximum line length is greater than 1000
if metadata["max_line_length"] > 1000:
return True
# discard files whose average line length is greater than 100
if metadata["avg_line_length"] > 100:
return True
# discard files whose proportion of alphanumeric characters is less than
# 0.25
if metadata["alnum_prop"] < 0.25:
return True
num_tokens = get_token_count(text)
num_alpha = len([c for c in text if c.isalpha()])
if num_alpha / num_tokens < 1.5:
return True
return False
def filter_line(line):
try:
record = json.loads(line)
except json.decoder.JSONDecodeError:
return None
if discard_record(record):
return None
return line
def process_lines_batch(lines_batch, out_file, num_cpus):
if len(lines_batch) == 0:
return
with mp.Pool(processes=num_cpus - 1) as pool:
filtered_lines = pool.map(filter_line, lines_batch)
for line in filtered_lines:
if line is not None:
out_file.write(line)
out_file.flush()
def main():
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", mp.cpu_count()))
batch_size = num_cpus * 5_000
input_fp = pathlib.Path(args.data_file)
target_dir = pathlib.Path(args.target_dir)
output_fp = target_dir / input_fp.name.replace("deduped_", "filtered_")
output_fp = output_fp.with_suffix(".jsonl.gz")
print(f"[{get_timestamp()}][INFO] Processing {input_fp}")
print(f"[{get_timestamp()}][INFO] Writing to {output_fp}")
out_file = gzip.open(output_fp, "wt", encoding="utf-8")
try:
with open(input_fp, "r") as in_file:
while True:
lines_batch = []
# accumulate batch
while True:
line = in_file.readline()
if not line:
raise StopIteration
lines_batch.append(line)
if len(lines_batch) == batch_size:
break
process_lines_batch(lines_batch, out_file, num_cpus)
except StopIteration:
process_lines_batch(lines_batch, out_file, num_cpus)
out_file.close()
if __name__ == "__main__":
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/github/github_run_filter.py
|
import argparse
import os
from transformers import AutoTokenizer
import json
import multiprocessing as mp
import pathlib
from datetime import datetime
parser = argparse.ArgumentParser()
parser.add_argument('--data_file', type=str, default=None)
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-6.9b-deduped")
FRACTION = 0.1
def get_timestamp() -> str:
return datetime.now().isoformat()
def get_token_count(text):
return len(tokenizer.tokenize(text))
def main():
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", mp.cpu_count()))
data_fp = pathlib.Path(args.data_file)
# get total number of records in file
print(f"[{get_timestamp()}][INFO] Counting records in {data_fp} ...")
with open(data_fp, "r") as f:
num_records = sum(1 for _ in f)
print(f"[{get_timestamp()}][INFO] Found {num_records} records.")
print(f"[{get_timestamp()}][INFO] Loading data...")
with open(data_fp, "r") as f:
# get a batch of records
records = []
for _ in range(int(num_records * FRACTION)):
line = f.readline()
if not line:
break
try:
record = json.loads(line)
except json.decoder.JSONDecodeError:
continue
records.append(record["text"])
print(f"[{get_timestamp()}][INFO] Start token count...")
# count tokens in records
with mp.Pool(num_cpus) as pool:
token_counts = pool.map(get_token_count, records)
total_token_count = sum(token_counts)
result = {
"total_token_count": total_token_count,
"sampling_fraction": FRACTION,
"total_count_estimate": total_token_count / FRACTION
}
out_fp = data_fp.parent / \
f"{data_fp.stem.replace('deduped', 'token_count')}.json"
with open(out_fp, mode="w") as out:
out.write(json.dumps(result))
print(json.dumps(result, indent=4))
print(f"[{get_timestamp()}][INFO] Result written to {out_fp}.")
print(f"[{get_timestamp()}][INFO] Done.")
if __name__ == '__main__':
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/github/github_token_count.py
|
import argparse
import json
from datetime import datetime
from typing import Dict
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument('--first_step_dir', type=str, default=None)
parser.add_argument('--target_dir', type=str, default=None)
args = parser.parse_args()
def get_timestamp() -> str:
return datetime.now().isoformat()
def process_stats_file(source_fp: pathlib.Path, hash_table: Dict[str, str]):
deduped_stats = []
deduped_hashes = []
with open(source_fp, mode="r") as in_file:
while True:
jstr = in_file.readline()
if not jstr:
break
record_stats = json.loads(jstr)
content_hash = record_stats["content_hash"]
if content_hash in hash_table:
# skip this record since it's a duplicate
continue
hash_table[content_hash] = content_hash
deduped_stats.append(record_stats)
deduped_hashes.append(content_hash)
return hash_table, deduped_stats, deduped_hashes
def main():
first_step_dir = pathlib.Path(args.first_step_dir)
deduped_stats_fp = pathlib.Path(args.target_dir) / "stats_deduped.jsonl"
print(f"[{get_timestamp()}][INFO] Deduplicating "
f"records from {first_step_dir}")
# get list of stats files
stats_filepaths = list(first_step_dir.glob("stats_*.jsonl"))
total_files_to_process = len(stats_filepaths)
deduped_stats_file = open(deduped_stats_fp, "w")
hash_set = {}
for file_num, fp in enumerate(stats_filepaths, start=1):
print(f"[{get_timestamp()}][INFO]"
f"[{file_num}/{total_files_to_process}] "
f"Processing {fp}")
hash_set, deduped_stats, deduped_hashes = process_stats_file(
fp, hash_set
)
# write out stats
for stats in deduped_stats:
deduped_stats_file.write(json.dumps(stats) + "\n")
# write out jsonl to hashes
out_fn = fp.name.replace("stats_", "hashes_")
with open(pathlib.Path(args.target_dir) / out_fn, "w") as f:
f.write(json.dumps({"hashes": deduped_hashes}) + "\n")
print(f"[{get_timestamp()}][INFO] Flushing ...")
deduped_stats_file.flush()
deduped_stats_file.close()
print(f"[{get_timestamp()}][INFO] "
f"Total number of unique records: {len(hash_set)}")
if __name__ == '__main__':
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/github/github_global_dedup.py
|
import argparse
import json
from datetime import datetime
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument(
'--first_step_dir', type=str,
default="./data/github/processed_v3"
)
parser.add_argument(
'--input', type=str,
default="data/github/processed_v3/run_ce60fbbc14684ed8b659054801e419c8.jsonl"
)
parser.add_argument(
'--target_dir', type=str,
default="./data/github/processed_v3_deduped"
)
args = parser.parse_args()
def get_timestamp() -> str:
return datetime.now().isoformat()
def main():
input_fp = pathlib.Path(args.input)
target_dir = pathlib.Path(args.target_dir)
output_fp = target_dir / input_fp.name.replace("run_", "deduped_")
# load hashes into memory
hashes_fp = target_dir / input_fp.name.replace("run_", "hashes_")
with open(hashes_fp) as hf:
globally_unique_hashes = hf.readlines()[0]
globally_unique_hashes = set(json.loads(globally_unique_hashes)["hashes"])
output_file = open(output_fp, "w")
print(f"[{get_timestamp()}][INFO]"
f" Processing {input_fp}")
print(f"[{get_timestamp()}][INFO]"
f" Writing to {output_fp}")
print(f"[{get_timestamp()}][INFO]"
f" Using hashes from {hashes_fp}")
nrecs = 0
with open(input_fp, "r") as in_file:
while True:
jstr = in_file.readline()
if not jstr:
break
record = json.loads(jstr)
content_hash = record["meta"]["content_hash"]
if content_hash not in globally_unique_hashes:
continue
# write to output file
output_file.write(json.dumps(record) + "\n")
nrecs += 1
output_file.close()
print(f"[{get_timestamp()}][INFO]"
f" Processed {nrecs} records")
if __name__ == "__main__":
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/github/github_merge_dedup.py
|
from datasets import load_dataset
book_dataset = load_dataset("the_pile_books3")
for split, dataset in book_dataset.items():
dataset.to_json(f"./data/book/books3-{split}.jsonl")
pg19_dataset = load_dataset("pg19")
for split, dataset in pg19_dataset.items():
dataset.to_json(f"./data/book/pg19-{split}.jsonl")
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/book/download.py
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/book/__init__.py
|
|
# Copyright 2023 Ontocord.ai, Together Computer, ETH Zürich, Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Process, Queue
import pickle
import tarfile
import os
import re
from multiprocessing import Pool
from simhash import Simhash
import json
from datetime import datetime
width = 6
hash_k = 5
max_hash_len = 0
def get_features(s):
s = s.lower()
s = re.sub(r'[^\w]+', '', s)
return [s[i:i + width] for i in range(max(len(s) - width + 1, 1))]
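# Example with width = 6: get_features("Hello, World") lowercases the text, strips
# non-word characters to "helloworld", and returns the character 6-grams
#   ['hellow', 'ellowo', 'llowor', 'loworl', 'oworld']
# which are the shingles fed into Simhash below.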
def pg19_index(num):
hashes = []
members = []
print("Starting pg19_%0.3d"%(num), len(hashes))
with open("./data/book/split/pg19_%0.3d"%(num), "r") as f:
lines = f.readlines()
for idx, i in enumerate(lines):
if idx % 200 == 0:
print("This is pg19_%0.3d"%(num), idx)
member = json.loads(i)
try:
if max_hash_len == 0:
hashes.append((str(idx + num * 2000), Simhash(get_features(member['text']))))
else:
hashes.append((str(idx + num * 2000), Simhash(get_features(member['text'][:max_hash_len]))))
members.append(member)
except:
continue
print("Finishing pg19_%0.3d"%(num), len(hashes), len(members))
return (hashes, members)
def book_index(num):
hashes = []
members = []
print("Starting book_%0.3d"%(num), len(hashes))
with open("./data/book/split/books3_%0.3d"%(num), "r") as f:
lines = f.readlines()
for idx, i in enumerate(lines):
if idx % 200 == 0:
print("This is book_%0.3d"%(num), idx)
member = json.loads(i)
try:
if max_hash_len == 0:
hashes.append((str(idx + num * 2000), Simhash(get_features(member['text']))))
else:
hashes.append((str(idx + num * 2000), Simhash(get_features(member['text'][:max_hash_len]))))
members.append(member)
except:
continue
print("Finishing book_%0.3d"%(num), len(hashes), len(members))
return (hashes, members)
def get_pg19(n_jobs):
    with Pool(n_jobs) as p:
        hashes_members = p.map(pg19_index, [i for i in range(15)])
    return hashes_members
def get_book(n_jobs):
    with Pool(n_jobs) as p:
        hashes_members = p.map(book_index, [i for i in range(99)])
    return hashes_members
def split_list(list, n):
length = len(list)
return [list[i*length // n: (i+1)*length // n] for i in range(n)]
def find_match(args):
i, index = args
value_dict = {}
for item in i:
flag = 1
try:
now_list = index.get_near_dups(item[1])
for x in now_list:
if int(x) >= int(item[0]):
continue
flag = 0
break
value_dict[item[0]] = flag
except:
value_dict[item[0]] = flag
return value_dict
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-w', type=int, default=6, help='the window size')
parser.add_argument('-k', type=int, default=5, help='find K nearest region')
parser.add_argument('-l', type=int, default=0, help='the max length of the text for hashing, 0 means no limit')
parser.add_argument('-n', type=int, default=100, help='the number of processes to run')
args = parser.parse_args()
width = args.w
hash_k = args.k
max_hash_len = args.l
n_jobs = args.n
outfile = "./data/book/book.jsonl"
hashes_members = get_pg19(n_jobs)
hashes_members.extend(get_book(n_jobs))
print("Finish getting hashes and members!")
    import itertools
    hashes = list(itertools.chain(*[item[0] for item in hashes_members]))
    members = list(itertools.chain(*[item[1] for item in hashes_members]))
    from simhash import SimhashIndex
    index = SimhashIndex(hashes, k=hash_k)
    print("Finish building index!")
    n_hashes = split_list(hashes, n_jobs)
with Pool(n_jobs) as p:
temp_dict = p.map(find_match, [(i, index) for i in n_hashes])
value_dict = {}
for dict in temp_dict:
for i in dict:
value_dict[i] = dict[i]
print("Finish finding matches!")
mem_hashes = list(zip(members, hashes))
with open(outfile, 'w') as f:
for mem, a_hash in mem_hashes:
if value_dict[a_hash[0]] == 1:
meta = {}
for feature in mem:
if feature != "text":
meta[feature] = mem[feature]
new = {"meta": meta, "text": mem["text"]}
f.write(json.dumps(new) + '\n')
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/book/dedup.py
|
import os
import json
from multiprocessing import Pool
from transformers import AutoTokenizer
enc = AutoTokenizer.from_pretrained(
"EleutherAI/pythia-6.9b-deduped",
)
def get_token_count(qa_pair):
return len(enc.tokenize(qa_pair['text']))
LEMMA_DATA_DIR_SE_OUT = "./data/book/"
sites = [x for x in os.listdir(os.path.join(LEMMA_DATA_DIR_SE_OUT)) if os.path.isfile(os.path.join(LEMMA_DATA_DIR_SE_OUT, x))]
sites.sort()
os.makedirs(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts"), exist_ok=True)
token_counts = {}
for site in sites:
print(f"[INFO] Processing {site}...")
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, site), "r") as f:
qa_pairs = [json.loads(x) for x in f.readlines()]
print(f"[INFO] Got {len(qa_pairs)} books for {site}.")
token_count = 0
with Pool(100) as p:
token_count = sum(p.map(get_token_count, qa_pairs))
token_counts[site] = token_count
print(f"[INFO] Got {token_count} tokens for {site}.")
summ = 0
for i in token_counts:
print(f"{i}: {token_counts[i]}")
summ += token_counts[i]
print("Total: ", summ)
with open(os.path.join(LEMMA_DATA_DIR_SE_OUT, "token_counts", site), "w") as f:
json.dump(token_counts, f)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/book/token_count.py
|
import argparse
from datetime import datetime
import json
import gzip
import os
import pathlib
import joblib
from joblib import Parallel, delayed
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default="./data/c4/en")
parser.add_argument('--output_dir', type=str, default="./data/c4/processed_en")
parser.add_argument('--max_files', type=int, default=-1)
args = parser.parse_args()
def get_timestamp() -> str:
return datetime.now().isoformat()
def process_record(record):
return {
"text": record["text"],
"meta": {
"timestamp": record["timestamp"],
"url": record["url"],
"language": "en",
"source": "c4"
}
}
def process_file(fp):
print(f"[{get_timestamp()}][INFO] start processing {fp}...")
out_dir = pathlib.Path(args.output_dir)
out_fp = out_dir / fp.with_suffix("").name.replace("json", "jsonl")
with gzip.open(fp, "r") as in_f:
records = [json.loads(line) for line in in_f.readlines()]
with open(out_fp, "w") as out_f:
for record in records:
record = process_record(record)
if record is not None:
out_f.write(json.dumps(record) + "\n")
print(f"[{get_timestamp()}][INFO] done processing {fp}...")
def main():
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", joblib.cpu_count()))
print(f"Using {num_cpus} processes")
out_dir = pathlib.Path(args.output_dir)
if not out_dir.exists():
out_dir.mkdir(parents=True)
records_files = list(pathlib.Path(args.data_dir).glob("*.json.gz"))
if args.max_files > 0:
records_files = records_files[:args.max_files]
Parallel(n_jobs=num_cpus)(
delayed(process_file)(fp) for fp in records_files
)
if __name__ == '__main__':
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/c4/c4_reformat.py
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/c4/__init__.py
|
|
import argparse
import boto3
from botocore.exceptions import ClientError
import configparser
import itertools
import numpy as np
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument('--aws_config', type=str, help='aws config file')
parser.add_argument('--target_dir', type=str, default="./data/arxiv")
parser.add_argument('--workers', type=int, default=1)
parser.add_argument('--input', type=str,
help='input file from which to read keys. '
'This is only used when running on slurm.')
parser.add_argument('--local', action='store_true')
parser.add_argument('--setup', action='store_true',
help='if set, we partition the keys into chunks.')
parser.add_argument('--max_files', type=int, default=-1,
help='max files to download, useful for testing')
args = parser.parse_args()
class ArxivDownloader:
def __init__(self, config_file: str):
# import configs from config file
        configs = configparser.ConfigParser()
configs.read(config_file)
# Create S3 resource & set configs
self.s3resource = boto3.resource(
's3', # the AWS resource we want to use
aws_access_key_id=configs['DEFAULT']['ACCESS_KEY'],
aws_secret_access_key=configs['DEFAULT']['SECRET_KEY'],
region_name='us-east-1' # same region arxiv bucket is in
)
def run(self, input_file: str, tgt_dir: pathlib.Path, max_files=-1):
(tgt_dir / 'src').mkdir(exist_ok=True, parents=True)
with open(input_file, 'r') as f:
file_keys = f.readlines()
files_downloaded = 0
for key in file_keys:
self.__download_file(tgt_dir=tgt_dir, key=key.strip())
files_downloaded += 1
if files_downloaded >= max_files > 0:
break
def __download_file(self, key, tgt_dir: pathlib.Path):
        print('\nDownloading s3://arxiv/{} '
              'to {}...'.format(key, pathlib.Path(tgt_dir, key)))
try:
self.s3resource.meta.client.download_file(
Bucket='arxiv',
Key=key,
Filename=pathlib.Path(tgt_dir, key),
ExtraArgs={'RequestPayer': 'requester'})
except ClientError as e:
if e.response['Error']['Code'] == "404":
print('ERROR: ' + key + " does not exist in arxiv bucket")
else:
try:
code = e.response['Error']['Code']
msg = e.response['Error']['Message']
print(f"UNKNOWN ERROR: code={code}; msg={msg}")
except Exception as e:
print("UNKNOWN ERROR for key ", key, e)
def partition_keys(
partitions_dir: pathlib.Path, config_file: str, workers: int
):
r"""Partitions the keys of the arxiv bucket into chunks for parallel
download.
@param partitions_dir: the directory to save the partition files to (will be
created if it doesn't exist)
@param config_file: the path to the config file containing the aws
credentials
@param workers: the number of workers to partition the keys into
"""
partitions_dir = pathlib.Path(partitions_dir).absolute()
partitions_dir.mkdir(parents=True, exist_ok=True)
# Securely import configs from private config file
    configs = configparser.ConfigParser()
configs.read(config_file)
# Create S3 resource & set configs
print('Connecting to Amazon S3...')
s3resource = boto3.resource(
's3', # the AWS resource we want to use
aws_access_key_id=configs['DEFAULT']['ACCESS_KEY'],
aws_secret_access_key=configs['DEFAULT']['SECRET_KEY'],
region_name='us-east-1' # same region arxiv bucket is in
)
# Create a reusable Paginator
paginator = s3resource.meta.client.get_paginator('list_objects_v2')
# Create a PageIterator from the Paginator
page_iterator = paginator.paginate(
Bucket='arxiv',
RequestPayer='requester',
Prefix='src/'
)
# partition keys into chunks
file_parts = np.array_split(list(
itertools.chain(
*[
[
file['Key'] for file in page['Contents']
if file['Key'].endswith(".tar")
]
for page in page_iterator
]
)),
indices_or_sections=workers
)
# save chunks to disk as text files
for i, part in enumerate(file_parts):
part_fp = partitions_dir / f"part_{i}.txt"
with open(part_fp, "w") as f:
f.write("\n".join(part))
print(f"Created partition {part_fp}.")
def run_download(
input_file: str,
target_dir: pathlib.Path,
max_files: int,
aws_config: str
):
# create downloader
arxiv_downloader = ArxivDownloader(config_file=aws_config)
# run download
arxiv_downloader.run(
input_file=input_file,
tgt_dir=target_dir,
max_files=max_files
)
def main():
if not args.local and not args.setup:
# here we only download the files; this requires that setup has already
# been run
run_download(input_file=args.input,
target_dir=pathlib.Path(args.target_dir),
max_files=args.max_files,
aws_config=args.aws_config)
return
# create directories
target_dir = pathlib.Path(args.target_dir)
partitions_dir = target_dir / 'partitions'
if args.setup:
# here we only partition the keys into chunks; no download yet
partition_keys(partitions_dir=partitions_dir,
config_file=args.aws_config,
workers=args.workers)
return
if args.local:
partition_keys(partitions_dir=partitions_dir,
config_file=args.aws_config,
workers=args.workers)
run_download(input_file=str(partitions_dir / 'part_0.txt'),
target_dir=pathlib.Path(args.target_dir),
max_files=args.max_files,
aws_config=args.aws_config)
if __name__ == '__main__':
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/arxiv/run_download.py
|
import concurrent.futures
from datetime import datetime
import fasttext
import json
import pathlib
import tarfile
from typing import List, Tuple, Dict, Union
import gzip
import tempfile
import uuid
import re
from utils import predict_lang, get_timestamp, format_arxiv_id
# suppress fasttext warning
fasttext.FastText.eprint = lambda x: None
# constants
ARXIV_URL = "https://arxiv.org/abs/"
FT_MODEL_PATH = "models/lid.176.bin"
class ArxivCleaner:
r""" Class for cleaning raw arxiv data. """
def __init__(
self,
data_dir: pathlib.Path,
work_dir: pathlib.Path,
target_dir: pathlib.Path,
worker_id: str = None
):
self._data_dir = data_dir
self._work_dir = work_dir
self._target_dir = target_dir
self._worker_id = worker_id if worker_id else str(uuid.uuid4())
# make sure dirs exist
for d in [self._work_dir, self._target_dir]:
if not d.exists():
d.mkdir(parents=True)
def run_parallel(
            self, max_files: int = -1, workers: int = None,
tar_fp_list: List[str] = None
):
r""" function to run the cleaning process in parallel. This function
will iterate over all arxiv projects and clean the tex files. The
cleaned tex files are then written to a jsonl file.
@param max_files: maximum number of files to process, defaults to -1
which means all files are processed. This is useful for testing.
@param workers: number of workers to use, defaults to None which means
that all cores are used.
@param tar_fp_list: list of tars to process. Defaults to None which
means that all files in data_dir are processed.
"""
out_file = self._target_dir / f"arxiv_{self._worker_id}.jsonl"
with open(out_file, "w") as f:
with concurrent.futures.ProcessPoolExecutor(workers) as executor:
for record, arxiv_id in executor.map(
create_record_single_arg,
self.arxiv_iterator(
max_files=max_files, tar_fp_list=tar_fp_list
)
):
if record is None:
print(f"[{get_timestamp()}][ERROR] "
f"failed to process {arxiv_id}")
continue
if len(record["text"]) == 0:
print(f"[{get_timestamp()}][WARNING] "
f"empty text for {arxiv_id}")
continue
f.write(json.dumps(record) + "\n")
print(f"[{get_timestamp()}][INFO] "
f"processed {arxiv_id}")
executor.shutdown(wait=True)
def run(self, max_files: int = -1, out_fname: str = "arxiv.jsonl"):
r""" function to run the cleaning process. This function will iterate
over all arxiv projects and clean the tex files. The cleaned tex files
are then written to a jsonl file.
@param max_files: maximum number of files to process, defaults to -1
which means all files are processed. This is useful for testing.
@param out_fname: name of the output file, defaults to "arxiv.jsonl"
"""
with open(self._target_dir / out_fname, "w") as f:
for tex_files, yymm, arxiv_id, timestamp in self.arxiv_iterator(
max_files=max_files
):
record, arxiv_id = create_record(
tex_files=tex_files,
yymm=yymm,
arxiv_id=arxiv_id,
timestamp=timestamp
)
if record is None:
print(f"[{get_timestamp()}][ERROR] "
f"failed to process {arxiv_id}")
continue
if len(record["text"]) == 0:
print(f"[{get_timestamp()}][WARNING] "
f"empty text for {arxiv_id}")
continue
f.write(json.dumps(record) + "\n")
print(f"[{get_timestamp()}][INFO] "
f"processed {arxiv_id}")
def arxiv_iterator(
self, max_files: int = -1, tar_fp_list: List[str] = None
):
r""" iterator over arxiv shards. Each shard contains tex projects or
files that are compressed using gzip. This function will extract the
tex files and yield them together with yymm, the raw arxiv id and the
timestamp of the project.
@param max_files: maximum number of files to process, defaults to -1
which means all files are processed.
@param tar_fp_list: optional list of tar files to process, defaults to
None. In this case all tar files in data_dir are processed.
@return: iterator over tex files, yymm, arxiv id and timestamp.
"""
if tar_fp_list is None:
def _tar_fp_iterator():
for _tar_fp in self._data_dir.glob("*.tar"):
yield _tar_fp
else:
def _tar_fp_iterator():
for _tar_fp in tar_fp_list:
yield _tar_fp
failed = 0
processed = 0
for tar_fp in _tar_fp_iterator():
print(f"[{get_timestamp()}][INFO] start processing {tar_fp}")
with tempfile.TemporaryDirectory(dir=self._work_dir) as tmpdir:
with tarfile.open(tar_fp) as tf:
tf.extractall(members=tf.getmembers(), path=tmpdir)
for proj_dir_or_file in pathlib.Path(tmpdir).rglob("*.gz"):
# get arxiv id and month from the filename
yymm = proj_dir_or_file.parent.stem
arxiv_id = proj_dir_or_file.stem
# load the tex source files (we also get the timestamp
# here)
data = _tex_proj_loader(proj_dir_or_file)
if data is None:
failed += 1
continue
tex_files, timestamp = data
processed += 1
if processed > max_files > 0:
break
yield tex_files, yymm, arxiv_id, timestamp
else:
continue
break
print(f"[{get_timestamp()}][INFO] # Failed loading : {failed}")
print(f"[{get_timestamp()}][INFO] done.")
def create_record_single_arg(args):
r""" convenience function to create a record from a single argument. """
return create_record(*args)
def create_record(
tex_files: List[str],
yymm: str,
arxiv_id: str,
timestamp: float
) -> Tuple[Union[Dict[str, Union[str, Dict[str, str]]], str, None], str]:
r""" function to create a record from the tex files, yymm, arxiv id and
timestamp. The function also detects the language of the tex files using a
fasttext model.
@param tex_files: list of tex file contents as strings
@param yymm: yymm of the arxiv project
@param arxiv_id: raw arxiv id
@param timestamp: timestamp of the arxiv project
@return: dictionary containing the cleaned tex text and metadata
"""
# clean tex files
try:
cleaned_str = clean_tex_files(tex_files)
except Exception as e:
return None, arxiv_id
if len(cleaned_str) == 0:
return {"text": "", "meta": {}}, arxiv_id
# get the arxiv id in the correct format
try:
clean_arxiv_id = format_arxiv_id(arxiv_id)
except Exception as e:
print(f"[WARNING] failed to format arxiv id {arxiv_id}; excpetion={e}")
clean_arxiv_id = arxiv_id
# detect language
ft_model = fasttext.load_model(path=str(FT_MODEL_PATH))
lang, _ = predict_lang(text=cleaned_str, lang_model=ft_model, k=1)
try:
lang = lang[0]
except IndexError:
lang = "unknown"
if timestamp is not None:
timestamp = datetime.fromtimestamp(timestamp).isoformat()
return (
{
"text": cleaned_str,
"meta": {
"timestamp": timestamp,
"yymm": yymm,
"arxiv_id": clean_arxiv_id,
"language": lang,
"url": f"{ARXIV_URL}{clean_arxiv_id}",
"source": "arxiv"
}
},
clean_arxiv_id
)
def _tex_proj_loader(
file_or_dir_path: pathlib.Path
) -> Union[Tuple[List[str], float], None]:
r""" function to load the tex files from a tar file or a gzip file. The
function will return a tuple containing a list of tex files and the
timestamp of the project.
@param file_or_dir_path: path to the tar file or the gzip file
@return: tuple containing a list of tex files and the timestamp of the
project
"""
files_and_content = []
timestamp = file_or_dir_path.lstat().st_mtime
try:
        # first, try to open the project as a (gzipped) tar archive
with tarfile.open(file_or_dir_path) as sub_tf:
for member in sub_tf.getmembers():
if member.name.endswith(".tex"):
file_content = sub_tf.extractfile(member).read()
try:
file_content = file_content.decode("utf-8")
except UnicodeDecodeError:
print(f"[{get_timestamp()}][ERROR] "
f"UnicodeDecodeError: {file_or_dir_path}")
return None
files_and_content.append(file_content)
except tarfile.ReadError:
# otherwise we try opening it as a gzip file
try:
with gzip.open(file_or_dir_path, "rb") as gz:
file_content = gz.read()
except Exception as e:
            # if that also fails, we skip this file
print(f"[ERROR] {e}: {file_or_dir_path}")
return None
try:
file_content = file_content.decode("utf-8")
except UnicodeDecodeError:
print(f"[{get_timestamp()}][ERROR] "
f"UnicodeDecodeError: {file_or_dir_path}")
return None
files_and_content.append(file_content)
except Exception as e:
print(f"[ERROR] {e}: {file_or_dir_path}")
return None
return files_and_content, timestamp
def clean_tex_files(tex_files: List[str]) -> str:
r""" function takes a list of tex files and returns a cleaned version of
the tex project. The cleaned version is a concatenation of the tex files
with the following modifications:
- if multiple latex files, then concatenate them
- remove all comments (i.e. all lines starting with %)
- remove everything before the first \section header
- remove everything after the first occurrence of either \appendix or
\bibliography
- inline-expand definitions and macros
@param tex_files: list of file_content strings
@return: cleaned tex project as a string, empty string if no tex files are
provided
"""
if len(tex_files) == 0:
return ""
# build dictionaries that contain the definitions of all macros in all tex
# files. This is later used to expand all macros used in the text with
# their definitions, so that consistency among different authors is
# ensured.
non_arg_macros = {}
for file_content in tex_files:
non_arg_macros.update(_build_non_arg_macros_dict(file_content))
# TODO: macros that take arguments are not supported yet
arg_macros = {}
# join multiple latex files with a newline character
cleaned_latex_file_str = "\n".join(
_clean_tex_file(
file_content=file_content,
arg_macros=arg_macros,
non_arg_macros=non_arg_macros
)
for file_content in tex_files
)
return cleaned_latex_file_str
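# Tiny end-to-end illustration of clean_tex_files on a hypothetical project:
# the preamble and the comment are stripped, the \bibliography tail is
# dropped, and the \name macro is inline-expanded. Not used by the pipeline.
def _example_clean_tex_files() -> None:
    tex = (
        "\\newcommand{\\name}{Demo}\n"
        "% a comment that will be dropped\n"
        "\\section{Intro}\n"
        "Hello \\name.\n"
        "\\bibliography{refs}\n"
    )
    cleaned = clean_tex_files([tex])
    assert cleaned == "\\section{Intro}\nHello Demo.\n"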
def _clean_tex_file(
file_content: str, arg_macros: Dict, non_arg_macros: Dict
) -> str:
r""" function takes a tex file as input and returns a cleaned version. The
cleaned version is a concatenation of the tex files with the
following modifications:
- remove all comments (i.e. all lines starting with %)
- remove everything before the first section-like header
- remove everything after the first occurrence of either \appendix or
\bibliography
- inline-expand definitions and macros
@param file_content: the content of the tex file as a string.
@return: cleaned tex file as a string
"""
    # find the first occurrence of a \section-like header and replace everything
# before it with an empty string. This matches the following pattern:
# \<section-type>[optional-args]{name}
pattern = r"^(.*?)("
pattern += r"\\\bchapter\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bpart\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bsection\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bsubsection\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bsubsubsection\b\*?(?:\[(.*?)\])?\{(.*?)\}|"
pattern += r"\\\bparagraph\b\*?(?:\[(.*?)\])?\{(.*?)\}"
pattern += r"\\\bsubparagraph\b\*?(?:\[(.*?)\])?\{(.*?)\}"
pattern += r")"
# if no section like header is found, then we return an empty string
if not re.search(pattern, file_content, flags=re.DOTALL):
return ""
# replace everything with the second group of the match (i.e. everything
# after and including the section header)
file_content = re.sub(
pattern=pattern,
repl=r"\2",
string=file_content,
flags=re.DOTALL # make sure that the dot matches also newlines
)
# remove all line comments
file_content = re.sub(
pattern=r"(?m)^%.*\n?",
repl=r"",
string=file_content,
flags=re.MULTILINE
)
    # remove all inline comments within a line
file_content = re.sub(
# pattern matches a "%" that is not preceded by a backslash (=comment)
pattern=r"[^\\]%.+$",
repl=r"",
string=file_content,
flags=re.MULTILINE
)
    # find the first occurrence of either \appendix or \bibliography and
# replace everything after it with an empty string
pattern = r"("
pattern += r"\\appendix|"
pattern += r"\\begin\{references\}|"
pattern += r"\\begin\{REFERENCES\}|"
pattern += r"\\begin\{thebibliography\}|"
pattern += r"\\bibliography\{.*\}"
pattern += r").*$"
file_content = re.sub(
pattern=pattern,
repl=r'',
string=file_content,
flags=re.DOTALL # make sure that the dot matches also newlines
)
# inline-expand all non-arg macros
for macro_name, macro_value in non_arg_macros.items():
file_content = re.sub(
# make pattern grouped to make sure that the macro is not part
# of a longer alphanumeric word
pattern=r"(" + macro_name + r")" + r"([^a-zA-Z0-9])",
# replace the macro with its value and add back the character that
# was matched after the macro
repl=macro_value + r"\2",
string=file_content
)
# inline-expand all macros that use args
# TODO: inline-expand macros with args
for macro_name, macro_value in arg_macros.items():
pass
return file_content
def _build_non_arg_macros_dict(file_content: str) -> Dict[str, str]:
r""" function takes the content of a tex file and returns a dictionary
that contains the definitions of all macros that do not use arguments.
The dictionary is of the form {macro_name: macro_value}.
@param file_content: the content of the tex file as a string.
@return: dict
"""
# regex for extracting \newcommand macros without arguments
non_arg_nc_reg = re.compile(
# this regex matches the following:
# \newcommand{\macro_name}{macro_value}
# \newcommand*{\macro_name}{macro_value}
# where macro_name is only allowed to contain letters and numbers;
# macro_value can contain any character.
pattern=r'\\\bnewcommand\b\*?\{(\\[a-zA-Z0-9]+?)\}\{(.*?)\}$',
flags=re.MULTILINE
)
# regex for extracting \def macros without arguments
non_arg_def_reg = re.compile(
# this regex matches the following:
# \def\macro_name{macro_value}
# where macro_name is only allowed to contain letters and numbers;
# macro_value can contain any character.
pattern=r'\\def\s*(\\[a-zA-Z0-9]+?)\s*\{(.*?)\}$',
flags=re.MULTILINE
)
# Extract all user-defined LaTeX macros from the preamble
macros = {}
for reg in [non_arg_nc_reg, non_arg_def_reg]:
for match in reg.finditer(file_content):
# convert the macro name and value to a raw string that can be
# used in re.sub
macro_name = match \
.group(1).encode("unicode-escape").decode("utf-8")
macro_val = match \
.group(2).encode("unicode-escape").decode("utf-8")
macros[macro_name] = macro_val
return macros
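# Small sketch of what _build_non_arg_macros_dict extracts; the LaTeX snippet
# is a made-up preamble, not taken from any arxiv project.
def _example_build_non_arg_macros() -> None:
    preamble = "\\newcommand{\\dataset}{RedPajama}\n\\def\\eps{\\varepsilon}"
    macros = _build_non_arg_macros_dict(preamble)
    # one entry per definition; names keep their leading backslash and are
    # escaped so they can be dropped directly into a re.sub pattern
    assert len(macros) == 2
    print(macros)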
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/arxiv/arxiv_cleaner.py
|
from datetime import datetime
import fasttext
import re
from typing import List, Tuple
def get_timestamp() -> str:
return datetime.now().isoformat()
def predict_lang(
text: str, lang_model: fasttext.FastText._FastText, k=5
) -> Tuple[List[str], List[float]]:
r""" Predict top-k languages of text.
@param text: text to predict language of
@param lang_model: language model
@param k: number of predictions to return, defaults to 5
@return: list of predicted languages and list of corresponding
confidence scores
"""
# preprocess text
text = text.lower().replace("\n", " ").replace("\t", " ")
tags, confs = lang_model.predict(text, k=k)
# convert confs to float
confs = [float(conf) for conf in confs]
# convert lang codes to names
tags = [tag.replace("__label__", "") for tag in tags]
return tags, confs
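# Minimal usage sketch for predict_lang. The checkpoint path below is an
# assumption (any fastText language-id model such as lid.176.bin works); it is
# only meant to illustrate the call signature and is never called by the
# pipeline itself.
def _example_predict_lang(model_path: str = "models/lid.176.bin") -> None:
    model = fasttext.load_model(model_path)
    tags, confs = predict_lang("This is an English sentence.", model, k=2)
    # tags is a list like ["en", "de"], confs the matching probabilities
    print(list(zip(tags, confs)))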
def format_arxiv_id(arxiv_id: str) -> str:
r""" this function brings the raw arxiv-id into a format compliant with the
specification from arxiv. This is used to create the url to the arxiv
abstract page.
- Format prior to March 2007:
<archive>/YYMMNNN where N is a 3-digit number
    - Format after March 2007: YYMM.NNNNN where the sequence number NNNNN has
    4 or 5 digits (no archive prefix)
References: https://info.arxiv.org/help/arxiv_identifier.html
@param arxiv_id: raw arxiv id which can be in one of the following formats:
- <archive><YY><MM><NNN>
- <YY><MM><NNNNN|NNNNNN>
@return: formatted arxiv id
"""
match = re.search(r'^([a-zA-Z-]*)([\d\.]+)$', arxiv_id)
if match is None:
raise ValueError(f"Invalid arxiv id: {arxiv_id}")
if match.group(1) == "":
return match.group(2)
return f"{match.group(1)}/{match.group(2)}"
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/arxiv/utils.py
|
import argparse
import os
from collections import defaultdict
from datetime import datetime
from transformers import AutoTokenizer
import json
import multiprocessing as mp
import pathlib
import pandas as pd
from tabulate import tabulate
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default="./data/arxiv/processed")
parser.add_argument('--max_files', type=int, default=-1,
help="max lines to process; this is useful for testing")
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/pythia-6.9b-deduped",
)
def get_token_count(text):
return len(tokenizer.tokenize(text))
def process_record(record):
token_count = get_token_count(text=record["text"])
year = record["meta"]["yymm"][:2]
return token_count, year
def get_timestamp() -> str:
return datetime.now().isoformat()
def print_stats(token_count_data):
df = pd.DataFrame.from_dict(
token_count_data, orient="index"
)
df = df.reset_index()
df.columns = ["year", "count"]
df = df.set_index("year")
df["count"] = df["count"].astype(int)
df["count"] = df["count"] / 1e12
df = df.sort_values(by="count", ascending=False)
df.loc['Total'] = df.sum(numeric_only=True)
print(tabulate(
df, headers=["year", "count (T)"], tablefmt="github", floatfmt=".4f"
))
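# Illustrative only: print_stats expects raw token totals keyed by 2-digit
# year and reports them in trillions; the numbers below are made up.
def _example_print_stats() -> None:
    print_stats({"21": 2.5e11, "22": 1.0e11})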
def main():
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", mp.cpu_count()))
print(f"Using {num_cpus} workers")
files_processed = 0
token_count_data = defaultdict(int)
for filenum, fp in enumerate(pathlib.Path(args.data_dir).glob("*.jsonl")):
with open(fp, "r") as f:
records = [json.loads(rec) for rec in f.readlines()]
with mp.Pool(processes=num_cpus - 2) as pool:
results = pool.map(process_record, records)
for counts, year in results:
token_count_data[year] += int(counts)
        files_processed += 1
        total_tokens = sum(token_count_data.values())
print(f"[{get_timestamp()}][INFO] "
f"processed {filenum} files; "
f"total tokens: {total_tokens}")
if files_processed > args.max_files > 0:
print(f"[{get_timestamp()}][INFO] "
f"reached max lines")
break
print(json.dumps(token_count_data, indent=4))
print(f"Total tokens: {sum(token_count_data.values())}")
print("\n" + "=" * 80 + "\n")
print_stats(token_count_data)
if __name__ == '__main__':
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/arxiv/token_count.py
|
import argparse
import os
import uuid
import numpy as np
import pathlib
import tempfile
from typing import List
import joblib
from arxiv_cleaner import ArxivCleaner
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default="./data/arxiv/src")
parser.add_argument('--target_dir', type=str, default="./data/arxiv/processed")
parser.add_argument('--workers', type=int, default=1)
parser.add_argument('--input', type=str, default=None,
help='input file from which to read keys. '
'This is only used when running on slurm.')
parser.add_argument('--local', action='store_true')
parser.add_argument('--setup', action='store_true',
                    help='if set, we partition the keys into chunks.')
parser.add_argument('--max_files', type=int, default=-1,
help='max files to download, useful for testing')
args = parser.parse_args()
WORK_DIR = os.getenv('WORK_DIR', pathlib.Path(__file__).parent / "work")
WORK_DIR = pathlib.Path(WORK_DIR)
if not WORK_DIR.exists():
WORK_DIR.mkdir()
print(f"Created work directory {WORK_DIR}")
def run_clean(
data_dir: pathlib.Path,
target_dir: pathlib.Path,
input_file: pathlib.Path = None,
max_files: int = -1,
):
num_cpus = int(os.getenv("SLURM_CPUS_PER_TASK", joblib.cpu_count()))
print(f"Using {num_cpus} processes")
worker_id = os.getenv('SLURM_ARRAY_TASK_ID', None)
if worker_id is None:
worker_id = str(uuid.uuid4())
# create temporary work directory
work_dir = pathlib.Path(
tempfile.mkdtemp(dir=WORK_DIR, prefix=worker_id + "_")
)
if input_file is not None:
# we are running on slurm
assert input_file.exists()
with open(input_file, 'r') as f:
tar_fp_list = f.read().splitlines()
else:
tar_fp_list = None
# create cleaner
arxiv_cleaner = ArxivCleaner(
data_dir=data_dir, work_dir=work_dir, target_dir=target_dir,
worker_id=worker_id
)
arxiv_cleaner.run_parallel(
max_files=max_files, tar_fp_list=tar_fp_list
)
def partition_tar_files(
data_dir: pathlib.Path, workers: int
) -> List[List[str]]:
return np.array_split(
list(str(fp) for fp in data_dir.glob('*.tar')),
indices_or_sections=workers
)
def main():
# create target directory where we store the processed data
target_dir = pathlib.Path(args.target_dir)
if not target_dir.exists():
target_dir.mkdir()
data_dir = pathlib.Path(args.data_dir)
assert data_dir.exists()
if not args.local and not args.setup:
# here we only download the files; this requires that setup has already
# been run
run_clean(
data_dir=data_dir,
target_dir=target_dir,
input_file=pathlib.Path(args.input),
max_files=args.max_files
)
return
if args.setup:
parts = partition_tar_files(data_dir=data_dir, workers=args.workers)
if not (target_dir / "partitions").exists():
(target_dir / "partitions").mkdir()
for i, part in enumerate(parts):
with open(
target_dir / "partitions" / f'tars_part_{i}.txt', 'w'
) as f:
f.write('\n'.join(part))
return
# run locally; here we don't partition the tar files as slurm is not used
if args.local:
run_clean(
data_dir=pathlib.Path(args.data_dir),
target_dir=pathlib.Path(args.target_dir),
input_file=None,
max_files=args.max_files
)
if __name__ == '__main__':
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/arxiv/run_clean.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from setuptools import setup # type: ignore
setup(
name="cc_net",
version="1.0.0",
packages=["cc_net"],
# metadata to display on PyPI
author="Guillaume Wenzek",
author_email="guw@fb.com",
description="Tools to download and clean Common Crawl",
keywords="common crawl dataset",
url="https://github.com/facebookresearch/cc_net",
license="CC-BY-NC-4.0",
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
project_urls={
"Bug Tracker": "https://github.com/facebookresearch/cc_net/issues",
"Source Code": "https://github.com/facebookresearch/cc_net",
},
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.7",
],
python_requires=">=3.7",
install_requires=[
"beautifulsoup4>=4.7.1",
"pandas>=0.23.4",
"requests>=2.22.0",
"fasttext>=0.9.1",
"sentencepiece>=0.1.82",
"kenlm @ git+https://github.com/kpu/kenlm.git@master",
"func_argparse>=1.1.1",
"psutil>=5.6.3",
"sacremoses",
"submitit>=1.0.0",
"typing_extensions",
],
extras_require={
"dev": ["mypy==0.790", "pytest", "black==19.3b0", "isort==5.6.4"],
# To use scripts inside cc_net/tools
"tools": ["lxml", "sentence_splitter"],
# Memory-efficient hashset.
# This fork only compiles the kind of dict used by cc_net.
# Full version is at https://github.com/atom-moyer/getpy
"getpy": ["getpy @ git+https://github.com/gwenzek/getpy.git@v0.9.10-subset"],
},
package_data={"cc_net": ["data/*"]},
)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Main script to download a CC dump, remove duplicates, split by language and
filter the documents.
The pipeline parameters are described in the `Config` class.
"""
import hashlib
import json
import time
import warnings
from argparse import ArgumentParser
from collections import defaultdict
from itertools import repeat
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Tuple
import func_argparse
# Local scripts
from cc_net import dedup, execution, jsonql, minify, perplexity, process_wet_file
from cc_net import regroup as regroup_module
from cc_net import split_by_lang
from cc_net.execution import Executor
# Constant
FILE_DIR = Path(__file__).parent
CUTOFF_CSV = FILE_DIR / "data" / "cutoff.csv"
DEFAULT_PIPELINE = [
"dedup",
"lid",
"keep_lang",
"sp",
"lm",
"pp_bucket",
"drop",
"split_by_lang",
]
class Config(NamedTuple):
"""
Mine Common Crawl with the given settings.
config_name
dump: CC dump id
output_dir: working directory
    mined_dir: name of the destination folder, full path will be {output_dir}/{mined_dir}/{dump_id}
    execution: choose how to parallelize the execution
num_shards: number of shards to split the dump
min_shard: start at shard `min_shard` if specified
    num_segments_per_shard: allows downloading only a small portion of CC (e.g. for tests)
min_len: remove documents shorter than this (in chars)
    hash_in_mem: number of shard hash files to load in memory for dedup
lang_whitelist: only treat those languages
lang_blacklist: ignore those languages
lang_threshold: remove docs whose top language score is lower than this
    keep_bucket: keep only documents from these perplexity buckets, chosen from (head, middle, tail, all)
lm_dir: folder containing LMs
lm_languages: only use LMs for the following languages
cutoff: cutoff file to use for split in head/middle/tail
mine_num_processes: number of processes to use for mining
    target_size: size of the final files produced during the `regroup` stage
cleanup_after_regroup: delete intermediary files after regroup
    task_parallelism: max number of tasks to run in parallel
    pipeline: restricts the mining pipeline to the given steps. Order is important!
experiments: (HACK) enable specific experiments in the code
"""
config_name: str = "base"
dump: str = "2017-51"
output_dir: Path = Path("data")
mined_dir: str = "mined"
execution: str = "auto"
num_shards: int = 1600
min_shard: int = -1
num_segments_per_shard: int = -1
metadata: Optional[str] = None
min_len: int = 300
hash_in_mem: int = 50
lang_whitelist: Sequence[str] = []
lang_blacklist: Sequence[str] = []
lang_threshold: float = 0.5
keep_bucket: Sequence[str] = []
lm_dir: Path = Path("data/lm_sp")
cutoff: Path = CUTOFF_CSV
lm_languages: Optional[Sequence[str]] = None
mine_num_processes: int = 16
target_size: str = "4G"
cleanup_after_regroup: bool = False
task_parallelism: int = -1
pipeline: Sequence[str] = DEFAULT_PIPELINE
experiments: Sequence[str] = []
cache_dir: Optional[Path] = None
def get_executor(
self, name: str, timeout_hour: int = 1, mem_gb: int = 1, cpus: int = 1
) -> Executor:
name = "_".join((name, self.config_name, *self.experiments))
return execution.get_executor(
name,
self.output_dir / "logs",
self.execution,
timeout_hour=timeout_hour,
mem_gb=mem_gb,
cpus=cpus,
task_parallelism=self.task_parallelism,
)
def get_cc_shard(self, shard: int) -> process_wet_file.CCShardReader:
dump_cache: Optional[Path] = None
if self.cache_dir:
self.cache_dir.mkdir(exist_ok=True)
dump_cache = self.cache_dir / self.dump
dump_cache.mkdir(exist_ok=True)
return process_wet_file.CCShardReader(
self.dump,
shard=shard,
num_shards=self.num_shards,
num_segments_per_shard=self.num_segments_per_shard,
min_len=self.min_len,
cache_dir=dump_cache,
)
@classmethod
def from_json(cls, json_file: Path) -> "Config":
raw_lines = json_file.read_text().splitlines()
raw_lines = [l for l in raw_lines if not l.strip().startswith("//")]
json_config = json.loads("".join(raw_lines))
path_keys = ["cache_dir", "lm_dir", "output_dir"]
for key in path_keys:
if key in json_config:
json_config[key] = Path(json_config[key])
return Config(**json_config)
@property
def will_split(self) -> bool:
return "split_by_lang" in self.pipeline or "split_by_segment" in self.pipeline
def get_lm_languages(self) -> Sequence[str]:
if self.lm_languages is not None:
return self.lm_languages
if self.lang_whitelist:
return self.lang_whitelist
languages = [m.name.split(".")[0] for m in self.lm_dir.glob("*.arpa.bin")]
if self.lang_blacklist:
languages = [l for l in languages if l not in self.lang_blacklist]
return languages
def get_mined_dir(self, regroup: bool = False) -> Path:
if self.will_split and not regroup:
return self.output_dir / f"{self.mined_dir}_split" / self.dump
return self.output_dir / self.mined_dir / self.dump
BASE_CONFIG = Config()
BYLANG_CONFIG = Config(
config_name="by_lang",
mined_dir="mined_by_lang",
pipeline=list(BASE_CONFIG.pipeline[:-1]) + ["split_by_lang"],
)
REPRODUCE_CONFIG = Config(
config_name="reproduce",
dump="2019-09",
mined_dir="reproduce",
pipeline=["fetch_metadata", "keep_lang", "keep_bucket", "split_by_lang"],
metadata="https://dl.fbaipublicfiles.com/cc_net/1.0.0",
# Optional filtering:
# It won't change much the execution speed, but decreases the disk requirement.
# Restrict languages
lang_whitelist=["fr"],
# Restrict perplexity buckets
# Top languages have been split in perplexity buckets according
# to a Wikipedia trained LM.
# The buckets from low perplexity (good) to high (bad) are:
# ["head", "middle", "tail"]
# Languages without a LM have only one bucket "all".
# It won't change much the execution speed, but decreases the disk requirement.
keep_bucket=["head", "all"],
mine_num_processes=1,
)
TEST_CONFIG = BASE_CONFIG._replace(
config_name="test",
dump="2019-09",
output_dir=Path("test_data"),
execution="local",
num_shards=4,
num_segments_per_shard=1,
hash_in_mem=2,
mine_num_processes=2,
lang_whitelist=["de", "it", "fr"],
target_size="32M",
cleanup_after_regroup=False,
cache_dir=Path("test_data/wet_cache"),
)
PREDEF_CONFIGS = {
"base": BASE_CONFIG,
"by_lang": BYLANG_CONFIG,
"test": TEST_CONFIG,
"test_slurm": TEST_CONFIG._replace(execution="slurm,partition=dev"),
"debug": TEST_CONFIG._replace(config_name="debug", mine_num_processes=0),
"reproduce": REPRODUCE_CONFIG,
"augment": BASE_CONFIG._replace(
config_name="augment", dump="2019-13", lang_blacklist=["en"]
),
}
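# Hedged usage sketch (not executed on import): start from one of the presets
# above and override a few fields programmatically, the same way `main` below
# does with `conf._replace(**overrides)` for CLI arguments.
def _example_config_override() -> None:
    conf = BASE_CONFIG._replace(dump="2019-09", num_shards=8)
    print(conf.get_mined_dir())  # e.g. data/mined_split/2019-09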
def tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def finalize(tmp_output: Path, output: Path) -> None:
if not tmp_output.exists():
warnings.warn(f"Targeted tmp output {tmp_output} doesn't exists.")
return
tmp_index = tmp_output.parent / (tmp_output.name + ".index")
tmp_output.rename(output)
if tmp_index.exists():
tmp_index.rename(output.parent / (output.name + ".index"))
def _transpose(iterable: Sequence[Tuple[Any, ...]], n=-1) -> Tuple[List, ...]:
if n < 0:
n = len(iterable[0])
columns: tuple = tuple([] for _ in range(n))
for row in iterable:
assert len(row) == n, f"Found tuple of len({len(row)}, expected {n}: {row}"
for i in range(n):
columns[i].append(row[i])
return columns
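# For illustration: _transpose turns the (shard, output) pairs used below into
# the per-argument lists that the executors expect.
def _example_transpose() -> None:
    shards, outputs = _transpose([(0, Path("0000.bin")), (1, Path("0001.bin"))])
    assert shards == [0, 1]
    assert outputs == [Path("0000.bin"), Path("0001.bin")]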
def hashes(conf: Config) -> List[Path]:
"""Computes hashes for each shard."""
hashes_dir = conf.output_dir / "hashes" / conf.dump
outputs = [hashes_dir / f"{shard:04d}.bin" for shard in range(conf.num_shards)]
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if not missing_outputs:
return outputs
hashes_dir.mkdir(parents=True, exist_ok=True)
# With FlatHashSet we need ~2Gb of RAM / shard, but we need to account for
# overhead due to how the dynamic allocation works.
ex = conf.get_executor(f"hashes_{conf.dump}", mem_gb=4, timeout_hour=6, cpus=2)
ex(_hashes_shard, repeat(conf), *_transpose(missing_outputs))
# Wait a bit so that files appears on the disk.
time.sleep(20)
assert all(o.exists() for o in outputs)
return outputs
def _hashes_shard(conf: Config, shard: int, output: Path):
tmp_output = tmp(output)
jsonql.run_pipes(
dedup.HashesCollector(field="raw_content", output=tmp_output),
inputs=conf.get_cc_shard(shard),
)
finalize(tmp_output, output)
return f"Hashed {output}"
HASHES_IN_MEM = [0, 1, 2, 5, 10, 20, 50, 100, 200, 400]
def mine(conf: Config) -> List[Path]:
"""Remove dups, run LID and LMs, and split by lang and quality."""
mined_dir = conf.get_mined_dir()
if conf.min_shard == -1:
shard_range = list(range(conf.num_shards))
else:
shard_range = list(range(conf.min_shard, conf.num_shards))
if conf.will_split:
        # Give directories when splitting
outputs = [mined_dir / f"{shard:04d}" for shard in shard_range]
else:
# Files otherwise
outputs = [
mined_dir / f"{shard:04d}.json.gz" for shard in shard_range
]
if "mini_again" in conf.experiments:
mined_dir = conf.output_dir / "mini_again" / conf.dump
outputs = [mined_dir / f"{shard:04d}" for shard in shard_range]
# TODO: try to reduce this / make it a function of "hash_in_mem" / num_langs
mem_gb = 60 + 1 * conf.hash_in_mem
timeout_hour = 5
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
outputs = [
conf.output_dir / f"hashes_exp/{conf.dump}_0000_dedup{h:03d}.json.gz"
for h in HASHES_IN_MEM
]
mem_gb = int(max(HASHES_IN_MEM) * 1.2)
timeout_hour = 8
missing_outputs = [(shard, o) for shard, o in enumerate(outputs) if not o.exists()]
if "mini_again" in conf.experiments:
missing_outputs = [
(shard, o)
for shard, o in enumerate(outputs)
if shard in [5, 139] and not o.exists()
]
if not missing_outputs:
return outputs
mined_dir.mkdir(parents=True, exist_ok=True)
ex = conf.get_executor(
f"mine_{conf.dump}",
mem_gb=mem_gb,
timeout_hour=timeout_hour,
cpus=conf.mine_num_processes + 1,
)
    # Compute hashes first.
if "dedup" in conf.pipeline:
hashes_groups = list(jsonql.grouper(hashes(conf), conf.hash_in_mem))
hashes_files: Iterable[List[Path]] = [
hashes_groups[shard // conf.hash_in_mem] for shard, o in missing_outputs
]
else:
hashes_files = repeat([])
ex(_mine_shard, repeat(conf), hashes_files, *_transpose(missing_outputs))
assert all(o.exists() for o in outputs)
return outputs
def _get_segment(tmp_output: Path, doc: dict) -> str:
segment: str = doc["cc_segment"].split("/")[-1]
return str(tmp_output / segment.replace(".warc.wet.gz", ".json.gz"))
def _mine_shard(conf: Config, hashes: List[Path], shard: int, output: Path) -> str:
print(conf.pipeline)
assert conf.pipeline
tmp_output = tmp(output)
if "hashes" in conf.experiments:
# HACK: used for generating paper figures
hashes_in_mem = shard
hashes = hashes[: HASHES_IN_MEM[hashes_in_mem]]
shard = 0
cc_shard = conf.get_cc_shard(shard)
steps: Dict[str, Optional[jsonql.Transformer]] = {}
lang_id = Path("bin") / "lid.bin"
steps["lid_before_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_before_dedup", top=5
)
steps["dedup"] = dedup.DuplicatesRemover(field="raw_content", hashes_files=hashes)
steps["lid"] = split_by_lang.Classifier(
model=lang_id,
field="raw_content",
out_field="language",
top=1,
threshold=conf.lang_threshold,
)
steps["lid_after_dedup"] = split_by_lang.Classifier(
model=lang_id, field="raw_content", out_field="lid_after_dedup", top=5
)
if conf.lang_blacklist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") not in set(conf.lang_blacklist)]
)
elif conf.lang_whitelist:
steps["keep_lang"] = jsonql.where(
[lambda doc: doc.get("language") in set(conf.lang_whitelist)]
)
else:
steps["keep_lang"] = None
tok_field = "tokenized"
steps["sp"] = perplexity.MultiSentencePiece(
{l: conf.lm_dir / f"{l}.sp.model" for l in conf.get_lm_languages()},
field="raw_content",
output_field=tok_field,
normalize=True,
)
steps["lm"] = perplexity.DocLM(
{l: conf.lm_dir / f"{l}.arpa.bin" for l in conf.get_lm_languages()},
field=tok_field,
output_field="perplexity",
normalize=False, # Normalization is done before SentencePiece
# load_method=kenlm.LoadMethod.PARALLEL_READ,
)
steps["pp_bucket"] = perplexity.PerplexityBucket(CUTOFF_CSV)
steps["drop"] = perplexity.DropKeys(tok_field)
steps["keep_bucket"] = None
if conf.keep_bucket:
steps["keep_bucket"] = jsonql.where(
[lambda doc: doc.get("bucket", "all") in conf.keep_bucket]
)
if "fetch_metadata" in conf.pipeline:
# TODO: better default
assert conf.metadata is not None
steps["fetch_metadata"] = minify.MetadataFetcher(
f"{conf.metadata}/{conf.dump}/"
)
steps["minify"] = minify.Minifier()
pattern = str(tmp_output / "{language}_{bucket}.json.gz")
steps["split_by_lang"] = jsonql.split(pattern=str(pattern), mkdir=True)
steps["split_by_segment"] = jsonql.split(
split_fn=lambda doc: _get_segment(tmp_output, doc), mkdir=True
)
pipeline = filter(None, (steps[s] for s in conf.pipeline))
jsonql.run_pipes(
*pipeline,
inputs=cc_shard,
processes=conf.mine_num_processes,
chunksize=100,
# The splitter takes care of writing to files.
output=tmp_output if not conf.will_split else None,
)
finalize(tmp_output, output)
return f"Mined {output}"
def regroup(conf: Config, all_dirs: List[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs
all_files = [f for d in all_dirs for f in d.glob("*.json.gz")]
if not all_files:
print(f"No .json.gz file found in {all_dirs[0]}")
splits: Dict[str, List[Path]] = defaultdict(list)
for f in all_files:
split = f.name.split(".")[0]
splits[split].append(f)
print(f"Identified {len(all_files)} files to regroup from {len(splits)} splits.")
inputs: List[List[Path]] = []
outputs: List[Path] = []
target_size = jsonql.parse_size(conf.target_size)
for split, files in splits.items():
cuts = list(regroup_module.determine_groups(files, target_size=target_size))
if not cuts:
continue
pattern = f"{split}_????.json.gz"
existing_outputs = sorted(regroup_dir.glob(pattern))
if not conf.cleanup_after_regroup:
# We still have all the inputs so it is safe to overwrite existing outputs.
assert len(existing_outputs) <= len(cuts)
existing_outputs = []
if len(existing_outputs) > 0 and len(cuts) == 1:
# append to existing file if size allows it.
new_size = (
sum(f.stat().st_size for f in cuts[0])
+ existing_outputs[-1].stat().st_size
)
if new_size < target_size:
print(f"Will append {cuts[0]} to {existing_outputs[-1]}")
cuts[0].insert(0, existing_outputs.pop(-1))
n_existing = len(existing_outputs)
for i, cut in enumerate(cuts):
# avoid overwriting existing files.
j = i + n_existing
output = regroup_dir / f"{split}_{j:04}.json.gz"
inputs.append(cut)
outputs.append(output)
print(
str(regroup_dir / pattern),
"->",
len(cuts),
f"shards ({n_existing} already there).",
)
ex = conf.get_executor(f"regroup_{conf.dump}", mem_gb=1, timeout_hour=12, cpus=2)
ex(_regroup, repeat(conf), inputs, outputs)
return regroup_dir
def _regroup(conf: Config, inputs: List[Path], output: Path) -> str:
output.parent.mkdir(parents=True, exist_ok=True)
regroup_module.fast_reshard(
inputs, output, tmp=tmp(output), rm_original=conf.cleanup_after_regroup
)
return f"Regrouped {output}"
def move_segments(conf: Config, all_dirs: Sequence[Path]) -> Path:
"""Reshards each language/quality after 'mine'."""
# check that mining is over.
regroup_dir = conf.get_mined_dir(regroup=True)
assert all_dirs, "Received no dirs to move"
assert all(
d.is_dir() for d in all_dirs
), f"move_segments was expecting dirs received files: {all_dirs[:10]}..."
regroup_dir.parent.mkdir(exist_ok=True)
regroup_dir.mkdir(exist_ok=True)
ex = conf.get_executor(f"moveseg_{conf.dump}", mem_gb=1, timeout_hour=1, cpus=2)
def _move_segments(subdir: Path, regroup_dir: Path) -> str:
n = 0
for f in subdir.iterdir():
if not f.is_file() or f.is_symlink():
continue
n += f.name.endswith(".json.gz")
new_name = regroup_dir / f.name
target = new_name.resolve()
assert f.resolve() != target
            # this makes the job idempotent.
f.rename(new_name)
f.symlink_to(target)
if n == 0:
return ""
return f"Moved {n} .json.gz files from {subdir} to {regroup_dir}"
ex(_move_segments, all_dirs, repeat(regroup_dir))
print(f"Results are in {regroup_dir}")
return regroup_dir
def _validate_test(conf: Config, output_dir: Path, generate: bool = False):
stats: Dict[str, dict] = {}
for file in sorted(output_dir.glob("*.json.gz")):
fname = "/".join((file.parent.name, file.name))
        # The order of documents is not guaranteed inside a shard, so sort the lines before hashing.
lines = sorted(jsonql.open_read(file))
content = "\n".join(lines)
size = len(content)
checksum = hashlib.sha1(bytes(content, encoding="utf-8")).hexdigest()
# first_document = json.loads(lines[0])
stats[fname] = {"size": size, "checksum": checksum}
def dump(x):
return json.dumps(x, indent=2, ensure_ascii=False)
print("*** Stats ***")
stats_raw = dump(stats)
stats_file = FILE_DIR / "data" / "test_stats.json"
if generate:
print("Saving stats to", stats_file)
stats_file.write_text(stats_raw)
return
expected_stats: Dict[str, dict] = {}
if stats_file.exists():
expected_stats = json.loads(stats_file.read_text())
if expected_stats == stats:
print("Everything looks good !")
return
stats_file.with_suffix(".actual.json").write_text(stats_raw)
print("*** Expected Stats ***")
print(dump(expected_stats))
print("*** Diff ***")
for fname in sorted(expected_stats.keys()):
print(fname)
        assert fname in stats, "missing file " + fname
if expected_stats[fname]["size"] != stats[fname]["size"]:
print(
" - Expected size",
expected_stats[fname]["size"],
", size",
stats[fname]["size"],
)
if expected_stats[fname]["checksum"] != stats[fname]["checksum"]:
print(
" - Expected checksum",
expected_stats[fname]["checksum"],
", checksum",
stats[fname]["checksum"],
)
def get_main_parser() -> ArgumentParser:
# Generates the 'main' parser by patching a 'Config' parser
p = func_argparse.func_argparser(Config)
    # Override default values to None, so we know what was set by the user.
# Note that it will keep the original default values in the help message.
p.set_defaults(**{f: None for f in Config._fields})
p.add_argument("--config", type=str, default="base")
p.set_defaults(__command=main)
return p
def main(config: str = "base", **config_as_dict: Any) -> None:
# Use the given 'config' as default value.
config_base = config
if config_base in PREDEF_CONFIGS:
conf = PREDEF_CONFIGS[config_base]
elif Path(config_base).exists():
conf = Config.from_json(Path(config_base))
else:
raise ValueError(
f"Invalid value {config_base} for --config. "
f"Choose from ({', '.join(PREDEF_CONFIGS)}) or give an existing .json file."
)
conf = conf._replace(**{k: v for (k, v) in config_as_dict.items() if v is not None})
print(f"Will run cc_net.mine.main with the following config:", conf)
all_files = mine(conf)
if conf.will_split:
assert all_files
assert all(d.is_dir() for d in all_files)
all_dirs = all_files
if "split_by_lang" in conf.pipeline:
# Only try regrouping if we split the shards.
regroup(conf, all_dirs)
elif "split_by_segment" in conf.pipeline:
# If we split by segment then regrouping is trivial, since segments appear in only one shard.
move_segments(conf, all_dirs)
if conf.config_name == "test":
_validate_test(conf, conf.get_mined_dir(regroup=True))
if __name__ == "__main__":
func_argparse.parse_and_call(get_main_parser())
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/mine.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Creates mono-lingual corpus from Wikipedia.
"""
import functools
import re
import subprocess
import urllib.request
from pathlib import Path
from typing import Dict
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql, text_normalizer
CIRRUS_URL = "https://dumps.wikimedia.org/other/cirrussearch"
CIRRUS_DUMP_RE = re.compile(r"^(.*)wiki-\d+-cirrussearch-content\.json\.gz")
def tmp(file: Path) -> Path:
return file.parent / ("tmp." + file.name)
def opening(file: Path, output: Path = None, n_docs: int = 1_000_000):
"""Will dump the tokenized opening text of the given Wikipedia.
Args:
- file: File containing the Wikipedia dump.
- output: Output file.
- n_docs: How many docs to parse
"""
assert file.exists()
    jsonql.run_pipes(
functools.partial(extract_opening_text, n_docs=n_docs),
file=file,
output=tmp(output) if output else None,
)
if output:
tmp(output).replace(output)
def extract_opening_text(source, n_docs: int = 10_000):
i = 0
for doc in jsonql.read_jsons(source):
if not doc:
continue
text = doc.get("opening_text")
if not text:
continue
yield text_normalizer.normalize(text)
i += 1
if i >= n_docs:
break
def dl(lang: str, output_dir: Path, date: str = None):
"""Download the cirrus extract for the given lang.
See https://dumps.wikimedia.org/other/cirrussearch for the full list of files.
Args:
- lang: The Wikipedia code for the language.
- output_dir: Output directory. File will be `{lang}.json.gz`
- date: Date of a specific Cirrus dump.
"""
urls = get_cirrus_urls(date)
assert (
lang in urls
), f"--lang {lang} not found. Available languages are: {urls.keys()}"
assert output_dir, "--output_dir folder needed."
output_dir.mkdir(exist_ok=True)
output = output_dir / (lang + ".json.gz")
print(f"Downloading {lang} wiki from {urls[lang]} to {output}")
wget(urls[lang], output)
def get_cirrus_urls(date: str = None) -> Dict[str, str]:
if date is None:
cirrus_page = BeautifulSoup(
urllib.request.urlopen(CIRRUS_URL), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in cirrus_page.findAll("a")]
dumps.remove("..")
dumps.remove("current")
# We take the oldest dump since the most recent might be incomplete.
        # The page only links to the N latest dumps so the dump won't be too old.
date = min(dumps)
cirrus_url = "/".join((CIRRUS_URL, date))
print("Will use the Wikipedia dump from:", date, cirrus_url)
cirrus_page = BeautifulSoup(
urllib.request.urlopen(cirrus_url), features="html.parser"
)
urls = {}
for link in cirrus_page.findAll("a"):
match = CIRRUS_DUMP_RE.match(link.get("href"))
if not match:
continue
urls[match.group(1)] = "/".join([cirrus_url, link.get("href")])
assert urls, f"No valid download urls found at {cirrus_url}"
return urls
def wget(url: str, output: Path):
subprocess.run(["wget", url, "-O", tmp(output), "-q"], check=True)
tmp(output).replace(output)
assert (
output.stat().st_size > 10_000
), f"File {output} downloaded from {url} looks too small"
if __name__ == "__main__":
func_argparse.main(dl, opening)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/get_wiki_cirrus.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Manipulate files containing one json per line.
"""
import argparse
import collections
import contextlib
import functools
import glob
import gzip
import importlib
import inspect
import io
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import tempfile
import time
import typing as tp
import warnings
import zlib
from pathlib import Path
from typing import (
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import numpy as np
import psutil # type: ignore
import requests
from typing_extensions import Protocol
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(process)d:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M",
)
NEWLINE = " N3WL1N3 "
FilterFn = Callable[[dict], bool]
FileDescriptor = Union[Path, List[Path], str]
WritableFileLike = Union[FileDescriptor, TextIO, "SimpleIO", None]
ReadableFileLike = Union[Iterable[str], FileDescriptor, None]
def io_parser():
"""Parser shared by all commands to get input/output files."""
parser = argparse.ArgumentParser(add_help=False)
file_help = """File to read from. Can be specified several times for several files.
Be careful that bash will expand glob patterns **before** sending the args
to python. To use globs put it inside single quotes:
jsonql where --file 'data/perplexity/*.json' '{length} > 100' | head -1
jsonql --file 'data/perplexity/*.json' where '{length} > 100' | head -1
[Invalid] jsonql where '{length} > 100' --file data/perplexity/*.json | head -1
[Invalid] jsonql where --file data/perplexity/*.json '{length} > 100' | head -1
"""
parser.add_argument("-f", "--file", type=Path, action="append", help=file_help)
parser.add_argument("-o", "--output", type=Path, default="-")
parser.add_argument("--processes", type=int, default=1)
return parser
def get_parser():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them"
)
subparsers = parser.add_subparsers()
def add_subparser(function, arguments):
doc = function.__doc__.split("\n")[0]
p = subparsers.add_parser(function.__name__, help=doc, parents=[io_parser()])
p.set_defaults(command=function)
for k, v in arguments.items():
p.add_argument(k, **v)
add_subparser(
select,
{
"columns": dict(nargs="+", help="Extract the value of the given fields"),
"--skip_empty": dict(
action="store_true", help="Skip lines without the requested fields"
),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE,
help="Replace newlines found in the text by the given string",
),
},
)
add_subparser(
where,
{
"clauses": dict(nargs="+", help=""),
"--requires": dict(
action="append", help="Python module required by the clauses code."
),
},
)
add_subparser(
merge,
{
"columns": dict(nargs="+", help=""),
"--separator": dict(
default="\t", help="Separator to use between the different columns"
),
"--newline": dict(
default=NEWLINE, help="Replace the given string by actual newlines"
),
},
)
add_subparser(
describe,
{
"columns": dict(nargs="*", help=""),
"--bins": dict(
default="auto", help="Number of bins for computing the histograms"
),
"--cumulative": dict(
action="store_true", help="Compute cumulative histograms"
),
"--weights": dict(type=str, help="Column used to weight histograms"),
},
)
add_subparser(split, {"--pattern": dict(type=str)})
add_subparser(shard, {})
return parser
def _split_array(array, sep):
last = 0
for i, x in enumerate(array):
if x != sep:
continue
yield array[last:i]
last = i + 1
if last != len(array):
yield array[last:]
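# For illustration: `main` below uses _split_array to cut the raw argv-style
# argument list into one group per pipeline stage, separated by "--".
def _example_split_array() -> None:
    args = ["select", "text", "--", "where", "{length} > 100"]
    groups = list(_split_array(args, "--"))
    assert groups == [["select", "text"], ["where", "{length} > 100"]]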
def main(raw_args):
parser = get_parser()
pipeline = []
file = "-"
output = "-"
processes = 1
for args_group in _split_array(raw_args, "--"):
args = vars(parser.parse_args(args_group))
command = args.pop("command")
file = args.pop("file") or file
output = args.pop("output") or output
processes = args.pop("processes") or processes
pipeline.append(as_pipe(command, args))
if not pipeline:
parser.print_help()
return
run_pipes(*pipeline, file=Path(file), output=Path(output), processes=processes)
class Transformer:
"""
Wrapper around functions transforming documents.
This allows `run_pipes` to automatically parallelize the pipeline.
Provides:
* Automatic logging. Logging can be changed with the `summary` method.
      Logging frequency is set with _log_freq (in seconds) or the $JSONQL_LOG_FREQ env variable.
* Automatic parallelization without pickling. The transformers are shared
across processes, and the object is usually not pickled.
* Basic pickling / unpickling in case it's still needed.
By default will only pickle the arguments passed to the constructor.
    * Delayed initialization. Internal state which is not picklable should be set
inside the `_prepare` function.
"""
parallelisable: bool = True
expect_json: bool = False
warn_when_pickling: bool = False
ready: bool = False
def __init_subclass__(cls, expect_json: bool = None):
"""Detects if the subclass expects json as input."""
spec = inspect.getfullargspec(cls.do)
if expect_json is None:
expect_json = spec.annotations.get(spec.args[1], None) == dict
cls.expect_json = expect_json
def __new__(cls, *args, **kwargs):
"""Creates the transformer and save the arguments passed to the constructor."""
t = super().__new__(cls)
Transformer.__init__(t, args, kwargs)
return t
def __init__(self, state_args: tuple = None, state_kwargs: dict = None):
"""
Init the transformer counters.
If state_args/state_kwargs are set they will override whatever was
originally passed to the subclass constructor.
"""
if state_args is not None:
self.__args = state_args
if state_kwargs is not None:
self.__kwargs = state_kwargs
self.start_time = time.time()
self.__last_log = self.start_time
self.processed = 0
        # Log every 5 min unless specified otherwise.
self._log_freq = int(os.environ.get("JSONQL_LOG_FREQ", 5 * 60))
self.__cls = type(self)
self._logger = logging.getLogger(self.__cls.__name__)
def __call__(self, x):
assert self.ready, f"{self} is not ready."
if x is None:
return
y = self.do(x)
self.processed += 1
if time.time() - self.__last_log > self._log_freq:
self.log_summary()
return y
def do(self, x):
raise NotImplementedError(f"'do' not implemented in {type(self)}")
def summary(self) -> List[str]:
return [self.speed_summary()]
def speed_summary(self) -> str:
delay = time.time() - self.start_time
h = delay / 3600
s = self.processed / delay
return f"Processed {self.processed:_} documents in {h:.2}h ({s:5.1f} doc/s)."
def log(self, message):
self._logger.info(message)
def log_summary(self) -> None:
if not self.ready:
self.log("Not ready.")
return
summ = self.summary() or []
for line in summ:
self.log(line)
self.__last_log = time.time()
def map(self, source: Iterable) -> Iterator:
if self.ready:
for x in source:
yield self(x)
# since we have been prepared by caller,
# caller is also responsible for calling `close`.
return
else:
with self:
for x in source:
yield self(x)
def __getstate__(self) -> Tuple[tuple, dict, bool]:
return (self.__args, self.__kwargs, self.expect_json)
def __setstate__(self, state: Tuple[tuple, dict, bool]):
if self.warn_when_pickling:
warnings.warn(f"Unpickling transformer: {type(self)}. This can be slow.")
(args, kwargs, expect_json) = state
        # When unpickling, `__new__` isn't called so we have to do it ourselves.
Transformer.__init__(self, state_args=args, state_kwargs=kwargs)
type(self).__init__(self, *args, **kwargs)
assert self.expect_json == expect_json
# __setstate__ is called by multiprocessing right before calling
# the object so we need to initialize everything.
self.__enter__()
def _prepare(self) -> None:
pass
def __enter__(self) -> "Transformer":
# In multiprocessing __enter__ is always called twice, so we are idempotent.
# Because we call __enter__ when deserializing this transformer and
# also when the parent transformer is deserialized.
self.start_time = time.time()
if self.ready:
return self
self._prepare()
self.ready = True
return self
def __exit__(self, *args) -> None:
self.close()
self.log_summary()
def close(self) -> None:
pass
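# Minimal sketch of the Transformer contract described in the docstring above;
# it is not used by cc_net itself. Because `do` annotates its argument as
# `dict`, __init_subclass__ marks it as expecting json documents.
class _ExampleFieldUpper(Transformer):
    def __init__(self, field: str = "raw_content"):
        super().__init__()
        self.field = field
    def do(self, doc: dict) -> dict:
        # upper-case one field of the document; illustrative only
        doc[self.field] = doc.get(self.field, "").upper()
        return doc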
def as_pipe(transformer, kwargs):
if isinstance(transformer, type):
return transformer(**kwargs)
return lambda source: transformer(source, **kwargs)
def compose(fns: List[Transformer]) -> Transformer:
if len(fns) == 1:
return fns[0]
return MultiTransformer(fns)
class MultiTransformer(Transformer):
def __init__(self, transformers: List[Transformer]):
super().__init__()
self.transformers = transformers
def __repr__(self) -> str:
pipeline = " | ".join(type(t).__name__ for t in self.transformers)
return f"<{pipeline}>"
def do(self, x):
for t in self.transformers:
x = t(x)
return x
def _prepare(self):
for t in self.transformers:
t.__enter__()
return self
def __exit__(self, *args):
for t in self.transformers:
t.__exit__(*args)
def summary(self):
return itertools.chain(*(t.summary() for t in self.transformers))
class Mapper(Transformer):
def __init__(self, fn):
super().__init__()
self.fn = fn
def do(self, x):
return self.fn(x)
def run_pipe(
command,
kwargs: dict = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
):
kwargs = kwargs or {}
if isinstance(kwargs, argparse.ArgumentParser):
kwargs = vars(kwargs.parse_args())
file = file or Path(kwargs.pop("file", "-"))
output = output or Path(kwargs.pop("output", "-"))
return run_pipes(as_pipe(command, kwargs), file=file, output=output)
def run_pipes(
*fns: Union[Transformer, Callable[[Iterable], Iterable]],
inputs: Iterable[dict] = None,
file: ReadableFileLike = None,
output: WritableFileLike = None,
processes: int = 1,
chunksize: int = 10_000,
):
"""
Run full document processing pipeline.
- fns: list of functions to run over the documents. Can be:
* `Iterable -> Iterable` function
* jsonql.Transformer instance
        Using transformers allows the pipeline to process documents in parallel.
- inputs: iterable to read the documents from
- file: if inputs is not given, will read documents from this file.
- output: writable file like.
- processes: number of processes to use. -1 means all CPU available.
- chunksize: chunksize for multiprocessing.Pool.imap_unordered
"""
expect_json = len(fns) and isinstance(fns[0], Transformer) and fns[0].expect_json
if expect_json and inputs is None:
fns = (JsonReader(),) + fns
transformers = []
for t in fns:
if not isinstance(t, Transformer):
break
if not t.parallelisable:
break
transformers.append(t)
pipes = fns[len(transformers) :]
log = logging.getLogger(__name__).info
if inputs is None:
data: Iterable = open_read(file)
else:
data = inputs
if processes == -1:
processes = os.cpu_count() or 0
with contextlib.suppress(BrokenPipeError), contextlib.ExitStack() as stack:
if transformers:
log(f"preparing {transformers}")
transform = stack.enter_context(compose(transformers))
if processes <= 1:
data = transform.map(data)
else:
p = multiprocessing.current_process()
log(f"Will start {processes} processes from {p.name}, Pid: {p.pid}")
pool = stack.enter_context(
multiprocessing.Pool(
processes=processes,
initializer=_set_global_transformer,
initargs=(transform,),
)
)
data = pool.imap_unordered(
_global_transformer, data, chunksize=chunksize
)
for fn in pipes:
if isinstance(fn, Transformer):
data = fn.map(data)
else:
data = fn(data)
write_jsons(data, output)
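# Hedged usage sketch for run_pipes: read json documents from a (hypothetical)
# file, keep only the long ones with the `where` transformer defined further
# down, and write them back out. Not called anywhere in cc_net.
def _example_run_pipes(input_file: Path = Path("docs.json"),
                       output_file: Path = Path("long_docs.json")) -> None:
    run_pipes(
        where(["len({raw_content}) > 100"]),
        file=input_file,
        output=output_file,
        processes=1,
    )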
# Allows sharing the transformer across subprocesses.
# Used by `run_pipes`
_GLOBAL_TRANSFORMER: Optional[Transformer] = None
def _set_global_transformer(transformer: Transformer):
global _GLOBAL_TRANSFORMER
p = multiprocessing.current_process()
logging.info(
f"Started subprocess {p.name}:{p.pid} from {os.getppid()} for {transformer}"
)
assert transformer.ready, f"{transformer} isn't ready"
_GLOBAL_TRANSFORMER = transformer
def _global_transformer(document: str) -> Optional[dict]:
assert _GLOBAL_TRANSFORMER is not None
return _GLOBAL_TRANSFORMER(document)
def lines(file: ReadableFileLike) -> Iterator[str]:
return (line.strip("\n") for line in open_read(file))
def read_jsons(file: ReadableFileLike, strict=False) -> Iterator[dict]:
reader = JsonReader(strict=strict)
lines = open_read(file)
for line in lines:
if line is None:
continue
yield reader(line)
reader.log_summary()
def write_jsons(source: Iterable[dict], file: WritableFileLike) -> None:
eol = os.linesep
with open_write(file) as o:
for res in source:
if res is None:
continue
if isinstance(res, dict):
json.dump(res, o, ensure_ascii=False)
o.write(eol)
continue
if isinstance(res, str):
res = res.rstrip("\n")
print(res, file=o)
class JsonReader(Transformer):
def __init__(self, strict: bool = False):
super().__init__()
self.ready = True
self.strict = strict
self.num_errors = 0
def do(self, line: str) -> Optional[dict]:
if line is None:
return None
if isinstance(line, dict):
return line
line = line.rstrip("\n")
if not line:
return None
try:
return json.loads(line)
except json.decoder.JSONDecodeError as e:
self.log_error(e)
if self.strict:
raise
return None
def log_error(self, e: json.decoder.JSONDecodeError):
self.num_errors += 1
if self.num_errors > 10:
return
MAX_LEN = 80
snippet, snippet_len = e.doc, len(e.doc)
col = e.pos
if snippet_len > MAX_LEN:
if col < MAX_LEN:
start = 0
elif snippet_len - col < MAX_LEN:
start = snippet_len - MAX_LEN
else:
start = col - MAX_LEN // 2
snippet = e.doc[start : start + MAX_LEN]
col = col - start
logging.warning(
"\n".join(
[
f"Invalid json (length={len(e.doc)}) {e}",
snippet,
" " * (col - 1) + "^",
]
)
)
def summary(self):
summ = super().summary()
if self.num_errors > 0:
summ.append(f"Skipped {self.num_errors} invalid json.")
return summ
def compile_column(column, newline):
if callable(column):
return column
if column == "*":
return json.dumps
if re.match(r"[_a-z][_a-z0-9]*", column):
def extract_col(doc):
v = doc.get(column, "")
if isinstance(v, str) and newline != "\n":
v = v.rstrip("\n").replace("\n", newline)
return v
return extract_col
return compile_expr(column)
def select(lines, columns, skip_empty=False, separator="\t", newline="\n"):
"""Yields the content of the requested columns."""
column_parsers = [compile_column(c, newline) for c in columns]
for doc in read_jsons(lines):
values = []
empty = True
for parse_col in column_parsers:
v = parse_col(doc)
values.append(str(v) or "")
empty = empty and v is None
if skip_empty and empty:
continue
yield separator.join(values)
def compile_expr(clause: Union[str, FilterFn], requires: List[str] = None):
if not isinstance(clause, str):
return clause
args_re = r"(?i:\{([_a-z][_a-z0-9]*)\})"
args_list = list(re.findall(args_re, clause))
if not args_list:
# This is only a warning because you may want to have eg random sampling
# that doesn't depend on the document.
logging.warn(
f"Warning: No variable found in expression: <{clause}>\n"
"Variables should be written inside braces, eg: {language}=='en'"
)
python_like = re.sub(args_re, r"doc.get('\1', None)", clause)
requires = requires or []
modules = {r: importlib.import_module(r) for r in requires}
return eval(f"lambda doc: {python_like}", modules)
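# Illustrative sketch (hypothetical example, not from the original module): `compile_expr`
# turns a clause string into a filter function; names inside braces become `doc.get(...)`.
def _example_compile_expr() -> bool:
    is_long_english = compile_expr("{language} == 'en' and len({raw_content}) > 100")
    return is_long_english({"language": "en", "raw_content": "x" * 200})  # True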
class where(Transformer):
"""Filters the data using python code.
Ex: `jsonql where 'len({text}) > 100'`
"""
def __init__(
self, clauses: Sequence[Union[str, FilterFn]], requires: List[str] = []
):
super().__init__()
self.raw_clauses = clauses
self.requires = requires
self.n_selected = 0
self.clauses: List[FilterFn] = []
def _prepare(self):
self.clauses = [compile_expr(c, self.requires) for c in self.raw_clauses]
def do(self, doc: dict) -> Optional[dict]:
assert self.clauses
if not doc or not all((c(doc) for c in self.clauses)):
return None
self.n_selected += 1
return doc
def summary(self):
n_selected, n_docs = self.n_selected, self.processed
selectivity = n_selected / n_docs if n_docs else 0
return [f"Selected {n_selected} documents out of {n_docs} ({selectivity:5.1%})"]
def merge(lines, columns, separator="\t", newline=NEWLINE):
    """Reads tab-separated columns and outputs a json using the given headers.
Headers are of form {key}[%{type}]
{type} can be one of {"f": float, "i": int, "b": bool, "s": string}.
Default type is string.
A special header "_" means interpret this column as json, and append all other
    columns to it. Must appear only once and in the last position.
Ex:
`echo '1\thello' | jsonql merge n t` --> `{"n": "1", "t": "hello"}`
    `echo '1\thello' | jsonql merge n%i t` --> `{"n": 1, "t": "hello"}`
`echo '1\thello\t{"f": "bar"}' | jsonql merge n%i t _` --> `{"n": 1, "t": "hello", "f": "bar"}`
"""
handle_newlines = lambda s: s.replace(newline, "\n")
type_mapping: Dict[str, Callable] = {
"f": float,
"i": int,
"b": bool,
"s": handle_newlines,
}
type_parsing = [
type_mapping.get(f.split("%")[-1], handle_newlines) for f in columns
]
columns = [f.split("%")[0] for f in columns]
doc_index = columns.index("_") if "_" in columns else -1
read_json = JsonReader()
def parse(line):
parts = line.split(separator, len(columns) - 1)
doc: Dict[str, tp.Any] = {}
for i, value in enumerate(parts):
if columns[i] == "_":
doc.update(read_json(parts[doc_index]))
else:
try:
doc[columns[i]] = type_parsing[i](value)
except ValueError:
logging.error(
f"Error when parsing column {i} of line: {line[:100]}..."
)
return doc
for line in lines:
yield json.dumps(parse(line))
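# Illustrative sketch (hypothetical example, not from the original module): calling `merge`
# from Python rather than the CLI shown in the docstring; it yields one json string per line.
def _example_merge():
    rows = ["1\thello", "2\tworld"]
    return list(merge(rows, columns=["n%i", "t"]))  # ['{"n": 1, "t": "hello"}', ...]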
class split(Transformer):
    """Split a file into several smaller files based on the value of a field."""
# Not parallelisable since we are writing to files.
parallelisable = False
def __init__(
self,
pattern: Union[Path, str] = None,
split_fn: Callable[[dict], str] = None,
mkdir: bool = False,
):
super().__init__()
assert not (
pattern and split_fn
), "split can't have both a pattern and a split_fn"
if split_fn is not None:
self.split_fn = split_fn
else:
            assert pattern, "split needs either a pattern or a split_fn"
self.split_fn = self.make_split_fn(str(pattern))
self.mkdir = mkdir
self.o: dict = {}
def make_split_fn(self, pattern: str) -> Callable[[dict], str]:
candidates = list(re.findall(r"(?i:\{([_a-z][_a-z0-9]*)\})", pattern))
return lambda doc: pattern.format(**{c: doc[c] for c in candidates})
def do(self, doc):
filename = self.split_fn(doc)
if not filename:
return
o = self.o.get(filename, None)
if o is None:
if self.mkdir:
Path(filename).parent.mkdir(parents=True, exist_ok=True)
self.o[filename] = open_write(filename)
print(json.dumps(doc, ensure_ascii=False), file=self.o[filename], flush=True)
def summary(self):
summ = super().summary()
summ.append(f"Found {len(self.o)} splits.")
return summ
def close(self):
for file in self.o.values():
file.close()
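# Illustrative sketch (hypothetical example, not from the original module): routing documents
# to one output file per language; the "{language}" placeholder is filled from each document.
def _example_split_by_language(docs: Iterable[dict]) -> None:
    with split(pattern="by_lang/{language}.json.gz", mkdir=True) as splitter:
        for doc in docs:
            splitter(doc)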
def histogram(values, bins, weights):
hist, bins = np.histogram(values, bins=bins)
# n_bins = len(hist)
if weights is not None:
# Bins can't be auto-determined if weights is supplied.
# So we first compute the bins without the weights then recompute
# the histogram with the weights.
hist, bins = np.histogram(values, bins=bins, weights=weights)
# cumsum = np.cumsum(hist)
# total = cumsum[-1]
# for i in range(n_bins - 1):
# if cumsum[i] / total > 0.9:
# useful_range = np.linspace(bins[0], bins[i + 1], n_bins)
# new_bins = np.append(useful_range, [bins[-1]])
# return np.histogram(values, bins=new_bins, weights=weights)
return hist, bins
def _parse_bins(bins):
try:
if isinstance(bins, str):
if "," in bins:
bins = [int(b) for b in bins.split(",")]
else:
bins = int(bins)
except ValueError:
pass
return bins
ALL_DOCUMENTS = "<ALL_DOCUMENTS>"
MAX_LABEL_LEN = 100
def bar_chart(hist, bins):
n = sum(hist)
max_h = max(hist)
out = []
for i, h in enumerate(hist):
h_size = 80 * h // max_h
dh_size = 80 * (h - hist[i - 1]) // max_h
if h_size == 0 or dh_size == 0:
continue
bar = "█" * h_size
out.append(f"{bins[i]:8.3f} {bar:80} ({h:5d}, {h / n:5.1%}) {bins[i+1]:8.3f}")
out.append(f"{bins[-1]:8.3f}")
return out
def display_stats(stats, key, weights=None, bins="auto", cumulative=False):
out = []
documents = stats[ALL_DOCUMENTS]
count = stats.get(key, 0)
r = count / documents if documents else 0
out.append(f"Field {key} saw {count} times ({r:5.1%})")
length = stats.get(key + ".length", None)
avg_length = length // count if length else 0
if length is not None:
out[-1] += f", average length is {length // count}"
values = stats.get(key + ".val", None)
if values:
out[-1] += f", histogram is: (bins={bins})"
if weights:
if weights not in stats:
logging.warn(f"Warning: weights column {weights} not found.")
if weights + ".val" not in stats:
logging.warn(
f"Warning: weights column {weights} is not a numeric column."
)
weights = stats.get(weights + ".val")
hist, bins = histogram(values, _parse_bins(bins), weights)
if cumulative:
hist = np.cumsum(hist)
out += bar_chart(hist, bins)
cnt = stats.get(key + ".cnt", None)
if avg_length < MAX_LABEL_LEN and cnt and max(cnt.values()) > 1:
cnt = sorted(cnt.items(), key=lambda kv: kv[1], reverse=True)
out[-1] += ", top 100 labels:"
for label, n in cnt[:100]:
if n < 5:
continue
out.append(f"{label:25}: {n:6} ({n / count:5.1%})")
return out
def describe(source, columns=None, weights=None, **kwargs):
"""Compute some statistics about a dataset.
Stats can be restricted to a subset of columns."""
MAX_HIST_SIZE = 100_000_000
MAX_CNT_SIZE = 1000
stats = {ALL_DOCUMENTS: 0}
needed = columns + [weights] if columns else None
for doc in read_jsons(source):
stats[ALL_DOCUMENTS] += 1
for k, v in doc.items():
if needed and k not in needed:
continue
stats[k] = get_or_set(stats, k, 0) + 1
if isinstance(v, str):
stats[k + ".length"] = get_or_set(stats, k + ".length", 0) + len(v)
if len(v) > MAX_LABEL_LEN: # Don't treat too long string as labels
continue
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
if v in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[v] += 1
elif type(v) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values.append(v)
elif type(v) is list and len(v) and type(v[0]) in (int, float):
values = get_or_set(stats, k + ".val", [])
if len(values) < MAX_HIST_SIZE:
values += v
elif type(v) is dict:
cnt = get_or_set(stats, k + ".cnt", collections.defaultdict(int))
for label in v:
if label in cnt or len(cnt) < MAX_CNT_SIZE:
cnt[label] += 1
documents = stats[ALL_DOCUMENTS]
yield f"Stats computed on {documents} documents:"
for k in stats:
if columns and k not in columns:
continue
if "." in k or k == ALL_DOCUMENTS:
continue
for line in display_stats(stats, k, weights=weights, **kwargs):
yield line
def shard(lines):
    """Shard a file into several smaller ones."""
    # The creation of the shards is handled in a generic way. Do we need this?
return lines
# *** Utils ***
def get_or_set(dictionary, key, default):
if key not in dictionary:
dictionary[key] = default
return dictionary[key]
class SimpleIO(Protocol):
"""A subset of methods from TextIO."""
def close(self) -> None:
...
def write(self, line: str) -> int:
...
def __enter__(self) -> "SimpleIO":
...
def __exit__(self, exc_type, exc_value, traceback):
...
def open_read(filename: ReadableFileLike) -> Iterable[str]:
"""Open the given file, list of files or files matching the given glob and read lines.
`filename` is None or "-" -> reads from stdin
    `filename` is a Path / str -> interprets filename as a glob and opens files matching it
`filename` is a list -> opens sequentially all files from the list using `open_read`
`filename` is something else -> returns the object wrapped in a `nullcontext`
        This allows passing already opened files or iterables.
    `open_read` will decompress gzip files, provided they have a ".gz" suffix.
"""
if filename is None:
return sys.stdin
if isinstance(filename, list):
assert isinstance(filename[0], Path)
if len(filename) == 0:
return []
if len(filename) > 1:
return _yield_from(filename)
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
if filename.startswith("http://") or filename.startswith("https://"):
return open_remote_file(filename)
filename = Path(filename)
if not isinstance(filename, Path):
# we might have received an iterable, return it unmodified.
return filename # type: ignore
# Expand glob patterns only when reading
files = [Path(f) for f in sorted(glob.glob(str(filename)))]
if len(files) > 1:
return _yield_from(files)
if len(files) == 1:
filename = files[0]
assert isinstance(filename, Path)
if filename.name.endswith("]"):
return block_reader(filename)
logging.getLogger(__name__).info(f"Opening {filename} with mode 'rt'")
if filename.suffix == ".gz":
file: TextIO = gzip.open(filename, "rt") # type: ignore
else:
file = open(filename, "rt")
return _close_when_exhausted(file)
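# Illustrative sketch (hypothetical example, not from the original module): `open_read`
# accepts None/"-" for stdin, plain paths, globs, lists of paths or already-open iterables,
# and transparently decompresses ".gz" files. The glob below is a placeholder.
def _example_open_read(pattern: str = "shards/*.json.gz") -> int:
    return sum(1 for _ in open_read(pattern))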
def _close_when_exhausted(file: TextIO) -> Iterable[str]:
with file:
yield from file
def _yield_from(files: list) -> Iterable[str]:
for file in files:
yield from open_read(file)
def open_write(
filename: WritableFileLike, max_size: str = "4G"
) -> tp.ContextManager[TextIO]:
"""Open the given file, list of files or files matching the given glob.
The return value is a ContextManager meant to be used inside a `with` block:
    ```
    with open_write("foo.txt") as o:
        ...
    ```
    Write mode:
        replaces "?" in the filename by digits ranging from 0 to 9, generating files of size `max_size`.
If filename ends with ".gz", creates a blocked gzip file with random access.
"""
if filename is None:
return contextlib.nullcontext(sys.stdout)
if isinstance(filename, list):
if len(filename) > 1:
return MultiFile(filename, "w", max_size)
else:
filename = tp.cast(Path, filename[0])
if isinstance(filename, str):
filename = Path(filename)
if not isinstance(filename, Path):
assert hasattr(filename, "write"), f"{filename} doesn't have a .write method."
# We return a 'TextIO' even though we only check for `.write` method,
# this works better with eg `print`.
return contextlib.nullcontext(tp.cast(TextIO, filename))
mode = "wt"
if "?" in filename.name:
return sharded_file(filename, mode, max_size)
logging.getLogger(__name__).info(f"Opening {filename} with mode {mode}")
# TODO: should we use another format ?
if filename.suffix == ".gz":
return BlockedGzipWriter(Path(filename), mode, block_size="64M")
return open(filename, "wt")
def parse_size(size):
unit_map = {"B": 1, "K": 1024, "M": 1024 ** 2, "G": 1024 ** 3}
unit = size[-1].upper()
assert (
unit in unit_map
), f"Unsupported size unit for {size}. Use one of: {unit_map.keys()}."
return int(size[:-1]) * unit_map[unit]
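# Illustrative sketch (hypothetical example, not from the original module): `parse_size`
# converts human-readable sizes into byte counts.
def _example_parse_size() -> bool:
    return parse_size("64M") == 64 * 1024 ** 2 and parse_size("4G") == 4 * 1024 ** 3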
class MultiFile(SimpleIO):
def __init__(self, files: Iterable[Path], mode="w", max_size="4G"):
self.name = str(files)
self.mode = mode
self.files = iter(files)
self.max_size = parse_size(max_size)
self.current_handle: Optional[TextIO] = None
self.current_block_size = 0
self._open_next_handle() # Opening 1st handle allows to write directly.
def write(self, content) -> int:
# Avoid splitting newlines to a new file.
# use current_block_size since it's faster than `tell()`
if content != "\n" and self.current_block_size >= self.max_size:
self._open_next_handle()
if self.current_handle is None:
raise Exception("No more files to write to...")
written = self.current_handle.write(content)
self.current_block_size += written
return written
def _open_next_handle(self) -> bool:
self.close()
file = next(self.files, None)
if file is None:
return False
self.current_handle = open_write(file).__enter__()
self.current_block_size = 0
return True
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@property
def closed(self):
return self.current_handle is None
def close(self):
if self.current_handle is None:
return
# log("Closing", self.current_handle.name, "with mode", self.current_handle.mode)
self.current_handle.__exit__(None, None, None)
self.current_handle = None
# Not sure it helps since connections are reset anyway.
_session = functools.lru_cache()(requests.Session)
def request_get_content(url: str, n_retry: int = 3) -> bytes:
"""Retrieve the binary content at url.
Retry on connection errors.
"""
t0 = time.time()
logging.info(f"Starting download of {url}")
for i in range(1, n_retry + 1):
try:
r = _session().get(url)
r.raise_for_status()
break
except requests.exceptions.RequestException as e:
# Sleep and try again on error, unless it's a 404.
message = e.args[0] if isinstance(e.args[0], str) else ""
if i == n_retry or "Client Error" in message:
raise e
warnings.warn(
f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})"
)
time.sleep(10 * 2 ** i)
dl_time = time.time() - t0
dl_speed = len(r.content) / dl_time / 1024
logging.info(
f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)"
)
return r.content
def open_remote_file(url: str, cache: Path = None) -> Iterable[str]:
    """Download the file at the given url to memory and open it as a file.
    Assumes that the file is small, and fetches it when this function is called.
"""
if cache and cache.exists():
return open_read(cache)
# TODO: open the remote file in streaming mode.
# The hard part is that we need to write the content on disk at the same time,
# to implement disk caching.
raw_bytes = request_get_content(url)
content = io.BytesIO(raw_bytes)
if url.endswith(".gz"):
f: TextIO = gzip.open(content, mode="rt") # type: ignore
else:
f = io.TextIOWrapper(content)
if cache and not cache.exists():
# The file might have been created while downloading/writing.
tmp_cache = _tmp(cache)
tmp_cache.write_bytes(raw_bytes)
if not cache.exists():
tmp_cache.replace(cache)
else:
tmp_cache.unlink()
return _close_when_exhausted(f)
def sharded_file(file_pattern: Path, mode: str, max_size: str = "4G") -> MultiFile:
folder, name = file_pattern.parent, file_pattern.name
    assert "?" in name, f"Can't expand the given file_pattern: {file_pattern}"
n = name.count("?")
assert 0 < n < 8
    assert "?" * n in name, f"The '?' need to be adjacent in {file_pattern}"
assert "r" not in mode
files = (folder / name.replace("?" * n, f"%0{n}d" % i) for i in range(10 ** n))
return MultiFile(files, mode, max_size)
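# Illustrative sketch (hypothetical example, not from the original module): "???" in the
# name expands to 000..999; a new shard is started whenever the current one reaches max_size.
def _example_sharded_file() -> MultiFile:
    return sharded_file(Path("out.???.json.gz"), mode="w", max_size="1G")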
class SplitFile:
def __init__(self, filename: Path, chunk: int, n_chunks: int, mode: str = "r"):
assert mode == "r"
size = os.path.getsize(filename)
self.handle = open(filename, mode)
start = chunk * size // n_chunks
self.end: int = (chunk + 1) * size // n_chunks
if start > 0:
self.handle.seek(start - 1)
            # Skip incomplete line. This avoids crashing when reading e.g. the middle
# of a unicode char. `self.handle.buffer` is a binary file reader.
self.handle.buffer.readline() # type: ignore
def __enter__(self):
return self
def __iter__(self):
while True:
line = self.handle.readline()
if not line:
return
yield line
if self.handle.tell() >= self.end:
return
def readlines(self):
return list(self.__iter__())
def close(self):
self.handle.close()
def __exit__(self, *args):
self.close()
def get_block_readers(filename: Path, n_readers, mode="t"):
index_filename = filename.parent / (filename.name + ".index")
if not index_filename.exists():
return [gzip.open(filename, "r" + mode)]
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
start = 0
readers = []
for i in range(n_readers):
end = index[min((i + 1) * chunk_per_reader - 1, n_chunks - 1)]
r = _blocked_gzip_reader(filename, start, end, mode)
readers.append(r)
start = end
return readers
def block_reader(filename: Path) -> Iterable[str]:
root, pattern = str(filename)[:-1].split("[", 1)
assert root.endswith(".gz"), "Can only read block of a .gz file for now."
ii, nn = pattern.strip().split("/")
i, n_readers = int(ii), int(nn)
index_filename = root + ".index"
assert os.path.exists(
index_filename
), f"Index {index_filename} not found for {filename}"
index: List[int] = np.load(index_filename)
n_chunks = len(index)
chunk_per_reader = int(np.ceil(n_chunks / n_readers))
n_readers = int(np.ceil(n_chunks / chunk_per_reader))
    # I'm not sure how to handle the case where there are fewer readers than expected.
# Currently we return empty readers.
start = 0
if i > 0:
start = index[min((i - 1) * chunk_per_reader, n_chunks - 1)]
end = index[min(i * chunk_per_reader, n_chunks - 1)]
return _blocked_gzip_reader(root, start, end, mode="t")
def _blocked_gzip_reader(filename, start, end, mode="t") -> Iterable[str]:
handle = gzip.open(filename, "r" + mode)
handle.seek(start)
try:
while handle.tell() < end:
line = handle.readline()
if not line:
break
yield line
finally:
handle.close()
class BlockedGzipWriter(MultiFile):
    """Writes a gzip file which can be read block by block.
Decreasing the block size may hurt compression, but provides more split points.
"""
def __init__(self, filename: Path, mode: str, block_size: str = "256M"):
assert "w" in mode
self.filename = Path(filename)
self.index: List[int] = []
self.zipfile: Optional[gzip.GzipFile] = None
super().__init__([], mode, block_size)
def _open_next_handle(self) -> bool:
"""Here we never actually close/open handles,
we just write the end of block sequence."""
if not self.current_handle:
mode = self.mode + "t"
self.current_handle = tp.cast(TextIO, gzip.open(self.filename, mode))
assert isinstance(self.current_handle.buffer, gzip.GzipFile)
self.zipfile = self.current_handle.buffer
return True
# Use Z_FULL_FLUSH to allow random access:
# https://github.com/madler/zlib/blob/cacf7f1d4e3d44d871b605da3b647f07d718623f/zlib.h#L313
self.current_handle.buffer.flush(zlib_mode=zlib.Z_FULL_FLUSH) # type: ignore
self.index.append(self.current_handle.tell())
self.current_block_size = 0
return True
def flush(self):
assert self.current_handle is not None
self.current_handle.flush()
def close(self):
if self.current_handle is None:
return
self.current_handle.flush()
self.index.append(self.current_handle.tell())
self.current_handle.close()
self.current_handle = None
index = np.array(self.index, dtype=np.uint64)
with open(str(self.filename) + ".index", "wb") as o:
np.save(o, index)
def grouper(iterable, n):
group = []
for x in iterable:
group.append(x)
if len(group) == n:
yield group
group = []
if group:
yield group
PROCESS = psutil.Process()
def mem_footprint_gb(pid=None):
rss = PROCESS.memory_info().rss
return rss / 1_000_000_000
def _tmp(output: Path) -> Path:
suffix = "".join(output.suffixes)
suffix = ".tmp" + suffix
prefix = output.name[: -len(suffix)]
_, tmp_path = tempfile.mkstemp(dir=output.parent, prefix=prefix, suffix=suffix)
return Path(tmp_path)
@functools.lru_cache()
def _tmp_dir() -> Path:
job_id = os.environ.get("SLURM_JOB_ID")
if job_id:
return Path("/scratch/slurm_tmpdir") / job_id
checkpoint = Path("/checkpoint") / os.environ.get("USER", "")
if checkpoint.exists():
tmp = checkpoint / "tmp"
tmp.mkdir(exist_ok=True)
return tmp
return Path("/tmp")
if __name__ == "__main__":
multiprocessing.set_start_method("fork")
main(sys.argv[1:])
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/jsonql.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import functools
import itertools
import logging
import os
import sys
import time
import warnings
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Sequence, Sized
import submitit
from typing_extensions import Protocol
class Executor(Protocol):
def __call__(self, function: Callable[..., str], *args: Iterable) -> None:
...
class SubmititRetryOnTimeout(submitit.helpers.Checkpointable):
def __init__(self, fn: Callable):
self.fn = fn
self.__name__ = fn.__name__
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def get_executor(
name: str,
log_dir: Path,
execution: str,
timeout_hour: float = 1.0,
mem_gb: int = 1,
cpus: int = 1,
task_parallelism: int = -1,
options: dict = {},
) -> Executor:
execution_mode = execution.split(",")[0]
options.update(
{kv.split("=", 1)[0]: kv.split("=", 1)[1] for kv in
execution.split(",")[1:]}
)
if execution_mode == "mp":
warnings.warn("Execution mode 'mp' is deprecated, use 'local'.")
execution_mode = "local"
cluster = None if execution_mode == "auto" else execution_mode
# use submitit to detect which executor is available
ex = submitit.AutoExecutor(log_dir, cluster=cluster)
if task_parallelism == -1: # we are on slurm
ex.parameters['slurm_time'] = int(timeout_hour * 60)
else:
ex.parameters['timeout_min'] = int(timeout_hour * 60)
if ex.cluster == "local":
ex.parameters['timeout_min'] = int(timeout_hour * 60)
# LocalExecutor doesn't respect task_parallelism
return functools.partial(custom_map_array, ex, task_parallelism)
if ex.cluster == "debug":
ex.parameters['timeout_min'] = int(timeout_hour * 60)
return debug_executor
# We are on slurm
if task_parallelism == -1:
task_parallelism = 500
ex.update_parameters(
name=name,
slurm_time=int(timeout_hour * 60),
slurm_mem_per_cpu=mem_gb,
cpus_per_task=cpus,
slurm_array_parallelism=task_parallelism,
**options,
)
else:
ex.update_parameters(
name=name,
timeout_min=int(timeout_hour * 60),
mem_gb=mem_gb,
cpus_per_task=cpus,
slurm_array_parallelism=task_parallelism,
**options,
)
return functools.partial(map_array_and_wait, ex)
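# Illustrative sketch (hypothetical example, not from the original cc_net module):
# requesting a local executor and mapping a top-level worker over a list of inputs.
# `_example_job` and the "logs" directory are placeholders.
def _example_job(path: str) -> str:
    return f"processed {path}"
def _example_get_executor(paths: List[str]) -> None:
    ex = get_executor("demo", Path("logs"), execution="local", timeout_hour=0.1, cpus=1)
    ex(_example_job, paths)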
def map_array_and_wait(
ex: submitit.AutoExecutor, function: Callable[..., str],
*args: Iterable
):
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
approx_length = _approx_length(*args)
print(f"Submitting {f_name} in a job array ({approx_length} jobs)")
jobs = ex.map_array(function, *args)
if not jobs:
return
failed_jobs = []
done = 0
total = len(jobs)
job_array_id = jobs[0].job_id.split("_")[0]
print(f"Started {f_name} in job array {job_array_id} ({len(jobs)} jobs).")
for job in submitit.helpers.as_completed(jobs):
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).",
job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
            print(f"... ({len(failed_jobs) - n_failures} failed jobs skipped)")
raise Exception(message)
def debug_executor(function: Callable[..., Optional[str]],
*args: Iterable) -> None:
logging.getLogger().setLevel(logging.DEBUG)
approx_length = _approx_length(*args)
for i, x in enumerate(zip(*args)):
try:
message = function(*x)
except Exception:
try:
import ipdb as pdb # type: ignore
except ImportError:
import pdb # type: ignore
import traceback
traceback.print_exc()
print("")
pdb.post_mortem()
sys.exit(1)
if message is not None:
print(message, f"({i + 1} / {approx_length})")
def _approx_length(*args: Iterable):
for a in args:
if isinstance(a, Sized):
return len(a)
return -1
def custom_map_array(
ex: submitit.AutoExecutor,
parallelism: int,
function: Callable[..., Optional[str]],
*args: Iterable,
) -> None:
f_name = function.__name__
assert len(args) > 0, f"No arguments passed to {f_name}"
jobs_args = list(zip(*args))
total = len(jobs_args)
if parallelism < 0:
parallelism = os.cpu_count() or 0
assert parallelism >= 0, f"Can't run any jobs with task_parallelism={parallelism}"
print(
f"Submitting {total} jobs for {f_name}, with task_parallelism={parallelism}")
enqueued = 0
done = 0
running_jobs: List[submitit.Job] = []
failed_jobs: List[submitit.Job] = []
while done < len(jobs_args):
# Try to queue more job if we have some bandwidth.
if enqueued < total and len(running_jobs) < parallelism:
running_jobs.append(ex.submit(function, *jobs_args[enqueued]))
enqueued += 1
continue
# Else wait for some job to finish
if not running_jobs:
warnings.warn(
f"No more running jobs, yet we submitted only {enqueued} / {total} and finished {done} / {total}"
)
break
job = get_next_job(running_jobs)
running_jobs.remove(job)
done += 1
e = job.exception()
if not e:
print(f"Finished job {job.job_id} ({done} / {total}).",
job.result())
continue
print(f"Failed job {job.job_id} ({done} / {total}):", e)
failed_jobs.append(job)
if failed_jobs:
n_failures = 10
message = f"{len(failed_jobs)} / {done} jobs failed while running {f_name}"
print(message)
for job in failed_jobs[:n_failures]:
print(f"Failed {job.job_id} -> {job.paths.stderr}")
if len(failed_jobs) > n_failures:
            print(f"... ({len(failed_jobs) - n_failures} failed jobs skipped)")
raise Exception(message)
def get_next_job(
jobs: Sequence[submitit.Job], poll_frequency: float = 10
) -> submitit.Job:
"""
    Waits for any of the jobs to finish and returns it.
jobs: list of jobs
    poll_frequency: frequency in seconds at which we check job status
"""
start = time.time()
waiting = False
while True:
for job in jobs:
if job.done():
return job
if not waiting:
job_ids = [j.job_id for j in jobs[:4]]
suffix = "..." if len(jobs) > 4 else ""
print(
f"Waiting on {len(jobs)} running jobs. Job ids: {','.join(job_ids)}{suffix}"
)
waiting = True
time.sleep(poll_frequency)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/execution.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import time
import warnings
from typing import Iterable, Iterator, Sequence, Sized, Tuple, Type
import numpy as np
HASH_TYPE: Type[np.uint64] = np.uint64
GETPY_WARNING = False
class AbstractDedupHashSet(Sized, Iterable[np.uint64]):
"""A dict-like that returns `True` for keys that have been added more than once.
    The API is batched and expects np.array as input. This batching grants better
    performance when using the C++ implementation.
"""
dtype: Type[np.uint64] = HASH_TYPE
def __repr__(self):
implementation = type(self).__name__
        return f"[{implementation}, len: {len(self)}]"
def __len__(self) -> int:
...
def __contains__(self, values: Sequence[np.uint64]) -> np.ndarray:
...
def __getitem__(self, values) -> np.ndarray:
...
def __setitem__(self, keys, values) -> None:
...
def items(self) -> Iterable[Tuple[np.uint64, np.uint8]]:
...
def keys(self) -> Iterable[np.uint64]:
...
def __iter__(self) -> Iterator[np.uint64]:
return iter(self.keys())
    def add(self, h, contains=None):
        """Add the given keys. The first time a key is added the value is set to 0,
        afterwards it is set to 1."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
if contains is None:
contains = self.__contains__(h)
self.__setitem__(h, contains)
return contains
def merge(self, keys, values):
contains = self.__contains__(keys)
self.__setitem__(keys, contains | values)
def dump(self, filename):
return self.dump_np(filename)
def load(self, filename):
return self.load_np(filename)
def dump_np(self, filename):
kv_type = np.dtype([("k", HASH_TYPE), ("v", np.uint8)])
items = np.fromiter(self.items(), dtype=kv_type, count=len(self))
with open(filename, "wb") as f:
np.save(f, items)
def load_np(self, filename):
items = np.load(str(filename))
keys = items["k"].copy()
values = items["v"].copy()
self.merge(keys, values)
def dump_np2(self, filename):
keys = np.fromiter(
(k for (k, v) in self.items()), dtype=HASH_TYPE, count=len(self)
)
with open(filename, "wb") as f:
np.save(f, keys)
values = np.fromiter(
(v for (k, v) in self.items()), dtype=np.uint8, count=len(self)
)
with open(str(filename) + ".val", "wb") as f:
np.save(f, values)
def load_np2(self, filename):
keys = np.load(filename)
values = np.load(str(filename) + ".val")
self.merge(keys, values)
class NaiveHashSet(dict, AbstractDedupHashSet):
"""Pure python implementation of AbstractDedupHashSet.
    This implementation is quite fast, since Python dicts are heavily optimized.
"""
def __init__(self, iterable=None):
super().__init__()
global GETPY_WARNING
if GETPY_WARNING:
            warnings.warn(
                "Module 'getpy' not found. Deduplication will take more RAM."
                " Try `pip install cc_net[getpy]`."
)
GETPY_WARNING = False
    def __contains__(self, values):
        """Returns `True` if the object has been added at least once."""
contains_point = super().__contains__
return np.fromiter(
map(contains_point, values), count=len(values), dtype=np.uint8
)
    def __getitem__(self, values):
        """Returns `True` if the object has been added at least twice."""
get_point = super().get
return np.fromiter(
map(lambda x: get_point(x, False), values),
count=len(values),
dtype=np.uint8,
)
def __setitem__(self, keys, values):
assert len(keys) == len(values)
for k, v in zip(keys, values):
dict.__setitem__(self, k, v)
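# Illustrative sketch (hypothetical example, not from the original module): the set answers
# "seen before?" for a batch of hashes; the first `add` returns zeros, the second ones.
def _example_dedup():
    h = NaiveHashSet()
    batch = np.array([11, 22, 33], dtype=HASH_TYPE)
    first = h.add(batch)   # array([0, 0, 0], dtype=uint8)
    second = h.add(batch)  # array([1, 1, 1], dtype=uint8)
    return first, second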
try:
import getpy as gp # type: ignore
class _FlatHashSet(gp.Dict, AbstractDedupHashSet):
"""C++ backed implementation of AbstractDedupHashSet.
This implementation is slightly slower than the Python one but uses
3x less RAM.
See https://github.com/atom-moyer/getpy.
"""
def __init__(self):
super().__init__(HASH_TYPE, np.uint8, default_value=False)
        def __contains__(self, h):
            """Returns `True` if the object has been added at least once."""
if not isinstance(h, np.ndarray):
h = np.array(h, dtype=HASH_TYPE)
c = gp.Dict.__contains__(self, h)
c.dtype = np.uint8
return c
def dump(self, filename):
return self.dump_gp(filename)
def load(self, filename):
return self.load_gp(filename)
def dump_gp(self, filename):
return gp.Dict.dump(self, str(filename))
def load_gp(self, filename):
"""Override gp.Dict.load, to correctly merge values instead of overwriting."""
other = gp.Dict(HASH_TYPE, np.uint8, default_value=False)
other.load(str(filename))
n = len(other)
keys = np.fromiter(
(k for (k, v) in other.items()), dtype=HASH_TYPE, count=n
)
values = np.fromiter(
(v for (k, v) in other.items()), dtype=np.uint8, count=n
)
self.merge(keys, values)
FlatHashSet: Type[AbstractDedupHashSet] = _FlatHashSet
except ImportError:
GETPY_WARNING = True
FlatHashSet = NaiveHashSet
def timeit(message, function, *args):
start = time.time()
function(*args)
end = time.time()
print(message, f"took {end - start:.0f}s")
def compare_load(*filenames):
assert filenames, "No file given"
def load_list():
hashes = []
for f in filenames:
h = FlatHashSet()
h.load(f)
print(f"Loaded {h} from {f}.")
hashes.append(h)
return hashes
def load_all(load, ext):
hashes = FlatHashSet()
for f in filenames:
load(hashes, f + ext)
def dump_all(hashes, dump, ext):
for h, f in zip(hashes, filenames):
dump(h, f + ext)
hashes = load_list()
dump_gp = getattr(FlatHashSet, "dump_gp")
if dump_gp is not None:
timeit("Dumping using gp.dump", dump_all, hashes, dump_gp, ".gp.test")
timeit("Dumping using dump_np", dump_all, hashes, FlatHashSet.dump_np, ".npy.test")
timeit(
"Dumping using dump_np2", dump_all, hashes, FlatHashSet.dump_np2, ".npy2.test"
)
load_gp = getattr(FlatHashSet, "load_gp")
if load_gp is not None:
timeit("Loading using gp.load", load_all, load_gp, ".gp.test")
timeit("Loading using load_np", load_all, FlatHashSet.load_np, ".npy.test")
timeit("Loading using load_np2", load_all, FlatHashSet.load_np2, ".npy2.test")
# Loading 10 shards:
# [dedup] Dumping using gp.dump took 52s
# [dedup] Dumping using dump_np took 270s
# [dedup] Dumping using dump_np2 took 483s
#
# [dedup] Loading using gp.load took 654s
# [dedup] Loading using load_np took 82s
# [dedup] Loading using load_np2 took 76s
if __name__ == "__main__":
compare_load(*sys.argv[1:])
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/flat_hash_set.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import base64
import hashlib
import itertools
import urllib.parse
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Sequence, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.execution import get_executor
from cc_net.jsonql import mem_footprint_gb
HASH_SIZE = 4
HASH_TYPE = np.uint32
PUBLIC_FIELDS = ["url", "digest"]
COMPUTED_FIELDS = ["cc_segment", "language", "language_score", "bucket", "perplexity"]
DATA = Path(__file__).parent.parent / "data"
# This is similar to dedup methods but we use 32-bit hashes.
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def _str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
def get_hashes(lines: Iterable[str]) -> List[bytes]:
h = HASH_SIZE
return [hashlib.sha1(bytes(l, encoding="utf-8")).digest()[:h] for l in lines]
def encode_hashes(hashes: Iterable[bytes]) -> str:
return base64.b64encode(b"".join(hashes)).decode("ascii")
def encode_as_hashes(lines: Iterable[str]) -> str:
return encode_hashes(get_hashes(lines))
def decode_hashes(compact: str) -> List[bytes]:
all_hashes = base64.b64decode(compact)
res = []
assert len(all_hashes) % HASH_SIZE == 0
for i in range(len(all_hashes) // HASH_SIZE):
chunk = all_hashes[i * HASH_SIZE : (i + 1) * HASH_SIZE]
res.append(chunk)
return res
def encode_line_ids(line_ids: Sequence[int]) -> str:
arr = np.array(line_ids, dtype="<u2")
return base64.b64encode(arr.tobytes()).decode("ascii")
def decode_line_ids(compact: str) -> List[int]:
ids_bytes = bytearray(base64.b64decode(compact))
return np.ndarray(len(ids_bytes) // 2, dtype="<i2", buffer=ids_bytes)
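# Illustrative sketch (hypothetical example, not from the original module): line ids are
# packed as 16-bit integers and base64-encoded, which keeps minified documents compact.
def _example_line_ids_roundtrip() -> bool:
    compact = encode_line_ids([0, 3, 7])
    return list(decode_line_ids(compact)) == [0, 3, 7]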
def get_doc_key(digest: str) -> int:
assert digest.startswith("sha1:")
h = base64.b32decode(digest[5:])
return _b2i(h[:HASH_SIZE])
class Minifier(jsonql.Transformer):
ready = True
def __init__(self):
self.fields = frozenset(COMPUTED_FIELDS + PUBLIC_FIELDS)
def do(self, doc: dict) -> Optional[dict]:
line_ids: List[int] = doc.pop("line_ids")
fields = self.fields
keys = list(doc.keys())
for k in keys:
if k not in fields:
doc.pop(k, None)
p = doc.get("perplexity", 0)
doc["line_ids"] = encode_line_ids(line_ids)
if p:
doc["perplexity"] = round(p, 1)
s = doc.get("language_score", 0)
if s:
doc["language_score"] = round(s, 2)
return doc
class MetadataFetcher(jsonql.Transformer):
    """Reads documents from a CC snapshot and joins precomputed metadata.
    CC snapshots are split into segments. Each segment is 64MB long.
The metadata must also be stored in segments of the same size and names.
"""
def __init__(self, folder: Union[Path, str]):
self.ready = True
self.metadata: Dict[int, dict] = {}
self._segments: Set[str] = set()
self.read_doc = 0
self.missed_doc = 0
self.missed_par = 0
self.processed_par = 0
if isinstance(folder, str):
# detect path passed as string
if urllib.parse.urlparse(folder).scheme == "":
folder = Path(folder)
assert folder.exists(), f"Metadata folder not found: {folder}"
self.folder = folder
self.segment: str = ""
self.segments_read_twice = 0
def meta_file(self, segment: str) -> str:
file_name = segment.split("/")[-1]
assert file_name.endswith(".warc.wet.gz") or file_name.endswith(".warc.wet")
if isinstance(self.folder, str):
return urllib.parse.urljoin(
self.folder, file_name.replace(".warc.wet", ".json")
)
meta_file = self.folder / file_name.replace(".warc.wet", ".json")
assert (
meta_file.exists()
), f"Couldn't find metadata file for segment {segment} at {meta_file}"
return str(meta_file)
def fetch_metadata(self, segment: str) -> None:
meta_file = self.meta_file(segment)
k = get_doc_key
self.metadata = {}
collision = 0
for m in jsonql.read_jsons(meta_file):
key = k(m["digest"])
if key in self.metadata:
collision += 1
self.metadata[key] = m
self.log(f"Loaded {len(self.metadata)} metadatas from {meta_file}")
if collision > 0:
self._logger.warning(f"Found {collision} collisions !")
self.segment = segment
if segment in self._segments:
self.log("Cache miss")
self.segments_read_twice += 1
self._segments.add(segment)
def do(self, doc: dict) -> Optional[dict]:
if self.segment != doc["cc_segment"]:
self.fetch_metadata(doc["cc_segment"])
digest = doc["digest"]
key = get_doc_key(digest)
if key not in self.metadata:
return None
metadata = self.metadata.pop(key)
return self.clean(metadata, doc)
def clean(self, metadata: dict, full_doc: dict) -> Optional[dict]:
line_ids = decode_line_ids(metadata.pop("line_ids"))
lines = full_doc["raw_content"].split("\n")
cleaned = []
for l in line_ids:
if l >= len(lines) or l < 0:
self.missed_par += 1
continue
cleaned.append(lines[l])
self.processed_par += len(line_ids)
if not cleaned:
self.missed_doc += 1
return None
full_doc["raw_content"] = "\n".join(cleaned)
full_doc["original_nlines"] = full_doc["nlines"]
full_doc["original_length"] = full_doc["length"]
full_doc["nlines"] = len(cleaned)
full_doc["length"] = len(full_doc["raw_content"])
for key, value in metadata.items():
full_doc[key] = value
return full_doc
def summary(self) -> List[str]:
summ = super().summary()
mem = mem_footprint_gb()
len_cache = len(self.metadata)
summ.append(
f"Read {self.read_doc:_}, stocking {len_cache:_} doc in {mem:.1f}g."
)
if self.missed_doc:
r = self.missed_doc / self.processed
summ.append(f"! Missed {self.missed_doc} documents ({r:.1%}) !")
if self.missed_par:
r = self.missed_par / self.processed
summ.append(f"! Missed {self.missed_par} paragraphs ({r:.1%}) !")
return summ
def _expand_files(files: List[Path]) -> List[Path]:
if len(files) == 1 and files[0].is_dir():
folder = files[0]
files = sorted(folder.glob("*.json.gz"))
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert files, "No files found"
return files
def minify_file(file: Path, output: Path) -> str:
"""Minify the given file."""
jsonql.run_pipes(Minifier(), file=file, output=output)
return f"Minified {output}"
def minify(
files: List[Path], output_dir: Path, execution: str = "mp", parallelism: int = -1
):
"""Minify all the files in the given folder."""
files = _expand_files(files)
output_dir.mkdir(exist_ok=True)
with open(output_dir / "files.txt", "w") as o:
for f in files:
print(f.name, file=o)
outputs = [output_dir / f.name for f in files]
ex = get_executor(
"minify",
output_dir / "logs",
execution,
timeout_hour=2,
cpus=1,
task_parallelism=parallelism,
)
ex(minify_file, files, outputs)
def fetch_metadata_file(
file: Union[Path, str],
metadata_dir: Union[Path, str],
output: Path,
cache_dir: Path = None,
):
unminifier = MetadataFetcher(metadata_dir)
tmp = output.with_name("tmp." + output.name)
jsonql.run_pipes(unminifier, file=file, output=tmp)
tmp.rename(output)
return f"Fetched metadata for {file}. Results at {output}."
def fetch_metadata(
files: List[str],
metadata_dir: Union[Path, str],
output_dir: Path,
execution: str = "mp",
parallelism: int = -1,
cache_dir: Path = None,
):
if len(files) == 1 and Path(files[0]).is_dir():
folder = Path(files[0])
files = [str(f) for f in sorted(folder.glob("*.json.gz"))]
print(f"Found {len(files)} files under {folder}/*.json.gz")
assert len(files) > 0, "No files given."
output_dir.mkdir(exist_ok=True)
outputs = [output_dir / str(f).split("/")[-1] for f in files]
if cache_dir is None:
cache_dir = output_dir / "wet_cache"
cache_dir.mkdir(exist_ok=True)
if str(cache_dir) == "none":
cache_dir = None
files = [f for f, o in zip(files, outputs) if not o.exists()]
outputs = [o for o in outputs if not o.exists()]
if not files:
return
ex = get_executor(
"unminify",
output_dir / "logs",
execution,
timeout_hour=8,
cpus=1,
task_parallelism=parallelism,
mem_gb=32,
)
ex(fetch_metadata_file, files, outputs, itertools.repeat(cache_dir))
if __name__ == "__main__":
import func_argparse
func_argparse.main(minify_file, minify, fetch_metadata, fetch_metadata_file)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/minify.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import unicodedata
UNICODE_PUNCT = {
",": ",",
"。": ".",
"、": ",",
"„": '"',
"”": '"',
"“": '"',
"«": '"',
"»": '"',
"1": '"',
"」": '"',
"「": '"',
"《": '"',
"》": '"',
"´": "'",
"∶": ":",
":": ":",
"?": "?",
"!": "!",
"(": "(",
")": ")",
";": ";",
"–": "-",
"—": " - ",
".": ". ",
"~": "~",
"’": "'",
"…": "...",
"━": "-",
"〈": "<",
"〉": ">",
"【": "[",
"】": "]",
"%": "%",
"►": "-",
}
UNICODE_PUNCT_RE = re.compile(f"[{''.join(UNICODE_PUNCT.keys())}]")
def replace_unicode_punct(text: str) -> str:
return "".join((UNICODE_PUNCT.get(c, c) for c in text))
def remove_unicode_punct(text: str) -> str:
"""More aggressive version of replace_unicode_punct but also faster."""
return UNICODE_PUNCT_RE.sub("", text)
def strip_accents(line: str) -> str:
"""Strips accents from a piece of text."""
nfd = unicodedata.normalize("NFD", line)
output = [c for c in nfd if unicodedata.category(c) != "Mn"]
    if len(output) == len(line):
return line
return "".join(output)
# Build a regex matching all control characters.
NON_PRINTING_CHARS_RE = re.compile(
f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]"
)
DIGIT_RE = re.compile(r"\d")
PUNCT_OR_NON_PRINTING_CHARS_RE = re.compile(
(UNICODE_PUNCT_RE.pattern + NON_PRINTING_CHARS_RE.pattern).replace("][", "")
)
def remove_non_printing_char(text: str) -> str:
return NON_PRINTING_CHARS_RE.sub("", text)
def normalize_spacing_for_tok(text: str, language: str = "en") -> str:
res = (
text.replace("\r", "")
# remove extra spaces
.replace("(", " (")
.replace(")", ") ")
.replace(" +", " ")
)
res = re.sub(r"\) ([\.\!\:\?\;\,])", r"\)\1", res)
res = res.replace("( ", "(").replace(" )", ")")
res = re.sub(r"(\d) \%", r"\1\%", res)
res = res.replace(" :", ":").replace(" ;", ";")
res = res.replace("`", "'").replace("''", ' " ')
res = (
res.replace("„", '"')
.replace("“", '"')
.replace("”", '"')
.replace("–", "-")
.replace("—", " - ")
.replace(" +", " ")
.replace("´", "'")
.replace("([a-z])‘([a-z])", r"\1'\2/")
.replace("([a-z])’([a-z])", r"\1'\2/")
.replace("‘", '"')
.replace("‚", '"')
.replace("’", '"')
.replace("''", '"')
.replace("´´", '"')
.replace("…", "...")
# French quotes
.replace(" « ", ' "')
.replace("« ", '"')
.replace("«", '"')
.replace(" » ", '" ')
.replace(" »", '"')
.replace("»", '"')
# handle pseudo-spaces
.replace(" %", "%")
.replace("nº ", "nº ")
.replace(" :", ":")
.replace(" ºC", " ºC")
.replace(" cm", " cm")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ;", ";")
.replace(", ", ", ")
.replace(" +", " ")
.replace(".", ". ")
)
# English "quotation," followed by comma, style
if language == "en":
res = re.sub(r"\"([,\.]+)", r"\1\"", res)
# Czech is confused
elif language == "cs" or language == "cz":
pass
# German/Spanish/French "quotation", followed by comma, style
else:
res = res.replace(',"', '",')
res = re.sub(
r"(\.+)\"(\s*[^<])", r"\"\1\2", res
) # don't fix period at end of sentence
if (
language == "de"
or language == "es"
or language == "cz"
or language == "cs"
or language == "fr"
):
res = re.sub(r"(\d) (\d)", r"\1,\2", res)
else:
res = re.sub(r"(\d) (\d)", r"\1.\2", res)
return res
def normalize(line: str, accent=True, case=True, numbers=True, punct=1) -> str:
line = line.strip()
if not line:
return line
if case:
line = line.lower()
if accent:
line = strip_accents(line)
if numbers:
line = DIGIT_RE.sub("0", line)
if punct == 1:
line = replace_unicode_punct(line)
elif punct == 2:
line = remove_unicode_punct(line)
line = remove_non_printing_char(line)
return line
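# Illustrative sketch (hypothetical example, not from the original module): the default
# normalization lowercases, strips accents, maps digits to "0" and replaces unicode
# punctuation with ASCII equivalents.
def _example_normalize() -> str:
    return normalize("Voilà… 42 catS!")  # "voila... 00 cats!"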
def slow_normalize_for_dedup(line: str) -> str:
return normalize(line, accent=False, case=True, numbers=True, punct=2)
def normalize_for_dedup(line: str) -> str:
line = line.strip()
if not line:
return line
# case
line = line.lower()
# numbers
line = DIGIT_RE.sub("0", line)
line = PUNCT_OR_NON_PRINTING_CHARS_RE.sub("", line)
return line
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/text_normalizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import subprocess
from pathlib import Path
from typing import List
import func_argparse
import numpy as np
from cc_net import jsonql
def get_index(file: Path) -> Path:
return file.parent / (file.name + ".index")
def _get_tmp(output: Path) -> Path:
return output.parent / (output.stem + ".tmp" + output.suffix)
def reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Read the given files and concatenate them to the output file.
Can remove original files on completion, or just write dummy content into them to free disk.
"""
if tmp is None:
tmp = _get_tmp(output)
logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}")
jsonql.run_pipes(file=inputs, output=tmp)
tmp.replace(output)
tmp_index = get_index(tmp)
if tmp_index.exists():
tmp_index.replace(get_index(output))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def fast_reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
    """Same as reshard but doesn't re-compress the output.
This will lead to a bigger output file, especially if the shards are very small.
"""
if tmp is None:
tmp = _get_tmp(output)
with open(tmp, "wb") as o:
subprocess.run(["cat"] + [str(f) for f in inputs], stdout=o)
tmp.replace(output)
indexes_files = [get_index(i) for i in inputs]
existing_indexes = sum(i.exists() for i in indexes_files)
assert (
existing_indexes == len(indexes_files) or existing_indexes == 0
), "some indexes don't exist."
if existing_indexes > 0:
indexes = [np.load(idx) for idx in indexes_files]
for i in range(len(indexes) - 1):
indexes[i + 1] += indexes[i][-1]
with open(str(output) + ".index", "wb") as o:
np.save(o, np.concatenate(indexes))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output
def determine_groups(
inputs: List[Path], target_size: int = 4 * 1024 ** 3
) -> List[List[Path]]:
if len(inputs) == 0:
return []
sample = inputs[:10]
typical_size = sum(s.stat().st_size for s in sample) / len(sample)
group_size = min(target_size // typical_size, len(inputs))
group_size = max(group_size, 1)
return jsonql.grouper(inputs, group_size)
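# Illustrative sketch (hypothetical example, not from the original module): grouping many
# small shards into roughly 4GB outputs; the output naming is a placeholder.
def _example_regroup(shards: List[Path], out_dir: Path) -> None:
    for i, group in enumerate(determine_groups(shards)):
        reshard(group, out_dir / f"regrouped_{i:04d}.json.gz")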
if __name__ == "__main__":
func_argparse.single_main(reshard)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/regroup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import time
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Tuple, Union
import kenlm # type: ignore
import numpy as np # type: ignore
import pandas as pd # type: ignore
import sentencepiece # type: ignore
from cc_net import jsonql, text_normalizer
LMDescriptor = Union[Dict[str, Path], Union[Path, str]]
def get_args():
parser = argparse.ArgumentParser(
description="Compute the score of each sentences of a document",
parents=[jsonql.io_parser()],
)
parser.add_argument("--models", type=str)
parser.add_argument("--sentences", action="store_true", default=False)
parser.add_argument(
"--languages", type=str, help="Ignore doc with another language"
)
parser.add_argument("--field", type=str, default=None)
parser.add_argument("--newline", type=str, default="\n")
return vars(parser.parse_args())
def pp(log_score, length):
return 10.0 ** (-log_score / length)
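# Illustrative note (hypothetical example, not from the original module): `pp` converts a
# total base-10 log-score into perplexity, pp = 10 ** (-log_score / length); e.g. a
# log-score of -200 over 100 tokens gives a perplexity of 100.
def _example_pp() -> float:
    return pp(-200.0, 100)  # 100.0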
class SentencePiece(jsonql.Transformer):
# Sentence Pieces model have to be read back from disk.
warning_when_pickling = True
def __init__(
self,
model: Path,
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.model = model
self.field = field
self.output_field = output_field
self.normalize = normalize
self.sp: sentencepiece.SentencePieceProcessor = None
def _prepare(self):
if self.sp is not None:
return
self.sp = sentencepiece.SentencePieceProcessor()
self.sp.load(str(self.model))
return self
def do(self, document: dict) -> dict:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
tokenized = self.sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class MultiSentencePiece(jsonql.Transformer):
warning_when_pickling = True
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "tokenized",
normalize: bool = False,
):
super().__init__()
self.field = field
self.output_field = output_field
self.normalize = normalize
self._prefetch: Sequence[str] = []
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.sp: Dict[str, sentencepiece.SentencePieceProcessor] = {}
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_sp(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_sp(self, lang) -> Optional[sentencepiece.SentencePieceProcessor]:
sp = self.sp.get(lang)
if sp is not None:
return sp
if lang not in self.models:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
sp = sentencepiece.SentencePieceProcessor()
sp.load(str(self.models[lang]))
self.sp[lang] = sp
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return sp
def do(self, document: dict) -> Optional[dict]:
text = document[self.field]
if self.normalize:
text = text_normalizer.normalize(text)
sp = self.get_sp(document.get("language"))
if sp is None:
return document
tokenized = sp.encode_as_pieces(text)
document[self.output_field] = " ".join(tokenized)
return document
class DocLM(jsonql.Transformer):
def __init__(
self,
models: Union[Path, Dict[str, Path]],
field: str,
output_field: str = "perplexity",
newline: str = "\n",
normalize: bool = True,
load_method: int = 2,
):
super().__init__()
self.field = field
self.output_field = output_field
self.newline = newline
self.normalize = normalize
self._prefetch: Sequence[str] = []
self.lm_config = kenlm.Config()
# This is the default settings
# POPULATE will mmap the models and populate the pages.
# Maybe that's not the best way when the models are on a network disk.
# TODO: try copying models file, try READ or PARALLEL_READ
self.lm_config.load_method = load_method
if isinstance(models, Path):
self.models = {
m.name.split(".")[0]: m for m in models.parent.glob(models.name)
}
else:
self.models = models
self._prefetch = list(models.keys())
self.lm: Dict[str, kenlm.Model] = {}
self.n_lines = 0
def _prepare(self) -> None:
for lang in self._prefetch:
assert (
self.get_lm(lang) is not None
), f"No model found for {lang} at {self.models.get(lang)}."
def get_lines(self, document: dict) -> List[str]:
lang = document.get("language")
if not lang:
return []
if lang not in self.models:
return []
content = document.get(self.field)
if not content:
return []
lines = content.split(self.newline)
self.n_lines += len(lines)
return lines
def get_lm(self, lang: Optional[str]) -> Optional[kenlm.Model]:
if lang is None:
return None
lm = self.lm.get(lang)
if lm is not None:
return lm
model = self.models.get(lang)
if model is None:
return None
start_load = time.time()
self.log(f"Loading {self.models[lang]}...")
lm = kenlm.Model(str(model), self.lm_config)
self.lm[lang] = lm
load_time = time.time() - start_load
self.log(f"Loaded {self.models[lang]} (took {load_time / 60:.1f}min)")
return lm
def do(self, document: dict) -> dict:
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return document
doc_log_score, doc_length = 0, 0
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
document[self.output_field] = round(pp(doc_log_score, doc_length), 1)
return document
def summary(self):
delay = time.time() - self.start_time
h = delay / 3600
s = self.n_lines / delay
summ = super().summary()
summ.append(f"Processed {self.n_lines:_} lines in {h:.2}h ({s:.1} lines/s).")
return summ
class SentencesLM(DocLM):
"""Returns the score of each individual paragraph."""
def do(self, document: dict) -> Optional[str]: # type: ignore
lines = self.get_lines(document)
model = self.get_lm(document.get("language"))
if not lines or not model:
return None
sentences = []
for line in lines:
if self.normalize:
line = text_normalizer.normalize(line)
log_score = model.score(line)
length = len(line.split()) + 1
sentences.append(f"{pp(log_score, length)}\t{line}")
return "\n".join(sentences)
class PerplexityBucket(jsonql.Transformer):
def __init__(
self, cutoff_csv: Path, percentile_head: int = 30, percentile_tail: int = 60
):
super().__init__()
self.cutoff_csv = cutoff_csv
self.percentile_head = percentile_head
self.percentile_tail = percentile_tail
self.cutoffs: Dict[str, Tuple[float, float]] = {}
def _prepare(self) -> None:
cutoffs = pd.read_csv(self.cutoff_csv, index_col=0)
self.cutoffs = {
l: (cutoffs[l][self.percentile_head], cutoffs[l][self.percentile_tail])
for l in cutoffs.columns
}
def get_bucket(self, doc: dict) -> str:
perplexity = doc.get("perplexity", -1)
lang = doc.get("language")
if lang not in self.cutoffs or perplexity < 0:
return "all"
pp_head, pp_tail = self.cutoffs[lang]
if perplexity < pp_head:
return "head"
if perplexity < pp_tail:
return "middle"
return "tail"
def do(self, doc: dict) -> dict:
doc["bucket"] = self.get_bucket(doc)
return doc
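# --- Illustrative sketch (not part of the original file) ---
# PerplexityBucket expects a CSV with one column per language and one row per
# percentile, indexed by the percentile value. A hypothetical cutoff file:
#
#     ,en,fr
#     30,320.0,280.0
#     60,650.0,590.0
#
# With percentile_head=30 and percentile_tail=60, an English document with
# perplexity 250 lands in "head", 500 in "middle", and 900 in "tail";
# documents without a known language or perplexity fall back to "all".
def _example_bucket(perplexity: float, head: float = 320.0, tail: float = 650.0) -> str:
    if perplexity < 0:
        return "all"
    if perplexity < head:
        return "head"
    if perplexity < tail:
        return "middle"
    return "tail"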
class DropKeys(jsonql.Transformer):
def __init__(self, *keys):
super().__init__()
self.keys = keys
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
for key in self.keys:
document.pop(key, None)
return document
class RemoveSmall(jsonql.Transformer):
def __init__(self, field, min_len):
super().__init__()
self.field = field
self.min_len = min_len
self.removed = 0
def do(self, document: dict) -> Optional[dict]:
if not document:
return None
content = document.get(self.field)
if not content or len(content) < self.min_len:
self.removed += 1
return None
return document
def summary(self):
r, n = self.removed, self.processed
ratio = r / n if n else 0
return [f"Removed {r} small documents out of {n} ({ratio:.1%})"]
def perplexity_to_bin(file: Path, output: Path, models, tok_field: str):
pp_field = "perplexity"
lm = DocLM(models, tok_field, output_field=pp_field)
stats: List[float] = []
max_stats = 1_000_000
batch_size = 100_000
i = 0
batch = []
with open(output, "wb") as o:
for doc in jsonql.read_jsons(file):
i += 1
pp = lm(doc)[pp_field]
if len(stats) < max_stats:
stats.append(pp)
batch.append(pp)
if len(batch) >= batch_size:
np.array(batch, dtype=np.float32).tofile(o)
batch = []
if len(batch) > 0:
np.array(batch, dtype=np.float32).tofile(o)
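# --- Illustrative sketch (not part of the original file) ---
# The .bin file written above is a raw stream of float32 perplexities, one per
# document, so it can be read back (e.g. to derive percentile cutoffs) with:
def _example_read_perplexities(bin_path):
    import numpy as np  # already imported at module level in this file
    values = np.fromfile(bin_path, dtype=np.float32)
    return np.percentile(values, [30, 60])  # hypothetical head/tail cutoffs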
if __name__ == "__main__":
args = get_args()
output = Path(args["output"])
if output.suffix == ".bin":
perplexity_to_bin(args["file"], output, args["models"], args["field"])
else:
jsonql.run_pipe(DocLM, args)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/perplexity.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
"""Moses tokenizer with the expected preprocessing."""
LANG_WITHOUT_ACCENT = {"en", "my"}
def __init__(self, lang: str):
super().__init__()
self.lang = lang
self.moses = sacremoses.MosesTokenizer(lang)
self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
self.ready = True
def do(self, text: str):
text = text_normalizer.normalize(
text, accent=self.rm_accent, case=False, numbers=False, punct=True
)
text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
return self.moses.tokenize(text, return_str=True, escape=False)
class DocTokenizer(jsonql.Transformer):
"""Tokenize the text found in `output_field and store the result in `output_field`."""
def __init__(
self,
field: str,
output_field: str = "tokenized",
language_field: str = "language",
):
super().__init__()
self.field = field
self.output_field = output_field
self.language_field = language_field
self.n_docs = 0
self.tokenizers: Dict[str, RobustTokenizer] = {}
def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
cache = self.tokenizers
if lang in cache:
return cache[lang]
if lang in ("th", "zh", "ja"):
# TODO find a tokenizer for those languages
return None
cache[lang] = RobustTokenizer(lang)
return cache[lang]
def do(self, document):
lang = document[self.language_field]
tok = self.get_tokenizer(lang)
if not tok:
return document
self.n_docs += 1
lines = document[self.field].split("\n")
tokenized = "\n".join(tok(l) for l in lines)
document[self.output_field] = tokenized
return document
def summary(self):
delay = (time.time() - self.start_time) / 3600
speed = self.n_docs / delay
return [
f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
]
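# --- Illustrative usage sketch (not part of the original file) ---
# DocTokenizer reads `field`, looks up a Moses tokenizer for the document's
# language, and writes the tokenized text to `output_field`. A hypothetical call:
def _example_tokenize_one():
    tok = DocTokenizer(field="raw_content", output_field="tokenized")
    doc = {"language": "en", "raw_content": "Hello, world!\nSecond line."}
    return tok.do(doc)  # the returned doc also contains doc["tokenized"]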
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/tokenizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to remove duplicate paragraphs across one or several shards.
"""
import argparse
import gc
import hashlib
import logging
import multiprocessing
import os
import tempfile
import time
from pathlib import Path
from typing import Iterable, List, Optional, Set, Union
import numpy as np
from cc_net import jsonql
from cc_net.flat_hash_set import HASH_TYPE, AbstractDedupHashSet, FlatHashSet
from cc_net.jsonql import mem_footprint_gb
from cc_net.text_normalizer import normalize_for_dedup
BYTE_ORDER = "little"
HASH_SIZE = HASH_TYPE(0).nbytes
DISABLE_MULTI_PROCESSING = False
FilesOrDir = Union[List[Path], Path]
def get_args():
parser = argparse.ArgumentParser(
description="Read a set of json files and allow to query them",
parents=[jsonql.io_parser()],
)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--output_hashes", type=str)
parser.add_argument("--no_finalize", action="store_false", dest="finalize")
# parser.add_argument("--mem_gb", type=int)
parser.add_argument("--hashes", type=str)
return vars(parser.parse_args())
def _b2i(b: bytes) -> int:
return np.frombuffer(b[:HASH_SIZE], dtype=HASH_TYPE, count=1, offset=0).item(0)
def str_hash(s: str) -> int:
h = hashlib.sha1(bytes(s, encoding="utf-8"))
return _b2i(h.digest())
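# --- Illustrative sketch (not part of the original file) ---
# str_hash truncates a SHA-1 digest to HASH_SIZE bytes and reinterprets them as
# a HASH_TYPE integer (a uint64 in the default configuration), so equal strings
# always map to the same integer:
def _example_str_hash():
    a, b = str_hash("some sentence"), str_hash("some sentence")
    assert a == b and isinstance(a, int)
    return a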
log = logging.getLogger(__name__).info
def run_par(processes):
# This is different from multiprocessing.map since it allows for kwargs.
processes = list(processes)
if len(processes) == 1 or DISABLE_MULTI_PROCESSING:
for f, args, kwargs in processes:
f(*args, **kwargs)
return
log(f"Starting {len(processes)} subprocess")
processes = [
multiprocessing.Process(target=f, args=a, kwargs=kw) for (f, a, kw) in processes
]
for p in processes:
p.start()
for p in processes:
p.join()
failed = 0
for p in processes:
if p.exitcode != 0:
log(f"Process failed with code {p.exitcode}: {p}")
failed += 1
assert failed == 0, f"{failed} processes failed..."
def split_file(file, n_splits):
for i in range(n_splits):
yield jsonql.SplitFile(file, i, n_splits)
def merge(hashes_1, hashes_2, output):
if isinstance(hashes_1, str):
h1 = FlatHashSet()
h1.load(hashes_1)
else:
h1 = hashes_1
if isinstance(hashes_2, str):
h2 = FlatHashSet()
h2.load(hashes_2)
else:
h2 = hashes_2
h2_np = np.fromiter(h2.keys(), dtype=FlatHashSet.dtype, count=len(h2))
dup = h1.__contains__(h2_np)
# Dups between h1 and h2 will be set to 1, keys unique to h2 are copied to
# h1 with their value.
h1[h2_np] = dup
if output:
h1.dump(output)
return h1
def merge_shard(hash_files, output):
h = FlatHashSet()
h.load(hash_files[0])
for hash_file in hash_files[1:]:
h = merge(h, hash_file, output=None)
print(f"Merged {hash_file}. We now have {len(h)} hashes.")
h.dump(output)
print(f"Saved {len(h)} hashes to {output}.")
def _dump_sentence_hashes(source: Path, output: Path, field: str):
treated = 0
started = time.time()
with open(output, "wb") as o:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content:
continue
h = compute_hashes(content)
if h is None:
continue
h.tofile(o)
treated += 1
if treated % 100_000 == 0:
delay = time.time() - started
log(
f"Computed {treated} documents hashes in {delay / 3600:.2f}h ({treated / delay} doc / s)"
)
def _remove_duplicate_hashes(duplicates, source, output):
batch_size = 100_000
n_lines, n_lines_kept = 0, 0
with open(source, "rb") as f, open(output, "wb") as o:
log(f"Opening {source} with mode rb")
log(f"Opening {output} with mode wb")
while True:
hashes = np.fromfile(f, dtype=HASH_TYPE, count=batch_size)
if hashes.size == 0:
break
keep = duplicates[hashes] < 1
kept = keep.sum()
hashes *= keep
hashes.tofile(o)
n_lines += hashes.size
n_lines_kept += kept
removed = n_lines - n_lines_kept
selectivity = n_lines_kept / n_lines if n_lines else 0
log(f"Removed {removed} duplicate hashes with selectivity: {selectivity:3.1%}")
def remove_duplicates_sharded(
files: List[Path],
outputs: List[Path],
hashes_dir: FilesOrDir,
field: str,
group_hashes: int = 1,
tmp_dir: Path = None,
min_len: int = 0,
):
"""Remove duplicates in several passes, when all hashes don't fit in RAM.
Note: The current implementation is not doing a 'perfect' deduplication.
If a hash appears exactly once in each shard of hashes it won't be detected
as a duplicate. This can be fixed if the hashes are fully deduplicated beforehand.
"""
assert len(files) == len(outputs)
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
if len(hashes_files) <= group_hashes:
log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
rm_dups = DuplicatesRemover(field, hashes_files)
rm_dups._prepare()
run_par(
(jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
for f, o in zip(files, outputs)
)
return
log(f"Starting deduplicate_sharded on {files}.")
tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
def tmp_files(i):
return [
Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
for f in files
]
last = tmp_files(0)
run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
hashes = FlatHashSet()
for h in group:
hashes.load(h)
log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
intermediates = tmp_files(i + 1)
# Remove hashes in parallel. Since modern OS have "copy-on-write" and
# `hashes` is read-only, we will only have one version of it in RAM.
run_par(
(_remove_duplicate_hashes, (hashes, f, tmp), {})
for f, tmp in zip(last, intermediates)
)
# Force hashes to be freed, before we start allocating a new one.
del hashes
gc.collect()
for tmp in last:
os.remove(tmp)
last = intermediates
def finalize(source, dedup_hashes, min_len):
n_chars, n_chars_kept = 0, 0
with open(dedup_hashes, "rb") as hashes:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content or len(content) < min_len:
continue
sentences = content.split("\n")
doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
chars, kept_chars = finalize_doc(doc, field, doc_hashes)
n_chars += chars
n_chars_kept += kept_chars
yield doc
selectivity = n_chars_kept / n_chars if n_chars else 0
log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
dedup_hashes = last
run_par(
[
(
jsonql.run_pipe,
(finalize,),
dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
)
for h, f, o in zip(dedup_hashes, files, outputs)
]
)
tmp_directory.cleanup()
def compute_hashes(content) -> Optional[np.ndarray]:
if not content:
return None
lines = content.split("\n")
# save hashes as bytes but reinterpret them as uint64.
hashes = np.fromiter(
(
hashlib.sha1(bytes(normalize_for_dedup(l), encoding="utf-8")).digest()[
:HASH_SIZE
]
for l in lines
),
dtype=np.dtype((bytes, HASH_SIZE)),
count=len(lines),
)
return np.ndarray(dtype=HASH_TYPE, buffer=hashes.data, shape=hashes.shape)
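# --- Illustrative sketch (not part of the original file) ---
# compute_hashes() produces one HASH_TYPE value per line of `content`, hashing
# the dedup-normalized line, so repeated lines collide by construction:
def _example_compute_hashes():
    hashes = compute_hashes("first line\nsecond line\nfirst line")
    assert hashes is not None and hashes.shape == (3,)
    assert hashes[0] == hashes[2]  # identical lines share the same hash
    return hashes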
def finalize_doc(doc, field, hashes=None):
content = doc.get(field)
lines = content.split("\n")
n_chars = len(content)
if "original_nlines" not in doc:
doc["original_nlines"] = doc.get("nlines", len(lines))
if "original_length" not in doc:
doc["original_length"] = doc.get("length", n_chars)
if hashes is None:
hashes = doc.pop(field + "_hash")
# Remove duplicates inside doc
seen: Set[int] = set()
original_line_ids = doc.get("line_ids", range(len(hashes)))
line_ids = []
new_lines = []
for l, line, h in zip(original_line_ids, lines, hashes):
if h not in seen and h != 0:
line_ids.append(l)
new_lines.append(line)
seen.add(h)
doc[field] = "\n".join(new_lines)
doc["nlines"] = len(line_ids)
n_chars_kept = len(doc[field])
doc["length"] = n_chars_kept
doc["line_ids"] = line_ids
return n_chars, n_chars_kept
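# --- Illustrative sketch (not part of the original file) ---
# finalize_doc() drops lines whose hash is 0 (flagged as duplicates elsewhere)
# or already seen inside the same document, and records the surviving line ids:
def _example_finalize_doc():
    doc = {"raw_content": "a\nb\na\nc"}
    hashes = np.array([11, 22, 11, 0], dtype=HASH_TYPE)
    n_chars, n_chars_kept = finalize_doc(doc, "raw_content", hashes)
    # doc["raw_content"] == "a\nb", doc["line_ids"] == [0, 1], doc["nlines"] == 2
    return n_chars, n_chars_kept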
class HashesCollector(jsonql.Transformer):
"""
Collect all hashes of the lines found in the `field` of the source documents.
"""
parallelisable = False
def __init__(
self, field: str, output: Path = None, hashes: AbstractDedupHashSet = None
):
super().__init__()
self.n_lines = 0
self.field = field
self.output = output
self.hashes = FlatHashSet() if hashes is None else hashes
self.num_hashes_end = 0
self.num_hashes_start = len(self.hashes)
def summary(self) -> List[str]:
summ = super().summary()
h = self.num_hashes_end if self.hashes is None else len(self.hashes)
h = (h - self.num_hashes_start) // 1000
max_mem = mem_footprint_gb()
n = self.n_lines // 1000
summ.append(
f"Found {h:_}k unique hashes over {n:_}k lines. Using {max_mem:.1f}GB of RAM."
)
return summ
def do(self, doc: dict) -> None:
doc_hashes = compute_hashes(doc.get(self.field))
if doc_hashes is None:
return
self.hashes.add(doc_hashes)
self.n_lines += doc_hashes.size
def close(self):
if self.output and self.hashes:
self.hashes.dump(self.output)
self.log(f"Saved {len(self.hashes)} hashes to {self.output}")
# Save the number of hashes.
self.num_hashes_end = len(self.hashes)
# Free up mem even if the transformer is kept somewhere else.
self.hashes = None # type: ignore
class DuplicatesRemover(jsonql.Transformer):
"""DuplicatesRemover"""
# The hashes can't be pickled so they will have to be read back from disk.
warn_when_pickling = True
def __init__(self, field: str, hashes_files: List[Path], collect: bool = False):
"""
Remove duplicates
"""
super().__init__()
self.field = field
self.collect = collect
self.hashes_files = hashes_files
self.duplicates: Optional[AbstractDedupHashSet] = None
self.n_lines, self.n_lines_kept = 0, 0
self.n_chars, self.n_chars_kept = 0, 0
def _prepare(self):
if self.duplicates is not None:
return
self.duplicates = FlatHashSet()
start = time.time()
for h in self.hashes_files:
shard_start = time.time()
self.duplicates.load(str(h))
delay = time.time() - shard_start
self.log(
f"Loaded hashes from {h} ({mem_footprint_gb():.3f}GB total, took {delay / 60:.1}m)"
)
delay = time.time() - start
self.log(
f"Loaded {len(self.duplicates):_d} hashes from {len(self.hashes_files)} files. ({mem_footprint_gb():.1f}GB total, took {delay / 60:.1}m)"
)
def do(self, doc: dict) -> Optional[dict]:
content = doc.get(self.field)
if not content:
return None
doc_hashes = compute_hashes(content)
assert self.duplicates is not None
seen = (
self.duplicates.add(doc_hashes)
if self.collect
else self.duplicates[doc_hashes]
)
keep = seen < True
kept = keep.sum()
if kept == 0:
return None
doc_hashes = doc_hashes * keep
self.n_lines += keep.size
self.n_lines_kept += kept
chars, kept_chars = finalize_doc(doc, self.field, hashes=doc_hashes)
self.n_chars += chars
self.n_chars_kept += kept_chars
return doc
def summary(self) -> List[str]:
summ = super().summary()
end_time = time.time()
n_lines_kept, n_lines, n_docs = self.n_lines_kept, self.n_lines, self.processed
speed = n_docs / (end_time - self.start_time)
summ.append(
f"Processed {self.n_lines} lines in {n_docs} docs. [{speed:.1f} doc/s]"
)
selectivity = self.n_lines_kept / self.n_lines if n_lines else 0
summ.append(f"Kept {n_lines_kept} lines out of {n_lines} ({selectivity:.1%}).")
n_chars_kept, n_chars = self.n_chars_kept, self.n_chars
selectivity = n_chars_kept / n_chars if n_chars else 0
summ.append(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
return summ
def deduplicate(
file: jsonql.ReadableFileLike, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (but keep the first occurence)."""
dup_remover = DuplicatesRemover(field, [], collect=True)
return dup_remover.map(jsonql.read_jsons(file))
def deduplicate_two_pass(
file: jsonql.FileDescriptor, field: str = "raw_content"
) -> Iterable[dict]:
"""Remove duplicates of the given file (even removing the first occurence).
This is what is done in the paper, and in mine.py
"""
try:
if isinstance(file, Path):
hash_file: Path = file.with_suffix(".bin")
else:
hash_file = jsonql._tmp(Path("hashes.bin"))
jsonql.run_pipes(
jsonql.JsonReader(), HashesCollector(field, output=hash_file), file=file
)
dup_remover = DuplicatesRemover(field, [hash_file])
return dup_remover.map(jsonql.read_jsons(file))
finally:
if hash_file.exists():
hash_file.unlink()
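# --- Illustrative usage sketch (not part of the original file) ---
# deduplicate() streams documents from a shard and keeps only the first
# occurrence of every paragraph; deduplicate_two_pass() (above) additionally
# drops the first occurrence, as done in the paper and in mine.py.
def _example_deduplicate(json_file: Path):
    # `json_file` is a hypothetical path to a shard of JSON documents.
    return list(deduplicate(json_file, field="raw_content"))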
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/dedup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import logging
import re
import tempfile
import time
import urllib.request
from pathlib import Path
from typing import ContextManager, Iterable, Iterator, List, Optional, Sequence
from urllib.parse import urlparse
import func_argparse
from bs4 import BeautifulSoup # type: ignore
from cc_net import jsonql
WET_URL_ROOT = "https://data.commoncrawl.org"
logger = logging.getLogger(__name__)
def cc_wet_paths_url(dump_id: str) -> str:
return "/".join([WET_URL_ROOT, "crawl-data", "CC-MAIN-" + dump_id, "wet.paths.gz"])
@functools.lru_cache()
def cc_segments(dump_id: str, cache_dir: Path = None) -> List[str]:
wet_paths = cc_wet_paths_url(dump_id)
cache_dir = cache_dir or jsonql._tmp_dir()
wet_paths_cache = cache_dir / f"wet_{dump_id}.paths.gz"
f = jsonql.open_remote_file(wet_paths, cache=wet_paths_cache)
return [segment.strip() for segment in f]
def list_dumps() -> List[str]:
home_page = BeautifulSoup(
urllib.request.urlopen("http://index.commoncrawl.org/"), features="html.parser"
)
dumps = [a.get("href").strip("/") for a in home_page.findAll("a")]
dumps = [a[8:] for a in dumps if re.match(r"^CC-MAIN-\d\d\d\d-\d\d$", a)]
return sorted(dumps)
def ls():
for dump in list_dumps():
print(dump, "->", cc_wet_paths_url(dump))
def parse_doc(headers: List[str], doc: List[str]) -> Optional[dict]:
"""BEFORE 2020, Headers format is:
WARC/1.0
WARC-Type: conversion
WARC-Target-URI: [url]
WARC-Date: [crawldate: 2019-02-15T19:15:59Z]
WARC-Record-ID: <urn:uuid:8865156e-d5f1-4734-9c68-4b46eaf2bb7e>
WARC-Refers-To: <urn:uuid:340152e2-65cf-4143-b522-8ce4e2d069d7>
WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2
Content-Type: text/plain
Content-Length: 7743
AFTER 2020, Headers format is:
WARC/1.0
WARC-Type: conversion
WARC-Target-URI: http://100greatpiano.com/video/wilhelm-kempff-plays-beethovens-moonlight-sonata/
WARC-Date: 2023-01-26T22:21:08Z
WARC-Record-ID: <urn:uuid:ccafeba8-a08b-47d0-86be-cf0855f4f6d0>
WARC-Refers-To: <urn:uuid:935a6ef4-8708-41f5-a152-412cdf1b48c1>
WARC-Block-Digest: sha1:2WURD74BLDCLPV6INBQEQ6OOJRQDPJBA
WARC-Identified-Content-Language: eng,jpn
Content-Type: text/plain
Content-Length: 886
"""
if not headers or not doc:
return None
try:
warc_type = headers[1].split()[1]
if warc_type != "conversion":
return None
url = headers[2].split()[1]
date = headers[3].split()[1]
digest = headers[6].split()[1]
# Content-Length may be at index 8 or 9 depending on the header format (see docstring above).
try:
length = int(headers[9].split()[1])
except IndexError as e:
length = int(headers[8].split()[1])
except Exception as e:
logger.warning("Can't parse header:", e, headers, doc)
return None
# Docs are separated by two empty lines.
last = None
if not doc[-1] and not doc[-2]:
last = -2
title, doc = doc[0], doc[1:last]
return {
"url": url,
"date_download": date,
"digest": digest,
"length": length,
"nlines": len(doc),
"source_domain": urlparse(url).netloc,
"title": title,
"raw_content": "\n".join(doc),
}
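# --- Illustrative sketch (not part of the original file) ---
# A minimal pre-2020 header block (9 lines, so Content-Length is read from
# headers[8] after the headers[9] lookup raises IndexError) parses as follows:
def _example_parse_doc():
    headers = [
        "WARC/1.0",
        "WARC-Type: conversion",
        "WARC-Target-URI: http://example.com/page",
        "WARC-Date: 2019-02-15T19:15:59Z",
        "WARC-Record-ID: <urn:uuid:00000000-0000-0000-0000-000000000000>",
        "WARC-Refers-To: <urn:uuid:00000000-0000-0000-0000-000000000001>",
        "WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2",
        "Content-Type: text/plain",
        "Content-Length: 28",
    ]
    doc = ["Example title", "First paragraph of the page."]
    return parse_doc(headers, doc)
    # -> {"url": "http://example.com/page", "title": "Example title",
    #     "raw_content": "First paragraph of the page.", "nlines": 1, ...}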
def group_by_docs(warc_lines: Iterable[str]) -> Iterable[dict]:
doc: List[str] = []
headers, read_headers = [], True
for warc in warc_lines:
warc = warc.strip()
if read_headers:
headers.append(warc)
read_headers = warc != ""
continue
if warc == "WARC/1.0":
# We reached the beginning of the new doc.
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
headers, doc, read_headers = [warc], [], True
continue
doc.append(warc)
# Return the last document
if doc:
parsed = parse_doc(headers, doc)
if parsed is not None:
yield parsed
def parse_warc_file(lines: Iterable[str], min_len: int = 1) -> Iterator[dict]:
n_doc = 0
n_ok = 0
for doc in group_by_docs(lines):
n_doc += 1
if not doc or len(doc["raw_content"]) < min_len:
continue
n_ok += 1
yield doc
if n_doc > 0:
logger.info(f"Kept {n_ok:_d} documents over {n_doc:_d} ({n_ok / n_doc:.1%}).")
else:
logger.info(f"Found no documents")
def dl(
dump: str,
shard: int,
num_shards: int,
output: Path = None,
num_segments_per_shard: int = 0,
):
"""Download a shard of the common crawl, and export it to json.
Arguments:
output: filename of the output file
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: manual control of the number of segment per shard.
"""
reader = CCShardReader(dump, shard, num_shards, num_segments_per_shard)
jsonql.run_pipes(inputs=reader, output=output)
logger.info(f"Done. {output} is ready.")
class CCSegmentsReader(Iterable[dict]):
def __init__(
self, segments: Sequence[str], min_len: int = 0, cache_dir: Path = None
):
self._segments = segments
self.min_len = min_len
if cache_dir is not None:
cache_dir = Path(cache_dir)
cache_dir.mkdir(exist_ok=True)
self.cache_dir = cache_dir
self.retrieved_segments = 0
def segment_url(self, segment: str):
return "/".join((WET_URL_ROOT, segment))
@property
def segments(self) -> Sequence[str]:
return self._segments
def open_segment(self, segment: str) -> Iterable[str]:
url = self.segment_url(segment)
file: Optional[Path] = None
if self.cache_dir:
file = self.cache_dir / segment.split("/")[-1]
if not file or not file.exists():
self.retrieved_segments += 1
return jsonql.open_remote_file(url, cache=file)
def __iter__(self) -> Iterator[dict]:
n = len(self.segments)
for i, segment in enumerate(self.segments):
start = time.time()
# TODO: start downloading the next segment in the background
for doc in parse_warc_file(self.open_segment(segment), self.min_len):
doc["cc_segment"] = segment
yield doc
if i + 1 >= n:
continue
end = time.time()
delay = (end - start) / 3600 * (n - 1 - i)
logger.info(
f"Parsed {i + 1} / {n} files. Estimated remaining time: {delay:.1f}h"
)
class CCShardReader(CCSegmentsReader):
def __init__(
self,
dump: str,
shard: int,
num_shards: int = -1,
num_segments_per_shard: int = 40,
min_len: int = 300,
cache_dir: Path = None,
):
"""Downloads a shard of Common Crawl, and yields dict.
Arguments:
dump: CC dump id
shard: id of the shard
num_shards: total number of shards
num_segments_per_shard: if set will limit the number of files by shard.
Useful for testing.
"""
super().__init__([], min_len=min_len, cache_dir=cache_dir)
self.dump = dump
self.shard = shard
assert num_shards > 0 or num_segments_per_shard > 0
self.num_shards = num_shards
self.num_segments_per_shard = num_segments_per_shard
@property
def segments(self) -> Sequence[str]:
# Delaying the initialization lets us postpone looking up the WET files
if self._segments:
return self._segments
segments = cc_segments(self.dump, self.cache_dir)
n = len(segments)
if self.num_shards < 0:
self.num_shards = n // self.num_segments_per_shard
i_min = (self.shard * n) // self.num_shards
i_max = ((self.shard + 1) * n) // self.num_shards
if self.num_segments_per_shard > 0:
i_max = min(i_max, i_min + self.num_segments_per_shard)
self._segments = segments[i_min:i_max]
return self._segments
def _tmp(prefix: str = None, suffix: str = None, dir: Path = None) -> Path:
_, tmp_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
return Path(tmp_path)
@contextlib.contextmanager
def timer(name: str = "-"):
start = time.time()
yield None
delay = time.time() - start
print(f"{name} took {delay:.1f}s")
def benchmark(tmp_path: Path):
segments = [
"crawl-data/CC-MAIN-2019-09/segments/1550249406966.99/wet/CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
]
seg_file = tmp_path / "CC-MAIN-20190222220601-20190223002601-00441.warc.wet.gz"
with timer("from network"):
list(CCSegmentsReader(segments))
with timer("from network, with caching"):
list(CCSegmentsReader(segments, cache_dir=tmp_path))
assert seg_file.exists()
with timer("from disk"):
CCSegmentsReader(segments, cache_dir=tmp_path)
seg_file.unlink()
if __name__ == "__main__":
func_argparse.main(ls, dl)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/process_wet_file.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import func_argparse
import cc_net.mine
def main():
func_argparse.parse_and_call(cc_net.mine.get_main_parser())
if __name__ == "__main__":
main()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/__main__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
from pathlib import Path
from typing import Dict, Optional
import fasttext # type: ignore
from cc_net import jsonql
def get_args():
parser = argparse.ArgumentParser(
description="Read a list of json files and split them ",
parents=[jsonql.io_parser()],
)
parser.add_argument("--pattern", type=str)
parser.add_argument("--field", type=str, default="raw_content")
parser.add_argument("--threshold", type=float, default=0)
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--out_field", type=str, default="language")
parser.add_argument("--top", type=int, default=1)
return vars(parser.parse_args())
def predict(model, text: str, k: int = 1):
labels, scores = model.predict(text, k=k)
labels = [l.replace("__label__", "") for l in labels]
return labels, scores
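# --- Illustrative usage sketch (not part of the original file) ---
# fastText language-id models return labels prefixed with "__label__"; the
# wrapper above strips that prefix. `model_path` below is a hypothetical path
# to a fastText language-identification model.
def _example_predict(model_path: str = "lid.bin"):
    model = fasttext.load_model(model_path)
    labels, scores = predict(model, "Bonjour tout le monde", k=1)
    return labels, scores  # e.g. (["fr"], array([0.99...]))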
def avg_predict(model, text):
# Overall this gives the same results as predict(model, text.replace("\n", ""))
text = text.split("\n")
text_len = sum(len(line) for line in text)
if text_len == 0:
return None, 0
scores = [predict(model, line) for line in text]
scores_by_label: Dict[str, float] = collections.defaultdict(float)
for (labels, label_scores), line in zip(scores, text):
    # predict() returns a list of labels and an array of scores per line;
    # keep the top prediction, weighted by the line length.
    scores_by_label[labels[0]] += label_scores[0] * len(line)
label, score = max(scores_by_label.items(), key=lambda kv: kv[1])
return label, score / text_len
class Classifier(jsonql.Transformer):
def __init__(
self,
model: Path,
field: str,
out_field: str,
threshold: float = 0,
top: int = 1,
language: str = None,
rounding: int = 2,
):
super().__init__()
self.model = model
assert model.exists(), f"Model {model} doesn't exist."
self.field = field
self.out_field = out_field
self.threshold = threshold
self.top = top
self.language = language
self.rounding = rounding
# Fasttext model is a C object and can't be pickled
self.fasttext_model: fasttext._FastText = None
self.n_doc, self.n_accepted, self.n_ignored, self.n_disagreement = 0, 0, 0, 0
self.cnt: Dict[str, int] = {}
def _prepare(self):
self.log(f"Loading {self.model}")
self.fasttext_model = fasttext.load_model(str(self.model))
def predict(self, text):
return predict(self.fasttext_model, text.replace("\n", ""), k=self.top)
def do(self, doc: dict) -> Optional[dict]:
text = doc.get(self.field, None)
if not text:
return None
if self.language and doc.get("language") != self.language:
self.n_ignored += 1
return doc
self.n_doc += 1
labels, scores = self.predict(text)
scores.round(self.rounding, out=scores)
for l in labels:
self.cnt[l] = self.cnt.get(l, 0) + 1
if self.top == 1:
existing_label = doc.get(self.out_field, None)
if existing_label and labels[0] != existing_label:
self.n_disagreement += 1
if all(s < self.threshold for s in scores):
return None
self.n_accepted += 1
if self.top == 1:
doc[self.out_field] = labels[0]
doc[self.out_field + "_score"] = scores[0]
else:
doc[self.out_field] = {l: s for l, s in zip(labels, scores)}
return doc
def summary(self):
n_doc, n_accepted, n_disagreement, cnt, out_field = (
self.n_doc,
self.n_accepted,
self.n_disagreement,
self.cnt,
self.out_field,
)
summ = super().summary()
if self.threshold > 0:
ratio = n_accepted / n_doc if n_doc else 0
summ.append(f"Kept {n_accepted} docs over {n_doc} ({ratio :.1%})")
summ.append(f"Found {len(cnt)} {out_field} labels: {cnt}")
disagreement = n_disagreement / n_doc if n_doc else 0
if disagreement:
summ.append(f"{out_field} disagreement is at {disagreement:.1%}.")
return summ
def __repr__(self):
return f"Classifier({self.model})"
def classify_and_split(file, output, pattern, **kwargs):
classifier = Classifier(**kwargs)
splitter = jsonql.split(pattern)
jsonql.run_pipes(classifier, splitter, file=file, output=output)
if __name__ == "__main__":
args = get_args()
pattern = args.get("pattern")
if pattern:
classify_and_split(**args)
else:
args.pop("pattern")
jsonql.run_pipe(Classifier, args)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/split_by_lang.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import contextlib
import functools
import gzip
import logging
import multiprocessing
from collections import defaultdict
from pathlib import Path
from typing import Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple
import cc_net
from cc_net import jsonql
from cc_net.process_wet_file import CCSegmentsReader
# Set this to a directory to use as cache for intermediary files.
# This helps for debugging.
WET_CACHE = None
# WET_CACHE = Path("wet_cache")
S3_BUCKET = "https://dl.fbaipublicfiles.com/cc100"
VERSION = "1.0.0"
CC_100_SNAPSHOTS = [
"2018-05",
"2018-09",
"2018-13",
"2018-17",
"2018-22",
"2018-26",
"2018-30",
"2018-34",
"2018-39",
"2018-43",
"2018-47",
"2018-51",
]
BIG_LANGUAGES = {
"es_XX",
"fr_XX",
"de_DE",
"ja_XX",
"ru_RU",
"zh_CN",
"en_XX",
"it_IT",
"ar_AR",
"nl_XX",
"pl_PL",
"pt_XX",
"tr_TR",
"zh_TW",
}
class Paragraph(NamedTuple):
lang: str
text: str
lm_score: float
def _dl_shard(snapshot: str, shard: int) -> Iterator[Paragraph]:
"""
Download metadata from a shard.
Sample metadata:
{
"cc_segment": "crawl-data/CC-MAIN-2018-51/segments/1544376823009.19/wet/CC-MAIN-20181209185547-20181209211547-00000.warc.wet.gz",
"digest": "sha1:222LWNHN5FM26XGS7WJSMI6IISTVWBKJ",
"url": "http://personals.gearplay.com/ads/DRJONES.htm",
"line_ids": [10],
"languages": ["en_XX"],
"lm_scores": [-2.658],
}
"""
snapshot = snapshot.replace("-", "_")
name = f"snap_{snapshot}_batch_{shard}.json.gz"
url = "/".join([S3_BUCKET, VERSION, name])
shard_metadata: Dict[str, Dict[str, dict]] = defaultdict(dict)
try:
cache_file: Optional[Path] = None
if WET_CACHE is not None:
cache_file = WET_CACHE / name
metadata_file = jsonql.open_remote_file(url, cache_file)
except:
logging.warning(f"Couldn't open {url}")
return
for meta in jsonql.read_jsons(metadata_file):
shard_metadata[meta["cc_segment"]][meta["digest"]] = meta
found_pars, missed_pars = 0, 0
for seg, segment_metadata in shard_metadata.items():
for doc in CCSegmentsReader([seg], cache_dir=WET_CACHE):
if doc["digest"] not in segment_metadata:
continue
meta = segment_metadata[doc["digest"]]
full_pars = [doc["title"]] + doc["raw_content"].split("\n")
assert len(meta["line_ids"]) == len(meta["languages"])
assert len(meta["line_ids"]) == len(meta["lm_scores"])
for i, lang, score in zip(
meta["line_ids"], meta["languages"], meta["lm_scores"]
):
if snapshot != "2018-51" and lang in BIG_LANGUAGES:
# Big languages only come from "2018-51" snapshot
continue
if i >= len(full_pars):
# This is because CC100 was created by saving only urls.
# Some urls appear in different snapshots with slightly different
# versions, but we don't know which one is correct.
# Here we read both versions, but some indices may end up
# being incorrect.
# This impacts ~3% of documents.
missed_pars += 1
continue
yield Paragraph(lang, full_pars[i], score)
found_pars += 1
if missed_pars > 0:
logging.warning(
f"Missed {missed_pars} ({missed_pars / found_pars:%}) paragraphes."
)
def _split_by_par(
paragraphes: Iterator[Paragraph], snapshot: str, shard: int, outdir: Path
) -> int:
outdir.mkdir(exist_ok=True)
outfiles = {}
num_pars = 0
try:
for par in paragraphes:
# MODIFY ME: filter paragraph if needed (languages, score, ...)
if par.lang not in outfiles:
(outdir / par.lang).mkdir(exist_ok=True)
outfile = outdir / par.lang / f"snap_{snapshot}_batch_{shard}.gz"
outfiles[par.lang] = gzip.open(outfile, "wt")
print(par.text, file=outfiles[par.lang])
num_pars += 1
finally:
for o in outfiles.values():
o.close()
logging.info(f"Extracted {num_pars:_d} paragraphs from shard {snapshot}_{shard}")
return num_pars
def dl_shard(snapshot: str, shard: int, outdir: Path) -> int:
return _split_by_par(_dl_shard(snapshot, shard), snapshot, shard, outdir)
@contextlib.contextmanager
def unordered_map(processes: int):
if processes == 0:
yield map
return
with multiprocessing.Pool(processes) as pool:
yield pool.imap_unordered
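# --- Illustrative usage sketch (not part of the original file) ---
# unordered_map(0) degrades to the builtin map (no subprocesses), which is
# convenient for debugging; any positive value uses a multiprocessing pool:
def _example_unordered_map(n_processes: int = 0):
    with unordered_map(n_processes) as umap:
        return sorted(umap(len, ["a", "bb", "ccc"]))  # [1, 2, 3]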
def dl_snapshot(snapshot: str, outdir: Path, processes: int = 1) -> None:
_dl_shard = functools.partial(dl_shard, snapshot, outdir=outdir)
with unordered_map(processes) as umap:
num_pars = sum(umap(_dl_shard, range(500)))
logging.info(f"Extracted {num_pars:_d} paragraphs from snapshot {snapshot}.")
def dl(
snapshot: str = None, outdir: Path = Path("data_cc100"), processes: int = 1
) -> None:
"""
Download CC100 corpus.
Will create one text file per language and CC snapshot.
- snapshot: restrict to one snapshot. Useful for parallelization.
- outdir: output directory
- processes: number of processes to use
"""
if snapshot is None:
snapshots = CC_100_SNAPSHOTS
else:
snapshots = snapshot.split(",")
invalids = [s for s in snapshots if s not in CC_100_SNAPSHOTS]
assert not invalids, f"Invalid snapshots {invalids}, chose from {CC_100_SNAPSHOTS}"
for snapshot in snapshots:
dl_snapshot(snapshot, outdir, processes)
if __name__ == "__main__":
import func_argparse
func_argparse.single_main(dl)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/tools/dl_cc_100.py
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/tools/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This code is used to train a fastText classifier to label documents with DMOZ categories.
The data, distributed under the cc-by 3.0 license
(https://web.archive.org/web/20140605215533/http://www.dmoz.org/license.html),
can be downloaded from
https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz.
"""
import urllib.request
from io import StringIO
from pathlib import Path
from typing import Dict, Set
from urllib.parse import urlparse
import func_argparse
from lxml import etree # type: ignore
from cc_net import jsonql
TaggedUrls = Dict[str, Set[str]]
DMOZ_TAGS_URL = "https://web.archive.org/web/20140617145301/http://rdf.dmoz.org/rdf/content.rdf.u8.gz"
def add_tags(url: str, tags: Set[str], url2tags: TaggedUrls):
if url in url2tags:
url2tags[url] &= tags
else:
url2tags[url] = tags
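# --- Illustrative sketch (not part of the original file) ---
# When a url is seen several times, add_tags keeps only the tags common to all
# occurrences (set intersection) rather than accumulating every tag:
def _example_add_tags():
    url2tags: TaggedUrls = {}
    add_tags("http://example.com/a", {"Arts", "Animation"}, url2tags)
    add_tags("http://example.com/a", {"Arts", "Music"}, url2tags)
    return url2tags  # {"http://example.com/a": {"Arts"}}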
def load_tags(filename: Path = None) -> TaggedUrls:
if filename is None:
with StringIO("".join(jsonql.open_remote_file(DMOZ_TAGS_URL))) as dmoz:
tree = etree.parse(dmoz)
else:
tree = etree.parse(str(filename))
root = tree.getroot()
url2tags: Dict[str, Set[str]] = {}
for external_page in root.iterfind("{http://dmoz.org/rdf/}ExternalPage"):
url = external_page.get("about")
domain = urlparse(url).netloc
for topic in external_page.iterfind("{http://dmoz.org/rdf/}topic"):
# print(url, topic.text)
# Tags looks like Top/Arts/Animation/Anime/Collectibles
tags = set(topic.text.split("/")[1:])
add_tags(url, tags, url2tags)
add_tags(domain, tags, url2tags)
return url2tags
def dl(output: Path) -> None:
urllib.request.urlretrieve(DMOZ_TAGS_URL, output)
def make_corpus(file: Path, tags_file: Path = None, output: Path = None) -> None:
"""
Loads a tags file and creates a training dataset from the given webpages.
Arguments:
- file: CC shard file
- tags_file: DMOZ tagging file (like the one produced by `dl`)
- output: destination file for the fastText training lines
"""
url2tags = load_tags(tags_file)
with jsonql.open_write(output) as o:
for document in jsonql.read_jsons(file):
if not document:
continue
url = document["url"]
domain = document["source_domain"]
if url in url2tags:
tags = url2tags[url]
elif domain in url2tags:
tags = url2tags[domain]
else:
continue
if len(tags) == 0:
continue
fasttext_tags = ["__label__" + tag for tag in tags]
content = document["tokenized"].replace("\n", " ").lower()
if len(content) > 200:
print(" ".join(fasttext_tags), content, file=o) # type: ignore
if __name__ == "__main__":
func_argparse.single_main(make_corpus)
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/tools/make_dmoz_corpus.py
|