from metaloop.client import MDS

import csv
import json
import os

import datasets
from filelock import FileLock

# Storage backend used when exporting data from metaloop ("external" by default).
export_type = os.getenv("DATASET_EXPORT_TYPE", "external")

# TODO: replace template citation with the real one if/when published.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_DESCRIPTION = """\
Adapt metaloop's private data format to the HuggingFace dataset format.
"""

_HOMEPAGE = "http://data.deepglint.com/"

_LICENSE = "MIT"

_URLS = {}


class MetaloopVlmDataset(datasets.GeneratorBasedBuilder):
    """HuggingFace dataset builder adapted from a metaloop (private-format) dataset.

    Reads a task-config JSON (token, callback URL, dataset name/version),
    exports the referenced metaloop dataset to a local cache directory, and
    yields multiple-choice VQA examples for the TEST split.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="metaloop", version=VERSION, description=_DESCRIPTION),
    ]

    DEFAULT_CONFIG_NAME = "metaloop"

    def _info(self):
        """Return the DatasetInfo describing the example schema."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "job_id": datasets.Value("string"),
                "images": [datasets.Value("string")],
                "tag": datasets.Value("string"),
                "question": datasets.Value("string"),
                "options": [datasets.Value("string")],
                "answer": datasets.Value("string"),
                "question_type": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def prepare(self, token, callback_url, dataset_name, dataset_version):
        """Export the metaloop dataset to a local cache directory.

        Args:
            token: metaloop API token.
            callback_url: metaloop server URL; empty string means "use default".
            dataset_name: name of the dataset to export.
            dataset_version: version to check out.

        Returns:
            (data_dir, data_type): local export directory and the dataset's
            internal data type (e.g. "vlm").
        """
        # BUG FIX: the original unconditionally re-assigned
        # `mds = MDS(token, callback_url)` after this branch, which clobbered
        # the token-only client whenever callback_url was empty.
        if callback_url == '':
            mds = MDS(token)
        else:
            mds = MDS(token, callback_url)

        data_dir = os.path.join(self.config.data_dir, dataset_name, str(dataset_version))
        dir_lock = os.path.join(data_dir, '_lock')

        dataset = mds.get_dataset(dataset_name, dataset_meta_internal_type='leaderboard')
        dataset.checkout(dataset_version)
        data_type = dataset._data_type

        # exist_ok replaces the original racy `if not exists: try/except` dance.
        os.makedirs(data_dir, 0o0775, exist_ok=True)

        # Serialize the export across concurrent workers; only the first one
        # actually downloads, the rest reuse the cached output.json.
        with FileLock(dir_lock):
            if not os.path.exists(os.path.join(data_dir, 'output.json')):
                dataset.export_data(file_path=data_dir, storage_type=export_type, unencrypted=True)
            else:
                print(f'use dataset({data_dir}) cache, skip export!!!!!')
        return data_dir, data_type

    def parse_vlm_data(self, data_dir):
        """Parse the exported output.json (one JSON object per line) into a list of dicts."""
        output_json = os.path.join(data_dir, 'output.json')
        objs = []
        with open(output_json, "r") as f:
            for line in f:
                line = line.strip()
                if line != '':
                    objs.append(json.loads(line))
        return objs

    def parse_tag_data(self, data_dir, tags):
        """Parse tag-annotated output.json into (image_path, label) pairs.

        A record is labeled 1 if any of its (non-deleted) annotation results
        carries one of the requested `tags`, else 0. Records whose first result
        has tagtype 'delete' are skipped entirely.
        """
        output_json = os.path.join(data_dir, 'output.json')
        objs = []
        with open(output_json, "r") as f:
            for line in f:
                line = line.strip()
                if line == '':
                    continue
                obj = json.loads(line)
                if 'result' in obj and len(obj['result']) > 0 and obj['result'][0]['tagtype'] != 'delete':
                    has = False
                    for tag in tags:
                        for r in obj['result']:
                            if 'datatype' in r and (r['tagtype'] == tag):
                                has = True
                                break
                    image_path = os.path.join(data_dir, obj['url_image'])
                    objs.append((image_path, 1 if has else 0))
        return objs

    def _split_generators(self, dl_manager):
        """Read the task-config JSON and build the single TEST split."""
        with open(self.config.data_files['task_config'][0]) as f:
            config = json.load(f)
        mds_token = config['token']
        mds_server = config['callback_url']
        dataset_name = config['test_datasets'][0]['name']
        version = config['test_datasets'][0]['version']
        data_dir, data_type = self.prepare(mds_token, mds_server, dataset_name, version)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "data_dir": data_dir,
                    "split": "all",
                    "config": config,
                    "data_type": data_type,
                },
            )
        ]

    def _generate_examples(self, config, data_dir, split, data_type):
        """Yield (key, example) pairs.

        For data_type == "vlm" the exported records already carry questions/
        options/answers; otherwise tag-annotated records are turned into
        yes/no multiple-choice questions.
        """
        if data_type == "vlm":
            for tag_info in config['data_detail']['vlm']['tags']:
                tag = tag_info['name']
                if 'enable' in tag_info and tag_info['enable'] == False:
                    continue
                data_list = self.parse_vlm_data(data_dir)
                for iquery, query in enumerate(tag_info['query']):
                    for idata, data in enumerate(data_list):
                        example_id = f'{tag}-{iquery}-{idata}'
                        vlm_root = '/vlm/data/train_images/'
                        if ('type' in data) and data['type'] == 'sub_video':
                            sub_videos = json.loads(data['sub_videos'])
                            # BUG FIX: the original yielded the SAME key for
                            # every sub-video, which makes `datasets` raise
                            # DuplicatedKeysError; suffix the index instead.
                            for isub, sub_video in enumerate(sub_videos):
                                yield f'{example_id}-{isub}', {
                                    # NOTE(review): f-string interpolates the
                                    # sub_video dict's repr here, as the
                                    # original did — confirm intended.
                                    "id": f'{example_id}-{sub_video}',
                                    "job_id": example_id,
                                    "images": [os.path.join(vlm_root, sub_video['video'])],
                                    "tag": tag,
                                    "question": query,
                                    "options": [data['options']],
                                    "answer": sub_video['answer'],
                                    "question_type": data['type'],
                                }
                        else:
                            yield example_id, {
                                "id": example_id,
                                "job_id": example_id,
                                "images": [os.path.join(vlm_root, image) for image in data['images']],
                                "tag": tag,
                                "question": query,
                                "options": [data['options']],
                                "answer": data['answer'],
                                "question_type": 'multi-choice',
                            }
        else:
            for tag_info in config['data_detail']['vlm']['tags']:
                tag = tag_info['name']
                if 'enable' in tag_info and tag_info['enable'] == False:
                    continue
                anno_tags = tag_info['anno_tags']
                if len(anno_tags) == 0:
                    anno_tags = [tag]
                data_list = self.parse_tag_data(data_dir, anno_tags)
                for iquery, query in enumerate(tag_info['query']):
                    for idata, data in enumerate(data_list):
                        example_id = f'{tag}-{iquery}-{idata}'
                        yield example_id, {
                            "id": example_id,
                            "job_id": example_id,
                            "images": [data[0]],
                            "tag": tag,
                            "question": query,
                            "options": ['(A) Yes', '(B) No'],
                            "answer": 'A' if (data[1] == 1) else 'B',
                            "question_type": 'multi-choice',
                        }