import json
import os

import boto3
import pandas as pd
from botocore import UNSIGNED
from botocore.config import Config
from concurrent.futures import ThreadPoolExecutor
from datasets import Dataset, Image, load_dataset
from tqdm import tqdm


def load_jsonl(file_path):
    """
    Loads a JSONL file and returns a list of Python dictionaries.
    Each dictionary represents a JSON object from a line in the file.
    """
    data = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            try:
                # Parse each line as a JSON object
                json_object = json.loads(line.strip())
                data.append(json_object)
            except json.JSONDecodeError as e:
                print(f"Error decoding JSON on line: {line.strip()} - {e}")
    return data


# The open-images-dataset bucket is public; unsigned requests avoid
# requiring AWS credentials to be configured.
s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))


def download_file(image_id, split):
    bucket_name = "open-images-dataset"
    object_key = f"{split}/{image_id}.jpg"
    store_name = f"./open_images/images_files/{split}/{image_id}.jpg"
    # Skip images that were already downloaded on a previous run.
    if os.path.exists(store_name):
        return
    try:
        s3.download_file(bucket_name, object_key, store_name)
    except KeyboardInterrupt:
        raise
    except Exception as e:
        print(f"Error getting s3://{bucket_name}/{object_key} - {e}")
    return


def download_split(split):
    workdir = "./open_images"
    annot_file = os.path.join(workdir, f"{split}-images-with-rotation.csv")
    df = pd.read_csv(annot_file)
    os.makedirs(os.path.join(workdir, "images_files", split), exist_ok=True)

    # Prepare the list of image IDs for multithreaded download
    image_ids = df["ImageID"].tolist()

    # Use ThreadPoolExecutor with 6 threads and tqdm for progress
    with ThreadPoolExecutor(max_workers=6) as executor:
        futures = [executor.submit(download_file, image_id, split) for image_id in image_ids]
        try:
            # Iterate in submission order; result() waits for completion
            # and re-raises any exception from the worker thread.
            for future in tqdm(futures, desc=f"Downloading {split} images", unit="image"):
                future.result()
        except KeyboardInterrupt:
            print("\nKeyboardInterrupt received. Shutting down threads...")
            # Cancel all pending futures and stop without waiting
            executor.shutdown(wait=False, cancel_futures=True)
            print("All threads stopped.")
            raise  # Re-raise to exit the program
    return


def test_dataset():
    # Note: datasets written with save_to_disk() (see prepare_subset) are read
    # back with datasets.load_from_disk(); load_dataset() on a directory
    # expects data files or a loading script instead.
    ds = load_dataset("./open_images/open-image-narratives/", split="train")
    print(ds)
    print(ds[0])
    return


def prepare_subset(split):
    workdir = "./open_images"
    csv_file = os.path.join(workdir, f"{split}-images-with-rotation.csv")
    df = pd.read_csv(csv_file)
    narratives = load_jsonl(os.path.join(workdir, "narratives", f"open_images_{split}_captions.jsonl"))
    # Index the narratives by image ID for O(1) lookup
    narratives = {x["image_id"]: x for x in narratives}

    def gen():
        for _, row in df.iterrows():
            narrative = narratives.get(row["ImageID"], None)
            # Keep only images that have a matching narrative caption
            if narrative is None:
                continue
            image_fn = os.path.join(workdir, "images_files", split, row["ImageID"] + ".jpg")
            yield {
                "image_id": row["ImageID"],
                "original_url": str(row["OriginalURL"]),
                "license": str(row["License"]),
                "author": str(row["Author"]),
                "caption": str(row["Title"]),
                "image": image_fn,
                "narrative": str(narrative["caption"]),
            }

    ds = Dataset.from_generator(gen)
    # Decode the stored image paths into actual image data on access
    ds = ds.cast_column("image", Image())
    ds.save_to_disk(f"{workdir}/datasets/data/{split}", max_shard_size="400MB")
    return


if __name__ == "__main__":
    # for split in ["train", "validation", "test"]:
    #     download_split(split)
    # for split in ["train"]:
    #     prepare_subset(split)
    test_dataset()