#!/usr/bin/env python3
"""
Download script for structured3d dataset chunks from Hugging Face
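
Expected repo layout (inferred from the code below): chunk files named
structured3d/structured3d_part_000, _001, ... in a Hugging Face dataset
repo. Example invocations; the repo id is a placeholder:

    python download.py your-username/structured3d-dataset
    HF_TOKEN=hf_... python download.py your-username/structured3d-dataset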
"""
import os
import sys
from pathlib import Path
try:
    from huggingface_hub import hf_hub_download, list_repo_files
except ImportError:
    print("Error: huggingface_hub not installed")
    print("Install with: pip install huggingface_hub")
    sys.exit(1)
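
# DEFAULT_REPO_ID below is a placeholder; override it on the command line
# (see main()) or edit it to point at the repository that hosts the chunks.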
DATASET_NAME = "structured3d"
DEFAULT_REPO_ID = f"your-username/{DATASET_NAME}-dataset"
def download_chunks(repo_id, token=None):
"""Download structured3d chunks from Hugging Face."""
try:
# List files in the repository
files = list_repo_files(repo_id=repo_id, repo_type="dataset", token=token)
# Filter chunk files
chunk_files = [f for f in files if f.startswith(f"{DATASET_NAME}/{DATASET_NAME}_part_")]
if not chunk_files:
print(f"Error: No chunks found in {repo_id}")
print(f"Expected files like {DATASET_NAME}/{DATASET_NAME}_part_000")
return False
print(f"Found {len(chunk_files)} chunks to download")
print(f"Warning: This will download ~307GB of data. Ensure you have enough disk space!")
        response = input("Continue with download? (y/N): ")
        if response.lower() != 'y':
            print("Download cancelled.")
            return False

        # Create chunks directory
        chunks_dir = Path("chunks")
        chunks_dir.mkdir(exist_ok=True)
        # Download each chunk
        for i, file_path in enumerate(sorted(chunk_files)):
            chunk_name = Path(file_path).name
            local_path = chunks_dir / chunk_name
            print(f"Downloading {chunk_name} ({i+1}/{len(chunk_files)})...")
            try:
                hf_hub_download(
                    repo_id=repo_id,
                    repo_type="dataset",
                    filename=file_path,
                    local_dir=".",
                    token=token
                )
                # With local_dir=".", the file is written under the repo's
                # subpath, i.e. ./<file_path>; move it into chunks/
                downloaded_path = Path(file_path)
                if downloaded_path.exists():
                    downloaded_path.rename(local_path)
            except Exception as e:
                print(f" ✗ Error downloading {chunk_name}: {e}")
                continue
        # Download helper scripts
        helper_files = [f for f in files if f.startswith(f"{DATASET_NAME}/") and f.endswith(('.sh', '.py'))]
        for file_path in helper_files:
            script_name = Path(file_path).name
            if script_name != "download.py":  # Don't overwrite ourselves
                print(f"Downloading {script_name}...")
                try:
                    hf_hub_download(
                        repo_id=repo_id,
                        repo_type="dataset",
                        filename=file_path,
                        local_dir=".",
                        token=token
                    )
                    # Move to the current directory and make shell scripts executable
                    downloaded_path = Path(file_path)
                    if downloaded_path.exists():
                        downloaded_path.rename(script_name)
                        if script_name.endswith('.sh'):
                            os.chmod(script_name, 0o755)
                except Exception as e:
                    print(f" ✗ Error downloading {script_name}: {e}")

        # Clean up empty directories left behind by the moves
        dataset_dir = Path(DATASET_NAME)
        if dataset_dir.exists() and not any(dataset_dir.iterdir()):
            dataset_dir.rmdir()
print(f"\n✓ Download complete!")
print(f"Downloaded {len(chunk_files)} chunks to chunks/ directory")
print("\nNext steps:")
print("1. Run ./merge.sh to reassemble the original file")
print("2. Run ./extract.sh to extract contents")
print("\nWarning: Extraction will require additional ~307GB of disk space!")
return True
except Exception as e:
print(f"Error accessing repository {repo_id}: {e}")
return False
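
# Programmatic use, bypassing the CLI (the repo id shown is a placeholder):
#
#   from download import download_chunks
#   download_chunks("your-username/structured3d-dataset", token="hf_...")
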
def main():
    import argparse

    parser = argparse.ArgumentParser(description=f"Download {DATASET_NAME} chunks from Hugging Face")
    parser.add_argument("repo_id", nargs="?", default=DEFAULT_REPO_ID, help="Hugging Face repository ID")
    parser.add_argument("--token", help="Hugging Face token (or set HF_TOKEN env var)")
    args = parser.parse_args()

    # Get token (optional for public repos)
    token = args.token or os.getenv("HF_TOKEN")

    print(f"Downloading from: {args.repo_id}")
    success = download_chunks(
        repo_id=args.repo_id,
        token=token
    )
    if not success:
        sys.exit(1)
if __name__ == "__main__":
    main()