#!/usr/bin/env python3
"""
Camie-Tagger-V2 Application
A Streamlit web app for tagging images using an AI model.
"""
import streamlit as st
import os
import sys
import traceback
import tempfile
import time
import platform
import subprocess
import webbrowser
import glob
import numpy as np
import matplotlib.pyplot as plt
import io
import base64
import json
from matplotlib.colors import LinearSegmentedColormap
from PIL import Image
from pathlib import Path
from huggingface_hub import hf_hub_download
# Add the app's own directory to sys.path so the local utils package can be imported
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
# Import utilities
from utils.image_processing import process_image, batch_process_images
from utils.file_utils import save_tags_to_file, get_default_save_locations
from utils.ui_components import display_progress_bar, show_example_images, display_batch_results
from utils.onnx_processing import batch_process_images_onnx
# Add environment variables for HF Spaces permissions
os.environ['MPLCONFIGDIR'] = '/tmp/matplotlib'
os.environ['HF_HOME'] = '/tmp/huggingface'
os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers'
# Fix Streamlit permission issues
os.environ['STREAMLIT_SERVER_HEADLESS'] = 'true'
os.environ['STREAMLIT_SERVER_ENABLE_CORS'] = 'false'
os.environ['STREAMLIT_SERVER_ENABLE_XSRF_PROTECTION'] = 'false'
os.environ['STREAMLIT_BROWSER_GATHER_USAGE_STATS'] = 'false'
os.environ['STREAMLIT_GLOBAL_DEVELOPMENT_MODE'] = 'false'
# Constants - model repo and file names (same naming pattern as v1)
MODEL_REPO = "Camais03/camie-tagger-v2"
ONNX_MODEL_FILE = "camie-tagger-v2.onnx"
SAFETENSORS_MODEL_FILE = "camie-tagger-v2.safetensors"
METADATA_FILE = "camie-tagger-v2-metadata.json"
VALIDATION_FILE = "full_validation_results.json"
def get_model_files():
"""Download model files from HF Hub and return paths - optimized for HF Spaces"""
try:
        # Cache downloads under /tmp (writable on HF Spaces) and handle the large files carefully
cache_dir = "/tmp/hf_cache"
os.makedirs(cache_dir, exist_ok=True)
# Download metadata first (small file)
metadata_path = hf_hub_download(
repo_id=MODEL_REPO,
filename=METADATA_FILE,
cache_dir=cache_dir,
resume_download=True # Allow resuming if interrupted
)
# Try streaming download for large ONNX file
try:
onnx_path = hf_hub_download(
repo_id=MODEL_REPO,
filename=ONNX_MODEL_FILE,
cache_dir=cache_dir,
resume_download=True,
force_download=False # Use cached version if available
)
except Exception as e:
print(f"ONNX download failed: {e}")
# Fallback: try direct URL download with requests
import requests
onnx_url = f"https://huggingface.co/{MODEL_REPO}/resolve/main/{ONNX_MODEL_FILE}"
onnx_path = os.path.join(cache_dir, ONNX_MODEL_FILE)
print(f"Trying direct download from: {onnx_url}")
response = requests.get(onnx_url, stream=True)
response.raise_for_status()
with open(onnx_path, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
print(f"Direct download successful: {onnx_path}")
# Try optional files
try:
safetensors_path = hf_hub_download(
repo_id=MODEL_REPO,
filename=SAFETENSORS_MODEL_FILE,
cache_dir=cache_dir,
resume_download=True
)
except Exception as e:
print(f"SafeTensors model not available: {e}")
safetensors_path = None
try:
validation_path = hf_hub_download(
repo_id=MODEL_REPO,
filename=VALIDATION_FILE,
cache_dir=cache_dir,
resume_download=True
)
except Exception as e:
print(f"Validation results not available: {e}")
validation_path = None
return {
'onnx_path': onnx_path,
'safetensors_path': safetensors_path,
'metadata_path': metadata_path,
'validation_path': validation_path
}
except Exception as e:
print(f"Failed to download model files: {e}")
return None
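# Illustrative result of get_model_files() on success (keys match the dict returned
# above; the example paths are assumptions based on cache_dir, not guaranteed locations):
#   {'onnx_path': '/tmp/hf_cache/.../camie-tagger-v2.onnx',
#    'safetensors_path': None,          # None when the optional file is unavailable
#    'metadata_path': '/tmp/hf_cache/.../camie-tagger-v2-metadata.json',
#    'validation_path': None}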
# Define threshold profile descriptions and explanations
threshold_profile_descriptions = {
"Micro Optimized": "Maximizes micro-averaged F1 score (best for dominant classes). Optimal for overall prediction quality.",
"Macro Optimized": "Maximizes macro-averaged F1 score (equal weight to all classes). Better for balanced performance across all tags.",
"Balanced": "Provides a trade-off between precision and recall with moderate thresholds. Good general-purpose setting.",
"Overall": "Uses a single threshold value across all categories. Simplest approach for consistent behavior.",
"Category-specific": "Uses different optimal thresholds for each category. Best for fine-tuning results."
}
threshold_profile_explanations = {
"Micro Optimized": """
### Micro Optimized Profile
**Technical definition**: Maximizes micro-averaged F1 score, which calculates metrics globally across all predictions.
**When to use**: When you want the best overall accuracy, especially for common tags and dominant categories.
**Effects**:
- Optimizes performance for the most frequent tags
- Gives more weight to categories with many examples (like 'character' and 'general')
- Provides higher precision in most common use cases
**Performance from validation**:
- Micro F1: ~67.3%
- Macro F1: ~46.3%
- Threshold: ~0.614
""",
"Macro Optimized": """
### Macro Optimized Profile
**Technical definition**: Maximizes macro-averaged F1 score, which gives equal weight to all categories regardless of size.
**When to use**: When balanced performance across all categories is important, including rare tags.
**Effects**:
- More balanced performance across all tag categories
- Better at detecting rare or unusual tags
- Generally has lower thresholds than micro-optimized
**Performance from validation**:
- Micro F1: ~60.9%
- Macro F1: ~50.6%
- Threshold: ~0.492
""",
"Balanced": """
### Balanced Profile
**Technical definition**: Same as Micro Optimized but provides a good reference point for manual adjustment.
**When to use**: For general-purpose tagging when you don't have specific recall or precision requirements.
**Effects**:
- Good middle ground between precision and recall
- Works well for most common use cases
- Default choice for most users
**Performance from validation**:
- Micro F1: ~67.3%
- Macro F1: ~46.3%
- Threshold: ~0.614
""",
"Overall": """
### Overall Profile
**Technical definition**: Uses a single threshold value across all categories.
**When to use**: When you want consistent behavior across all categories and a simple approach.
**Effects**:
- Consistent tagging threshold for all categories
- Simpler to understand than category-specific thresholds
- User-adjustable with a single slider
**Default threshold value**: 0.5 (user-adjustable)
**Note**: The threshold value is user-adjustable with the slider below.
""",
"Category-specific": """
### Category-specific Profile
**Technical definition**: Uses different optimal thresholds for each category, allowing fine-tuning.
**When to use**: When you want to customize tagging sensitivity for different categories.
**Effects**:
- Each category has its own independent threshold
- Full control over category sensitivity
- Best for fine-tuning results when some categories need different treatment
**Default threshold values**: Starts with balanced thresholds for each category
**Note**: Use the category sliders below to adjust thresholds for individual categories.
"""
}
def load_validation_results(results_path):
"""Load validation results from JSON file"""
try:
with open(results_path, 'r') as f:
data = json.load(f)
return data
except Exception as e:
print(f"Error loading validation results: {e}")
return None
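# Illustrative shape of one entry in validation_data['results'], inferred from the keys
# read in extract_thresholds_from_results() below (the exact casing of the CATEGORY and
# PROFILE values in the real file is an assumption; numbers are examples only):
#   {"CATEGORY": "Overall", "PROFILE": "Micro Opt", "THRESHOLD": 0.614,
#    "MICRO-F1": 67.3, "MACRO-F1": 46.3}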
def extract_thresholds_from_results(validation_data):
"""Extract threshold information from validation results"""
if not validation_data or 'results' not in validation_data:
return {}
thresholds = {
'overall': {},
'categories': {}
}
# Process results to extract thresholds
for result in validation_data['results']:
category = result['CATEGORY'].lower()
profile = result['PROFILE'].lower().replace(' ', '_')
threshold = result['THRESHOLD']
micro_f1 = result['MICRO-F1']
macro_f1 = result['MACRO-F1']
# Map profile names
if profile == 'micro_opt':
profile = 'micro_optimized'
elif profile == 'macro_opt':
profile = 'macro_optimized'
threshold_info = {
'threshold': threshold,
'micro_f1': micro_f1,
'macro_f1': macro_f1
}
if category == 'overall':
thresholds['overall'][profile] = threshold_info
else:
if category not in thresholds['categories']:
thresholds['categories'][category] = {}
thresholds['categories'][category][profile] = threshold_info
return thresholds
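# Illustrative shape of the thresholds dict built above (numbers are examples, not
# actual validation results):
#   {'overall':    {'micro_optimized': {'threshold': 0.614, 'micro_f1': 67.3, 'macro_f1': 46.3}, ...},
#    'categories': {'general': {'micro_optimized': {'threshold': 0.60, ...}, ...}, ...}}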
def load_model_and_metadata():
"""Load model and metadata from HF Hub"""
# Download model files
model_files = get_model_files()
if not model_files:
return None, None, {}
model_info = {
'safetensors_available': model_files['safetensors_path'] is not None,
'onnx_available': model_files['onnx_path'] is not None,
'validation_results_available': model_files['validation_path'] is not None
}
# Load metadata
metadata = None
if model_files['metadata_path']:
try:
with open(model_files['metadata_path'], 'r') as f:
metadata = json.load(f)
except Exception as e:
print(f"Error loading metadata: {e}")
# Load validation results for thresholds
thresholds = {}
if model_files['validation_path']:
validation_data = load_validation_results(model_files['validation_path'])
if validation_data:
thresholds = extract_thresholds_from_results(validation_data)
# Add default thresholds if not available
if not thresholds:
thresholds = {
'overall': {
'balanced': {'threshold': 0.5, 'micro_f1': 0, 'macro_f1': 0},
'micro_optimized': {'threshold': 0.6, 'micro_f1': 0, 'macro_f1': 0},
'macro_optimized': {'threshold': 0.4, 'micro_f1': 0, 'macro_f1': 0}
},
'categories': {}
}
# Store file paths in session state for later use
st.session_state.model_files = model_files
return model_info, metadata, thresholds
def load_safetensors_model(safetensors_path, metadata_path):
"""Load SafeTensors model"""
try:
from safetensors.torch import load_file
import torch
# Load metadata
with open(metadata_path, 'r') as f:
metadata = json.load(f)
# Import the model class (assuming it's available)
# You'll need to make sure the ImageTagger class is importable
from utils.model_loader import ImageTagger # Update this import
model_info = metadata['model_info']
dataset_info = metadata['dataset_info']
# Recreate model architecture
model = ImageTagger(
total_tags=dataset_info['total_tags'],
dataset=None,
model_name=model_info['backbone'],
num_heads=model_info['num_attention_heads'],
dropout=0.0,
pretrained=False,
tag_context_size=model_info['tag_context_size'],
use_gradient_checkpointing=False,
img_size=model_info['img_size']
)
# Load weights
state_dict = load_file(safetensors_path)
model.load_state_dict(state_dict)
model.eval()
return model, metadata
except Exception as e:
raise Exception(f"Failed to load SafeTensors model: {e}")
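# Illustrative slice of the metadata JSON consumed above and in image_tagger_app()
# (key names are taken from the accesses in this file; the example values are assumptions):
#   {"model_info": {"backbone": "...", "num_attention_heads": 8,
#                   "tag_context_size": 256, "img_size": 512},
#    "dataset_info": {"total_tags": 70000,
#                     "tag_mapping": {"tag_to_category": {"smile": "general", ...}}}}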
def get_profile_metrics(thresholds, profile_name):
"""Extract metrics for the given profile from the thresholds dictionary"""
profile_key = None
# Map UI-friendly names to internal keys
if profile_name == "Micro Optimized":
profile_key = "micro_optimized"
elif profile_name == "Macro Optimized":
profile_key = "macro_optimized"
elif profile_name == "Balanced":
profile_key = "balanced"
elif profile_name in ["Overall", "Category-specific"]:
profile_key = "macro_optimized" # Use macro as default for these modes
if profile_key and 'overall' in thresholds and profile_key in thresholds['overall']:
return thresholds['overall'][profile_key]
return None
def on_threshold_profile_change():
"""Handle threshold profile changes"""
new_profile = st.session_state.threshold_profile
# Clear any existing results to prevent UI duplication
if hasattr(st.session_state, 'all_probs'):
del st.session_state.all_probs
if hasattr(st.session_state, 'tags'):
del st.session_state.tags
if hasattr(st.session_state, 'all_tags'):
del st.session_state.all_tags
if hasattr(st.session_state, 'thresholds') and hasattr(st.session_state, 'settings'):
# Initialize category thresholds if needed
if st.session_state.settings['active_category_thresholds'] is None:
st.session_state.settings['active_category_thresholds'] = {}
current_thresholds = st.session_state.settings['active_category_thresholds']
# Map profile names to keys
profile_key = None
if new_profile == "Micro Optimized":
profile_key = "micro_optimized"
elif new_profile == "Macro Optimized":
profile_key = "macro_optimized"
elif new_profile == "Balanced":
profile_key = "balanced"
# Update thresholds based on profile
if profile_key and 'overall' in st.session_state.thresholds and profile_key in st.session_state.thresholds['overall']:
st.session_state.settings['active_threshold'] = st.session_state.thresholds['overall'][profile_key]['threshold']
# Set category thresholds if categories exist
if hasattr(st.session_state, 'categories'):
for category in st.session_state.categories:
if category in st.session_state.thresholds['categories'] and profile_key in st.session_state.thresholds['categories'][category]:
current_thresholds[category] = st.session_state.thresholds['categories'][category][profile_key]['threshold']
else:
current_thresholds[category] = st.session_state.settings['active_threshold']
elif new_profile == "Overall":
# Use balanced threshold for Overall profile
if 'overall' in st.session_state.thresholds and 'balanced' in st.session_state.thresholds['overall']:
st.session_state.settings['active_threshold'] = st.session_state.thresholds['overall']['balanced']['threshold']
else:
st.session_state.settings['active_threshold'] = 0.5
# Clear category-specific overrides
st.session_state.settings['active_category_thresholds'] = {}
elif new_profile == "Category-specific":
# Initialize with balanced thresholds
if 'overall' in st.session_state.thresholds and 'balanced' in st.session_state.thresholds['overall']:
st.session_state.settings['active_threshold'] = st.session_state.thresholds['overall']['balanced']['threshold']
else:
st.session_state.settings['active_threshold'] = 0.5
# Initialize category thresholds if categories exist
if hasattr(st.session_state, 'categories'):
for category in st.session_state.categories:
if category in st.session_state.thresholds['categories'] and 'balanced' in st.session_state.thresholds['categories'][category]:
current_thresholds[category] = st.session_state.thresholds['categories'][category]['balanced']['threshold']
else:
current_thresholds[category] = st.session_state.settings['active_threshold']
def apply_thresholds(all_probs, threshold_profile, active_threshold, active_category_thresholds, min_confidence, selected_categories):
"""Apply thresholds to raw probabilities and return filtered tags"""
tags = {}
all_tags = []
# Handle None case for active_category_thresholds
active_category_thresholds = active_category_thresholds or {}
for category, cat_probs in all_probs.items():
# Get the appropriate threshold for this category
threshold = active_category_thresholds.get(category, active_threshold)
# Filter tags above threshold
tags[category] = [(tag, prob) for tag, prob in cat_probs if prob >= threshold]
# Add to all_tags if selected
if selected_categories.get(category, True):
for tag, prob in tags[category]:
all_tags.append(tag)
return tags, all_tags
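# Minimal usage sketch for apply_thresholds() with hand-made inputs (illustrative only):
#   all_probs = {'general': [('smile', 0.92), ('outdoors', 0.31)]}
#   tags, flat = apply_thresholds(all_probs, "Overall", 0.5, {}, 0.01, {'general': True})
#   # tags -> {'general': [('smile', 0.92)]}; flat -> ['smile']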
def image_tagger_app():
"""Main Streamlit application for image tagging."""
st.set_page_config(layout="wide", page_title="Camie Tagger", page_icon="🖼️")
st.title("Camie-Tagger-v2 Interface")
st.markdown("---")
    # One-time session flag (set on the first run)
if 'app_container' not in st.session_state:
st.session_state.app_container = True
# Initialize settings
if 'settings' not in st.session_state:
st.session_state.settings = {
'show_all_tags': False,
'compact_view': True,
'min_confidence': 0.01,
            'threshold_profile': "Macro Optimized",
'active_threshold': 0.5,
'active_category_thresholds': {}, # Initialize as empty dict, not None
'selected_categories': {},
'replace_underscores': False
}
st.session_state.show_profile_help = False
# Session state initialization for model
if 'model_loaded' not in st.session_state:
st.session_state.model_loaded = False
st.session_state.model = None
st.session_state.thresholds = None
st.session_state.metadata = None
st.session_state.model_type = "onnx" # Default to ONNX
# Sidebar for model selection and information
with st.sidebar:
# Support information
st.subheader("💡 Notes")
st.markdown("""
This tagger was trained on a subset of the available data due to hardware limitations.
A more comprehensive model trained on the full 3+ million image dataset would provide:
- More recent characters and tags.
- Improved accuracy.
If you find this tool useful and would like to support future development:
""")
# Add Buy Me a Coffee button with Star of the City-like glow effect
st.markdown("""
<style>
@keyframes coffee-button-glow {
0% { box-shadow: 0 0 5px #FFD700; }
50% { box-shadow: 0 0 15px #FFD700; }
100% { box-shadow: 0 0 5px #FFD700; }
}
.coffee-button {
display: inline-block;
animation: coffee-button-glow 2s infinite;
border-radius: 5px;
transition: transform 0.3s ease;
}
.coffee-button:hover {
transform: scale(1.05);
}
</style>
<a href="https://ko-fi.com/camais" target="_blank" class="coffee-button">
<img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png"
alt="Buy Me A Coffee"
style="height: 45px; width: 162px; border-radius: 5px;" />
</a>
""", unsafe_allow_html=True)
st.markdown("""
Your support helps with:
- GPU costs for training
- Storage for larger datasets
- Development of new features
- Future projects
Thank you! 🙏
Full Details: https://huggingface.co/Camais03/camie-tagger-v2
""")
st.header("Model Selection")
# Load model information
try:
with st.spinner("Loading model from HF Hub..."):
model_info, metadata, thresholds = load_model_and_metadata()
except Exception as e:
st.error(f"Failed to load model information: {e}")
st.stop()
# Check if model info loaded successfully
if model_info is None:
st.error("Could not download model files from Hugging Face Hub")
st.info("Please check your internet connection or try again later")
st.stop()
# Determine available model options
model_options = []
if model_info['onnx_available']:
model_options.append("ONNX (Recommended)")
if model_info['safetensors_available']:
model_options.append("SafeTensors (PyTorch)")
if not model_options:
st.error("No model files found!")
st.info("Expected files in Camais03/camie-tagger-v2:")
st.info("- camie-tagger-v2.onnx")
st.info("- camie-tagger-v2.safetensors")
st.info("- camie-tagger-v2-metadata.json")
st.stop()
# Model type selection
        default_index = 0  # ONNX is listed first when available; otherwise the only option is at index 0
model_type = st.radio(
"Select Model Type:",
model_options,
index=default_index,
help="ONNX: Optimized for speed and compatibility\nSafeTensors: Native PyTorch format"
)
# Convert selection to internal model type
if model_type == "ONNX (Recommended)":
selected_model_type = "onnx"
else:
selected_model_type = "safetensors"
# If model type changed, reload
if selected_model_type != st.session_state.model_type:
st.session_state.model_loaded = False
st.session_state.model_type = selected_model_type
# Reload button
if st.button("Reload Model") and st.session_state.model_loaded:
st.session_state.model_loaded = False
st.info("Reloading model...")
# Try to load the model
if not st.session_state.model_loaded:
try:
with st.spinner(f"Loading {st.session_state.model_type.upper()} model..."):
if st.session_state.model_type == "onnx":
                    # Load ONNX model - same approach as v1
import onnxruntime as ort
onnx_path = st.session_state.model_files['onnx_path']
                    # Initialize ONNX Runtime session with the CPU provider (as in v1)
session = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
st.session_state.model = session
st.session_state.device = "CPU" # Simplified like your v1
st.session_state.param_dtype = "float32"
else:
# Load SafeTensors model
safetensors_path = st.session_state.model_files['safetensors_path']
metadata_path = st.session_state.model_files['metadata_path']
model, loaded_metadata = load_safetensors_model(safetensors_path, metadata_path)
st.session_state.model = model
device = next(model.parameters()).device
param_dtype = next(model.parameters()).dtype
st.session_state.device = device
st.session_state.param_dtype = param_dtype
metadata = loaded_metadata # Use loaded metadata instead
# Store common info
st.session_state.thresholds = thresholds
st.session_state.metadata = metadata
st.session_state.model_loaded = True
# Get categories
if metadata and 'dataset_info' in metadata:
tag_mapping = metadata['dataset_info']['tag_mapping']
categories = list(set(tag_mapping['tag_to_category'].values()))
st.session_state.categories = categories
# Initialize selected categories
if not st.session_state.settings['selected_categories']:
st.session_state.settings['selected_categories'] = {cat: True for cat in categories}
# Set initial threshold from validation results
if 'overall' in thresholds and 'macro_optimized' in thresholds['overall']:
st.session_state.settings['active_threshold'] = thresholds['overall']['macro_optimized']['threshold']
except Exception as e:
st.error(f"Error loading model: {str(e)}")
st.code(traceback.format_exc())
st.stop()
# Display model information in sidebar
with st.sidebar:
st.header("Model Information")
if st.session_state.model_loaded:
if st.session_state.model_type == "onnx":
st.success("Using ONNX Model")
else:
st.success("Using SafeTensors Model")
st.write(f"Device: {st.session_state.device}")
st.write(f"Precision: {st.session_state.param_dtype}")
if st.session_state.metadata:
if 'dataset_info' in st.session_state.metadata:
total_tags = st.session_state.metadata['dataset_info']['total_tags']
st.write(f"Total tags: {total_tags}")
elif 'total_tags' in st.session_state.metadata:
st.write(f"Total tags: {st.session_state.metadata['total_tags']}")
# Show categories
with st.expander("Available Categories"):
if hasattr(st.session_state, 'categories'):
for category in sorted(st.session_state.categories):
st.write(f"- {category.capitalize()}")
else:
st.write("Categories will be available after model loads")
# About section
with st.expander("About this app"):
st.write("""
This app uses a trained image tagging model to analyze and tag images.
**Model Options**:
- **ONNX (Recommended)**: Optimized for inference speed with broad compatibility
- **SafeTensors**: Native PyTorch format for advanced users
**Features**:
- Upload or process images in batches
- Multiple threshold profiles based on validation results
- Category-specific threshold adjustment
- Export tags in various formats
- Fast inference with GPU acceleration (when available)
**Threshold Profiles**:
- **Micro Optimized**: Best overall F1 score (67.3% micro F1)
- **Macro Optimized**: Balanced across categories (50.6% macro F1)
- **Balanced**: Good general-purpose setting
- **Overall**: Single adjustable threshold
- **Category-specific**: Fine-tune each category individually
""")
# Main content area - Image upload and processing
col1, col2 = st.columns([1, 1.5])
with col1:
st.header("Image")
upload_tab, batch_tab = st.tabs(["Upload Image", "Batch Processing"])
image_path = None
with upload_tab:
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
if uploaded_file:
# Create temporary file
with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
tmp_file.write(uploaded_file.getvalue())
image_path = tmp_file.name
st.session_state.original_filename = uploaded_file.name
# Display image
image = Image.open(uploaded_file)
st.image(image, use_container_width=True)
with batch_tab:
st.subheader("Batch Process Images")
# Note about batch processing in HF Spaces
st.info("Note: Batch processing from local folders is not available in HF Spaces. Use the single image upload instead.")
# Folder selection (disabled for HF Spaces)
batch_folder = st.text_input("Enter folder path containing images:", "", disabled=True)
st.write("For batch processing, please:")
st.write("1. Download this code and run locally")
st.write("2. Or upload images one by one using the Upload Image tab")
# Column 2: Controls and Results
with col2:
st.header("Tagging Controls")
# Only show controls if model is loaded
if not st.session_state.model_loaded:
st.info("Model loading... Controls will appear once the model is ready.")
return
# Threshold profile selection
all_profiles = [
"Micro Optimized",
"Macro Optimized",
"Balanced",
"Overall",
"Category-specific"
]
profile_col1, profile_col2 = st.columns([3, 1])
with profile_col1:
threshold_profile = st.selectbox(
"Select threshold profile",
options=all_profiles,
index=1, # Default to Macro
key="threshold_profile",
on_change=on_threshold_profile_change
)
with profile_col2:
if st.button("ℹ️ Help", key="profile_help"):
st.session_state.show_profile_help = not st.session_state.get('show_profile_help', False)
# Show profile help
if st.session_state.get('show_profile_help', False):
st.markdown(threshold_profile_explanations[threshold_profile])
else:
st.info(threshold_profile_descriptions[threshold_profile])
# Show profile metrics if available
if st.session_state.model_loaded and hasattr(st.session_state, 'thresholds'):
metrics = get_profile_metrics(st.session_state.thresholds, threshold_profile)
if metrics:
metrics_cols = st.columns(3)
with metrics_cols[0]:
st.metric("Threshold", f"{metrics['threshold']:.3f}")
with metrics_cols[1]:
st.metric("Micro F1", f"{metrics['micro_f1']:.1f}%")
with metrics_cols[2]:
st.metric("Macro F1", f"{metrics['macro_f1']:.1f}%")
# Threshold controls based on profile
if st.session_state.model_loaded:
active_threshold = st.session_state.settings.get('active_threshold', 0.5)
active_category_thresholds = st.session_state.settings.get('active_category_thresholds', {})
if threshold_profile in ["Micro Optimized", "Macro Optimized", "Balanced"]:
# Show reference threshold (disabled)
st.slider(
"Threshold (from validation)",
min_value=0.01,
max_value=1.0,
value=float(active_threshold),
step=0.01,
disabled=True,
help="This threshold is optimized from validation results"
)
elif threshold_profile == "Overall":
# Adjustable overall threshold
active_threshold = st.slider(
"Overall threshold",
min_value=0.01,
max_value=1.0,
value=float(active_threshold),
step=0.01
)
st.session_state.settings['active_threshold'] = active_threshold
elif threshold_profile == "Category-specific":
# Show reference overall threshold
st.slider(
"Overall threshold (reference)",
min_value=0.01,
max_value=1.0,
value=float(active_threshold),
step=0.01,
disabled=True
)
st.write("Adjust thresholds for individual categories:")
# Category sliders
slider_cols = st.columns(2)
if not active_category_thresholds:
active_category_thresholds = {}
if hasattr(st.session_state, 'categories'):
for i, category in enumerate(sorted(st.session_state.categories)):
col_idx = i % 2
with slider_cols[col_idx]:
default_val = active_category_thresholds.get(category, active_threshold)
new_threshold = st.slider(
f"{category.capitalize()}",
min_value=0.01,
max_value=1.0,
value=float(default_val),
step=0.01,
key=f"slider_{category}"
)
active_category_thresholds[category] = new_threshold
st.session_state.settings['active_category_thresholds'] = active_category_thresholds
# Display options
with st.expander("Display Options", expanded=False):
col1, col2 = st.columns(2)
with col1:
show_all_tags = st.checkbox("Show all tags (including below threshold)",
value=st.session_state.settings['show_all_tags'])
compact_view = st.checkbox("Compact view (hide progress bars)",
value=st.session_state.settings['compact_view'])
replace_underscores = st.checkbox("Replace underscores with spaces",
value=st.session_state.settings.get('replace_underscores', False))
with col2:
min_confidence = st.slider("Minimum confidence to display", 0.0, 0.5,
st.session_state.settings['min_confidence'], 0.01)
# Update settings
st.session_state.settings.update({
'show_all_tags': show_all_tags,
'compact_view': compact_view,
'min_confidence': min_confidence,
'replace_underscores': replace_underscores
})
# Category selection
st.write("Categories to include in 'All Tags' section:")
category_cols = st.columns(3)
selected_categories = {}
if hasattr(st.session_state, 'categories'):
for i, category in enumerate(sorted(st.session_state.categories)):
col_idx = i % 3
with category_cols[col_idx]:
default_val = st.session_state.settings['selected_categories'].get(category, True)
selected_categories[category] = st.checkbox(
f"{category.capitalize()}",
value=default_val,
key=f"cat_select_{category}"
)
st.session_state.settings['selected_categories'] = selected_categories
# Run tagging button
if image_path and st.button("Run Tagging"):
if not st.session_state.model_loaded:
st.error("Model not loaded")
else:
# Create progress indicators
progress_bar = st.progress(0)
status_text = st.empty()
try:
status_text.text("Starting image analysis...")
progress_bar.progress(10)
# Process image based on model type
if st.session_state.model_type == "onnx":
# Check if we have the necessary modules
try:
from utils.onnx_processing import process_single_image_onnx
progress_bar.progress(20)
status_text.text("Module imported successfully...")
except ImportError as import_e:
st.error(f"Missing required module: {import_e}")
st.error("This suggests the utils modules aren't properly configured")
return
# Update progress before inference
status_text.text("Running ONNX inference... This may take 2-5 seconds.")
progress_bar.progress(30)
# Add timeout warning
st.warning("⏳ Model inference in progress. Please wait and don't refresh the page.")
result = process_single_image_onnx(
image_path=image_path,
model_path=st.session_state.model_files['onnx_path'],
metadata=st.session_state.metadata,
threshold_profile=threshold_profile,
active_threshold=st.session_state.settings['active_threshold'],
active_category_thresholds=st.session_state.settings.get('active_category_thresholds', {}),
min_confidence=st.session_state.settings['min_confidence']
)
progress_bar.progress(90)
status_text.text("Processing results...")
else:
# SafeTensors processing
try:
from utils.image_processing import process_image
progress_bar.progress(20)
except ImportError as import_e:
st.error(f"Missing required module: {import_e}")
return
status_text.text("Running SafeTensors inference...")
progress_bar.progress(30)
result = process_image(
image_path=image_path,
model=st.session_state.model,
thresholds=st.session_state.thresholds,
metadata=st.session_state.metadata,
threshold_profile=threshold_profile,
active_threshold=st.session_state.settings['active_threshold'],
active_category_thresholds=st.session_state.settings.get('active_category_thresholds', {}),
min_confidence=st.session_state.settings['min_confidence']
)
progress_bar.progress(90)
if result and result.get('success'):
progress_bar.progress(95)
status_text.text("Organizing results...")
# Process results in smaller chunks to prevent browser blocking
try:
# Limit result size to prevent memory issues but allow more tags
all_probs = result.get('all_probs', {})
# Count total items
total_items = sum(len(cat_items) for cat_items in all_probs.values())
# Increased limits - 256 per category, higher total limit
MAX_TAGS_PER_CATEGORY = 256
MAX_TOTAL_TAGS = 1500 # Increased to accommodate more categories
limited_all_probs = {}
limited_tags = {}
total_processed = 0
for category, cat_probs in all_probs.items():
if total_processed >= MAX_TOTAL_TAGS:
break
# Limit items per category
limited_cat_probs = cat_probs[:MAX_TAGS_PER_CATEGORY]
limited_all_probs[category] = limited_cat_probs
# Get filtered tags for this category
filtered_cat_tags = result.get('tags', {}).get(category, [])
limited_cat_tags = filtered_cat_tags[:MAX_TAGS_PER_CATEGORY]
if limited_cat_tags:
limited_tags[category] = limited_cat_tags
total_processed += len(limited_cat_probs)
# Create limited all_tags list
limited_all_tags = []
for category, cat_tags in limited_tags.items():
for tag, _ in cat_tags:
limited_all_tags.append(tag)
# Store the limited results
st.session_state.all_probs = limited_all_probs
st.session_state.tags = limited_tags
st.session_state.all_tags = limited_all_tags
progress_bar.progress(100)
status_text.text("Analysis completed!")
# Show performance info
if 'inference_time' in result:
st.success(f"Analysis completed in {result['inference_time']:.2f} seconds! Found {len(limited_all_tags)} tags.")
else:
st.success(f"Analysis completed! Found {len(limited_all_tags)} tags.")
# Show limitation notice if we hit limits
if total_items > MAX_TOTAL_TAGS:
st.info(f"Note: Showing top {MAX_TOTAL_TAGS} results out of {total_items} total predictions for optimal performance.")
except Exception as result_e:
st.error(f"Error processing results: {result_e}")
# Clear progress indicators
progress_bar.empty()
status_text.empty()
else:
error_msg = result.get('error', 'Unknown error') if result else 'No result returned'
st.error(f"Analysis failed: {error_msg}")
progress_bar.empty()
status_text.empty()
except Exception as e:
st.error(f"Error during analysis: {str(e)}")
st.code(traceback.format_exc())
progress_bar.empty()
status_text.empty()
# Display results
if image_path and hasattr(st.session_state, 'all_probs'):
st.header("Predictions")
# Apply current thresholds
filtered_tags, current_all_tags = apply_thresholds(
st.session_state.all_probs,
threshold_profile,
st.session_state.settings['active_threshold'],
st.session_state.settings.get('active_category_thresholds', {}),
st.session_state.settings['min_confidence'],
st.session_state.settings['selected_categories']
)
all_tags = []
# Display by category
for category in sorted(st.session_state.all_probs.keys()):
all_tags_in_category = st.session_state.all_probs.get(category, [])
filtered_tags_in_category = filtered_tags.get(category, [])
if all_tags_in_category:
expander_label = f"{category.capitalize()} ({len(filtered_tags_in_category)} tags)"
with st.expander(expander_label, expanded=True):
# Get threshold for this category (handle None case)
active_category_thresholds = st.session_state.settings.get('active_category_thresholds') or {}
threshold = active_category_thresholds.get(category, st.session_state.settings['active_threshold'])
# Determine tags to display
if st.session_state.settings['show_all_tags']:
tags_to_display = all_tags_in_category
else:
tags_to_display = [(tag, prob) for tag, prob in all_tags_in_category if prob >= threshold]
if not tags_to_display:
st.info(f"No tags above {st.session_state.settings['min_confidence']:.2f} confidence")
continue
# Display tags
if st.session_state.settings['compact_view']:
# Compact view
tag_list = []
replace_underscores = st.session_state.settings.get('replace_underscores', False)
for tag, prob in tags_to_display:
percentage = int(prob * 100)
display_tag = tag.replace('_', ' ') if replace_underscores else tag
tag_list.append(f"{display_tag} ({percentage}%)")
if prob >= threshold and st.session_state.settings['selected_categories'].get(category, True):
all_tags.append(tag)
st.markdown(", ".join(tag_list))
else:
# Expanded view with progress bars
for tag, prob in tags_to_display:
replace_underscores = st.session_state.settings.get('replace_underscores', False)
display_tag = tag.replace('_', ' ') if replace_underscores else tag
if prob >= threshold and st.session_state.settings['selected_categories'].get(category, True):
all_tags.append(tag)
tag_display = f"**{display_tag}**"
else:
tag_display = display_tag
st.write(tag_display)
st.markdown(display_progress_bar(prob), unsafe_allow_html=True)
# All tags summary
st.markdown("---")
st.subheader(f"All Tags ({len(all_tags)} total)")
if all_tags:
replace_underscores = st.session_state.settings.get('replace_underscores', False)
if replace_underscores:
display_tags = [tag.replace('_', ' ') for tag in all_tags]
tags_text = ", ".join(display_tags)
else:
tags_text = ", ".join(all_tags)
st.write(tags_text)
# Add download button for tags
st.download_button(
label="📥 Download Tags",
data=tags_text,
file_name=f"{st.session_state.get('original_filename', 'image')}_tags.txt",
mime="text/plain"
)
else:
st.info("No tags detected above the threshold.")
if __name__ == "__main__":
    image_tagger_app()