# ----------------------------------------------------------------------
# FULL PIPELINE TEST
# ----------------------------------------------------------------------
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import time
import json
from PIL import Image
from config import TEST_IMAGE_URL, TEST_PRODUCT_TYPE
# ----------------------------------------------------------------------
# INTEGRATION TEST - PROCESS REAL IMAGE WITH ALL MODELS
# ----------------------------------------------------------------------
def test_process_image_through_entire_pipeline():
    """Integration test: run one real image through the full processing pipeline.

    Downloads TEST_IMAGE_URL, executes every step in PIPELINE_STEPS against a
    single ProcessingContext, then prints a step-by-step report. The test is
    observational (it asserts nothing) so it can run in degraded environments
    such as Zero GPU Spaces, where some steps are expected to be skipped.

    NOTE(review): status markers were emoji in the original but arrived
    mojibake-corrupted; they are restored here as ASCII tags ([OK]/[FAIL]/
    [WARN]) so the strings are valid and terminal-safe.
    """
    print(f"\n{'='*60}")
    print(f"TESTING FULL PIPELINE WITH: {TEST_IMAGE_URL}")
    print(f"Product Type: {TEST_PRODUCT_TYPE}")
    print(f"{'='*60}\n")

    # Imported lazily so merely importing this module does not pull in the
    # heavy model/pipeline machinery.
    from src.utils import ProcessingContext
    from src.pipeline import run_functions_in_sequence, PIPELINE_STEPS
    from src.models import model_loader

    # ------------------------------------------------------------------
    # CHECK MODEL LOADING STATE
    # ------------------------------------------------------------------
    print("\n[INFO] CHECKING MODEL LOADING STATE...")
    # SPACE_ID is set inside Hugging Face Spaces; there, models load on demand.
    if os.getenv("SPACE_ID"):
        print("[OK] Running in Zero GPU environment - models will load on-demand")
        print(f"   MODELS_LOADED: {model_loader.MODELS_LOADED}")
        print(f"   LOAD_ERROR: {model_loader.LOAD_ERROR}")
    else:
        # For local testing, try to load everything up front.
        print("[INFO] LOADING ALL MODELS...")
        try:
            model_loader.ensure_models_loaded()
            if model_loader.MODELS_LOADED:
                print("[OK] Models loaded successfully!")
            else:
                print("[WARN] Models not fully loaded but continuing...")
        except Exception as e:
            # Best-effort: the step-by-step report below still shows what ran.
            print(f"[WARN] Model loading encountered issues: {e}")
            print("Continuing with test anyway...")

    # ------------------------------------------------------------------
    # PREPARE TEST DATA
    # ------------------------------------------------------------------
    print("\n[INFO] PREPARING TEST DATA...")
    contexts = [ProcessingContext(url=TEST_IMAGE_URL, product_type=TEST_PRODUCT_TYPE, keywords=[])]

    # ------------------------------------------------------------------
    # RUN PIPELINE
    # ------------------------------------------------------------------
    print("\n[INFO] RUNNING FULL PIPELINE...")
    print(f"   Pipeline steps: {[step.__name__ for step in PIPELINE_STEPS]}")
    start_time = time.time()
    batch_logs = run_functions_in_sequence(contexts, PIPELINE_STEPS)
    processing_time = time.time() - start_time

    # ------------------------------------------------------------------
    # ANALYZE RESULTS
    # ------------------------------------------------------------------
    ctx = contexts[0]
    print("\n[INFO] PROCESSING RESULTS:")
    print(f"   Total processing time: {processing_time:.2f} seconds")

    # Report whether the pipeline bailed out early.
    if getattr(ctx, "skip_processing", False):
        print("[FAIL] Processing was skipped")
        if getattr(ctx, "error", None):
            print(f"   Error: {ctx.error}")

    print("\n[INFO] STEP-BY-STEP RESULTS:")

    # 1. Image download
    if "original" in ctx.pil_img:
        print(f"[OK] Step 1: Image downloaded - Size: {ctx.pil_img['original'].size}")
    else:
        print("[FAIL] Step 1: Image download failed")

    # 2. Background removal
    if "background_removed" in ctx.pil_img:
        print("[OK] Step 2: Background removed successfully")
    else:
        print("[WARN] Step 2: Background removal skipped")
        if os.getenv("SPACE_ID"):
            print("   (Expected in Zero GPU - models load on-demand)")

    # 3. Object detection
    if getattr(ctx, "detection_result", None):
        print(f"[OK] Step 3: Objects detected - {len(ctx.detection_result)} detections")
        # Show per-type detection counts.
        for det_type, detections in ctx.detection_result.items():
            if detections:
                print(f"   - {det_type}: {len(detections)} objects")
    else:
        print("[WARN] Step 3: Object detection skipped")

    # 4. Cropping/padding ("cropped" takes precedence over "final")
    if "cropped" in ctx.pil_img:
        print(f"[OK] Step 4: Image cropped - Size: {ctx.pil_img['cropped'].size}")
    elif "final" in ctx.pil_img:
        print(f"[OK] Step 4: Final image created - Size: {ctx.pil_img['final'].size}")
    else:
        print("[WARN] Step 4: Cropping/padding skipped")

    # 5. Base64 encoding
    if getattr(ctx, "result_image", None):
        print(f"[OK] Step 5: Image encoded to base64 - Length: {len(ctx.result_image)}")
    else:
        print("[FAIL] Step 5: Base64 encoding failed")

    # ------------------------------------------------------------------
    # SHOW BATCH LOGS
    # ------------------------------------------------------------------
    if batch_logs:
        print(f"\n[INFO] BATCH LOGS ({len(batch_logs)} entries):")
        for i, log in enumerate(batch_logs):
            print(f"\nLog {i+1}:")
            print(json.dumps(log, indent=2))

    # ------------------------------------------------------------------
    # SHOW PROCESSING LOGS (only the last 10, to keep output readable)
    # ------------------------------------------------------------------
    processing_logs = getattr(ctx, "processing_logs", None)
    if processing_logs:
        print(f"\n[INFO] PROCESSING LOGS ({len(processing_logs)} entries):")
        for i, log in enumerate(processing_logs[-10:]):
            print(f"   {i+1}. {log}")

    # ------------------------------------------------------------------
    # TEST SUMMARY
    # ------------------------------------------------------------------
    print(f"\n{'='*60}")
    # A non-empty result_image is the overall success criterion.
    if getattr(ctx, "result_image", None):
        print("[OK] PIPELINE TEST COMPLETED SUCCESSFULLY")
        print(f"   Processing time: {processing_time:.2f}s")
        if "final" in ctx.pil_img:
            print(f"   Output image size: {ctx.pil_img['final'].size}")
    else:
        print("[WARN] PIPELINE TEST COMPLETED WITH WARNINGS")
        if os.getenv("SPACE_ID"):
            print("   Note: Limited processing expected in Zero GPU environment")
    print(f"{'='*60}\n")
# ----------------------------------------------------------------------
# MAIN EXECUTION
# ----------------------------------------------------------------------
if __name__ == "__main__":
    # Allow running this integration test directly as a script.
    test_process_image_through_entire_pipeline()