diff --git "a/lighting_analyzer.py" "b/lighting_analyzer.py" --- "a/lighting_analyzer.py" +++ "b/lighting_analyzer.py" @@ -1,1299 +1,436 @@ import numpy as np import cv2 +import logging +import traceback from typing import Dict, Any, Optional +from configuration_manager import ConfigurationManager +from feature_extractor import FeatureExtractor +from indoor_outdoor_classifier import IndoorOutdoorClassifier +from lighting_condition_analyzer import LightingConditionAnalyzer + + class LightingAnalyzer: """ - Analyzes lighting conditions of an image, providing enhanced indoor/outdoor - determination and light type classification, with a focus on lighting analysis. + Comprehensive lighting analysis system facade that coordinates feature extraction, + indoor/outdoor classification, and lighting condition determination. + 此class是一個總窗口,主要匯總各式光線分析相關的class + + This facade class maintains the original interface while internally delegating + work to specialized components for improved maintainability and modularity. """ def __init__(self, config: Optional[Dict[str, Any]] = None): """ - Initializes the LightingAnalyzer. + Initialize the lighting analyzer with configuration. + + Args: + config: Optional configuration dictionary. If None, uses default configuration. + """ + self.logger = self._setup_logger() + + try: + # Initialize configuration manager + self.config_manager = ConfigurationManager() + + # Override default configuration if provided + if config is not None: + self._update_configuration(config) + + # Initialize specialized components + self.feature_extractor = FeatureExtractor(self.config_manager) + self.indoor_outdoor_classifier = IndoorOutdoorClassifier(self.config_manager) + self.lighting_condition_analyzer = LightingConditionAnalyzer(self.config_manager) + + # Legacy configuration access for backward compatibility + self.config = self.config_manager.get_legacy_config_dict() + + self.logger.info("LightingAnalyzer initialized successfully") + + except Exception as e: + self.logger.error(f"Error initializing LightingAnalyzer: {str(e)}") + self.logger.error(f"Traceback: {traceback.format_exc()}") + raise + + def _setup_logger(self) -> logging.Logger: + """Set up logger for lighting analysis operations.""" + logger = logging.getLogger(f"{__name__}.LightingAnalyzer") + if not logger.handlers: + handler = logging.StreamHandler() + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + ) + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(logging.INFO) + return logger + + def _update_configuration(self, config: Dict[str, Any]) -> None: + """ + Update configuration manager with provided configuration dictionary. Args: - config: Optional configuration dictionary for custom analysis parameters. + config: Configuration dictionary to update existing configuration. """ - self.config = config or self._get_default_config() + try: + # Update configuration through the manager's internal method + self.config_manager._update_from_dict(config) + self.logger.debug("Configuration updated successfully") + + except Exception as e: + self.logger.warning(f"Error updating configuration: {str(e)}") + self.logger.warning("Continuing with default configuration") - def analyze(self, image, places365_info: Optional[Dict] = None): + def analyze(self, image, places365_info: Optional[Dict] = None) -> Dict[str, Any]: """ - Analyzes the lighting conditions of an image. 
- Main entry point for analysis, computes basic features, determines - indoor/outdoor, and identifies lighting conditions. + Analyze lighting conditions of an image. + + This is the main entry point that maintains compatibility with the original interface + while leveraging the new modular architecture internally. Args: image: Input image (numpy array or PIL Image). + places365_info: Optional Places365 classification information containing + scene type, confidence, attributes, and indoor/outdoor classification. Returns: - Dict: Dictionary containing lighting analysis results. + Dictionary containing comprehensive lighting analysis results including: + - time_of_day: Specific lighting condition classification + - confidence: Confidence score for the classification + - is_indoor: Boolean indicating indoor/outdoor classification + - indoor_probability: Probability score for indoor classification + - brightness: Brightness analysis metrics + - color_info: Color characteristic analysis + - texture_info: Texture and gradient analysis + - structure_info: Structural feature analysis + - diagnostics: Detailed diagnostic information (if enabled) """ try: - # Convert image format - if not isinstance(image, np.ndarray): - image_np = np.array(image) # Convert PIL Image to numpy array - else: - image_np = image.copy() + self.logger.debug("Starting comprehensive lighting analysis") - # Ensure image is in BGR for OpenCV if it's from PIL (RGB) - if image_np.shape[2] == 3 and not isinstance(image, np.ndarray): # PIL images are typically RGB - image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR) - elif image_np.shape[2] == 3 and image.shape[2] == 3: # Already a numpy array, assume BGR from cv2.imread - image_bgr = image_np - elif image_np.shape[2] == 4: # RGBA - image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGBA2BGR) - else: # Grayscale or other - # If grayscale, convert to BGR for consistency, though feature extraction will mostly use grayscale/HSV - if len(image_np.shape) == 2: - image_bgr = cv2.cvtColor(image_np, cv2.COLOR_GRAY2BGR) - else: # Fallback for other unexpected formats - print(f"Warning: Unexpected image format with shape {image_np.shape}. 
Attempting to proceed.") - image_bgr = image_np - - - # Ensure RGB format for internal processing (some functions expect RGB) - image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB) - - features = self._compute_basic_features(image_rgb) # features 字典現在也包含由 P365 間接影響的預計算值 - - # 將 places365_info 傳遞給室內/室外判斷 - indoor_result = self._analyze_indoor_outdoor(features, places365_info=places365_info) - is_indoor = indoor_result["is_indoor"] - indoor_probability = indoor_result["indoor_probability"] - - # 將 places365_info 和已修正的 is_indoor 傳遞給光線類型判斷 - lighting_conditions = self._determine_lighting_conditions(features, is_indoor, places365_info=places365_info) - - # Consolidate results - result = { - "time_of_day": lighting_conditions["time_of_day"], - "confidence": float(lighting_conditions["confidence"]), - "is_indoor": is_indoor, - "indoor_probability": float(indoor_probability), - "brightness": { - "average": float(features["avg_brightness"]), - "std_dev": float(features["brightness_std"]), - "dark_ratio": float(features["dark_pixel_ratio"]), - "bright_ratio": float(features.get("bright_pixel_ratio", 0)) # Added - }, - "color_info": { - "blue_ratio": float(features["blue_ratio"]), - "sky_like_blue_ratio": float(features.get("sky_like_blue_ratio",0)), # More specific sky blue - "yellow_orange_ratio": float(features["yellow_orange_ratio"]), - "gray_ratio": float(features["gray_ratio"]), - "avg_saturation": float(features["avg_saturation"]), - "sky_region_brightness_ratio": float(features.get("sky_region_brightness_ratio", 1.0)), # Renamed and clarified - "sky_region_saturation": float(features.get("sky_region_saturation", 0)), - "sky_region_blue_dominance": float(features.get("sky_region_blue_dominance", 0)), - "color_atmosphere": features["color_atmosphere"], - "warm_ratio": float(features["warm_ratio"]), - "cool_ratio": float(features["cool_ratio"]), - }, - "texture_info": { # New category for texture/gradient features - "gradient_ratio_vertical_horizontal": float(features.get("gradient_ratio_vertical_horizontal", 0)), # Renamed - "top_region_texture_complexity": float(features.get("top_region_texture_complexity", 0)), - "shadow_clarity_score": float(features.get("shadow_clarity_score",0.5)), # Default to neutral - }, - "structure_info": { # New category for structural features - "ceiling_likelihood": float(features.get("ceiling_likelihood",0)), - "boundary_clarity": float(features.get("boundary_clarity",0)), - "openness_top_edge": float(features.get("openness_top_edge", 0.5)), # Default to neutral - } - } - - # Add diagnostic information - if self.config.get("include_diagnostics", False): # Use .get for safety - result["diagnostics"] = { - "feature_contributions": indoor_result.get("feature_contributions", {}), - "lighting_diagnostics": lighting_conditions.get("diagnostics", {}) - } - - if self.config.get("include_diagnostics", False): - # indoor_result["diagnostics"] 現在會包含 P365 的影響 - result["diagnostics"]["feature_contributions"] = indoor_result.get("feature_contributions", {}) - result["diagnostics"]["lighting_diagnostics"] = lighting_conditions.get("diagnostics", {}) - result["diagnostics"]["indoor_outdoor_diagnostics"] = indoor_result.get("diagnostics", {}) + # Step 1: Validate and preprocess input image + processed_image = self._preprocess_image(image) + if processed_image is None: + return self._get_error_result("Invalid image input") + + # Step 2: Extract comprehensive features + self.logger.debug("Extracting image features") + features = 
self.feature_extractor.extract_features(processed_image) + + if not features or "avg_brightness" not in features: + return self._get_error_result("Feature extraction failed") + + # Step 3: Classify indoor/outdoor with Places365 integration + self.logger.debug("Performing indoor/outdoor classification") + indoor_outdoor_result = self.indoor_outdoor_classifier.classify( + features, places365_info + ) + + is_indoor = indoor_outdoor_result["is_indoor"] + indoor_probability = indoor_outdoor_result["indoor_probability"] + + # Step 4: Determine specific lighting conditions + self.logger.debug(f"Analyzing lighting conditions for {'indoor' if is_indoor else 'outdoor'} scene") + lighting_result = self.lighting_condition_analyzer.analyze_lighting_conditions( + features, is_indoor, places365_info + ) + + # Step 5: Consolidate comprehensive results + result = self._consolidate_analysis_results( + lighting_result, indoor_outdoor_result, features + ) + + self.logger.info(f"Analysis complete: {result['time_of_day']} " + f"({'indoor' if result['is_indoor'] else 'outdoor'}) " + f"confidence: {result['confidence']:.3f}") return result except Exception as e: - print(f"Error in lighting analysis: {str(e)}") - import traceback - traceback.print_exc() - return { - "time_of_day": "unknown", - "confidence": 0, - "error": str(e) - } - - def _compute_basic_features(self, image_rgb: np.ndarray) -> Dict[str, Any]: - """ - Computes basic lighting features from an RGB image. - This version includes enhancements for sky, ceiling, and boundary detection. + self.logger.error(f"Error in lighting analysis: {str(e)}") + self.logger.error(f"Traceback: {traceback.format_exc()}") + return self._get_error_result(str(e)) + + def _preprocess_image(self, image) -> Optional[np.ndarray]: """ - # Get image dimensions - height, width = image_rgb.shape[:2] - if height == 0 or width == 0: - print("Error: Image has zero height or width.") - # Return a dictionary of zeros or default values for all expected features - return {feature: 0.0 for feature in [ # Ensure all keys expected by other methods are present - "avg_brightness", "brightness_std", "dark_pixel_ratio", "bright_pixel_ratio", - "blue_ratio", "sky_like_blue_ratio", "yellow_orange_ratio", "gray_ratio", - "avg_saturation", "sky_region_brightness_ratio", "sky_region_saturation", "sky_region_blue_dominance", - "color_atmosphere", "warm_ratio", "cool_ratio", "gradient_ratio_vertical_horizontal", - "top_region_texture_complexity", "shadow_clarity_score", "ceiling_likelihood", - "boundary_clarity", "openness_top_edge", "ceiling_uniformity", "horizontal_line_ratio", # Old keys kept for compatibility if still used - "indoor_light_score", "circular_light_count", "light_distribution_uniformity", - "boundary_edge_score", "top_region_std", "edges_density", "street_line_score", - "sky_brightness", "vertical_strength", "horizontal_strength", "brightness_uniformity", "bright_spot_count" - ]} - - - # Adaptive scaling factor based on image size for performance - base_scale = 4 - # Protect against zero division if height or width is tiny - scale_factor = base_scale + min(8, max(0, int((height * width) / (1000 * 1000)) if height * width > 0 else 0)) - scale_factor = max(1, scale_factor) # Ensure scale_factor is at least 1 - - # Create a smaller version of the image for faster processing of some features - small_rgb = cv2.resize(image_rgb, (width // scale_factor, height // scale_factor), interpolation=cv2.INTER_AREA) - - # Convert to HSV and Grayscale once - hsv_img = cv2.cvtColor(image_rgb, 
cv2.COLOR_RGB2HSV) - gray_img = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY) - small_gray = cv2.cvtColor(small_rgb, cv2.COLOR_RGB2GRAY) # Grayscale of the small image - - # Separate HSV channels - h_channel, s_channel, v_channel = cv2.split(hsv_img) - - # Brightness Features - avg_brightness = np.mean(v_channel) - brightness_std = np.std(v_channel) - dark_pixel_ratio = np.sum(v_channel < self.config.get("dark_pixel_threshold", 50)) / (height * width) # 使用配置閾值 - bright_pixel_ratio = np.sum(v_channel > self.config.get("bright_pixel_threshold", 220)) / (height * width) # 新增:亮部像素比例 - - # Yellow-Orange Ratio - yellow_orange_mask = ((h_channel >= 15) & (h_channel <= 45)) # Adjusted range slightly - yellow_orange_ratio = np.sum(yellow_orange_mask) / (height * width) - - # General Blue Ratio - blue_mask = ((h_channel >= 90) & (h_channel <= 140)) # Slightly wider blue range - blue_ratio = np.sum(blue_mask) / (height * width) - - # More specific "Sky-Like Blue" Ratio - for clearer skies - sky_like_blue_hue_min = self.config.get("sky_blue_hue_min", 100) - sky_like_blue_hue_max = self.config.get("sky_blue_hue_max", 130) # Typical sky blue Hues in HSV - sky_like_blue_sat_min = self.config.get("sky_blue_sat_min", 60) # Sky is usually somewhat saturated - sky_like_blue_val_min = self.config.get("sky_blue_val_min", 120) # Sky is usually bright - sky_like_blue_mask = ((h_channel >= sky_like_blue_hue_min) & (h_channel <= sky_like_blue_hue_max) & - (s_channel > sky_like_blue_sat_min) & (v_channel > sky_like_blue_val_min)) - sky_like_blue_ratio = np.sum(sky_like_blue_mask) / (height * width) - - # Gray Ratio (low saturation, mid-high brightness) - gray_sat_max = self.config.get("gray_sat_max", 50) - gray_val_min = self.config.get("gray_val_min", 80) # Adjusted to avoid very dark grays - gray_val_max = self.config.get("gray_val_max", 200) # Avoid pure white being too gray - gray_mask = (s_channel < gray_sat_max) & (v_channel > gray_val_min) & (v_channel < gray_val_max) - gray_ratio = np.sum(gray_mask) / (height * width) - - avg_saturation = np.mean(s_channel) - - # 專門分析圖像頂部區域,這是判斷天空的關鍵 - top_third_height = height // 3 - sky_region_v = v_channel[:top_third_height, :] - sky_region_s = s_channel[:top_third_height, :] - sky_region_h = h_channel[:top_third_height, :] - - sky_region_avg_brightness = np.mean(sky_region_v) if sky_region_v.size > 0 else 0 - sky_region_brightness_ratio = sky_region_avg_brightness / max(avg_brightness, 1e-5) # Ratio to overall brightness - sky_region_saturation = np.mean(sky_region_s) if sky_region_s.size > 0 else 0 - - # Blue dominance in sky region - sky_region_blue_pixels = np.sum( - (sky_region_h >= sky_like_blue_hue_min) & (sky_region_h <= sky_like_blue_hue_max) & - (sky_region_s > sky_like_blue_sat_min) & (sky_region_v > sky_like_blue_val_min) - ) - sky_region_blue_dominance = sky_region_blue_pixels / max(1, sky_region_v.size) - - - # --- Color Atmosphere --- - warm_hue_ranges = self.config.get("warm_hue_ranges", [(0, 50), (330, 360)]) # Red, Orange, Yellow, some Magentas - cool_hue_ranges = self.config.get("cool_hue_ranges", [(90, 270)]) # Cyan, Blue, Purple, Green - - warm_mask = np.zeros_like(h_channel, dtype=bool) - for h_min, h_max in warm_hue_ranges: - warm_mask |= ((h_channel >= h_min) & (h_channel <= h_max)) - warm_ratio = np.sum(warm_mask & (s_channel > 30)) / (height * width) # Consider saturation for warmth - - cool_mask = np.zeros_like(h_channel, dtype=bool) - for h_min, h_max in cool_hue_ranges: - cool_mask |= ((h_channel >= h_min) & (h_channel <= h_max)) - cool_ratio = 
np.sum(cool_mask & (s_channel > 30)) / (height * width) # Consider saturation for coolness - - if warm_ratio > cool_ratio and warm_ratio > 0.3: # Increased threshold - color_atmosphere = "warm" - elif cool_ratio > warm_ratio and cool_ratio > 0.3: # Increased threshold - color_atmosphere = "cool" - else: - color_atmosphere = "neutral" - - # Gradient and Texture Features (on small image for speed) - gx = cv2.Sobel(small_gray, cv2.CV_32F, 1, 0, ksize=3) - gy = cv2.Sobel(small_gray, cv2.CV_32F, 0, 1, ksize=3) - - avg_abs_gx = np.mean(np.abs(gx)) - avg_abs_gy = np.mean(np.abs(gy)) - # Renamed for clarity: ratio of vertical to horizontal gradients - gradient_ratio_vertical_horizontal = avg_abs_gy / max(avg_abs_gx, 1e-5) - - - # Texture complexity of the top region (potential ceiling or sky) - small_top_third_height = small_gray.shape[0] // 3 - small_sky_region_gray = small_gray[:small_top_third_height, :] - if small_sky_region_gray.size > 0: - laplacian_var_sky = cv2.Laplacian(small_sky_region_gray, cv2.CV_64F).var() - # Normalize, though this might need scene-adaptive normalization or defined bins - top_region_texture_complexity = min(1.0, laplacian_var_sky / 1000.0) # Example normalization - else: - top_region_texture_complexity = 0.5 # Neutral if no top region - - # 先簡單的估計陰影清晰度。清晰陰影通常表示強烈又單一的光源(像是太陽)。 - # High brightness std dev might indicate strong highlights and shadows. - # Low dark_pixel_ratio with high brightness_std could imply sharp shadows. - if brightness_std > 60 and dark_pixel_ratio < 0.15 and avg_brightness > 100: - shadow_clarity_score = 0.7 # Potential for clear shadows (more outdoor-like) - elif brightness_std < 30 and dark_pixel_ratio > 0.1: - shadow_clarity_score = 0.3 # Potential for diffuse shadows (more indoor/cloudy-like) - else: - shadow_clarity_score = 0.5 # Neutral - - # Structural Features (Ceiling, Boundary, Openness) - # 判斷天花板的可能性。 - ceiling_likelihood = 0.0 - # 條件1: 頂部區域紋理簡單且亮度適中 (表明可能是平坦的天花板) - if top_region_texture_complexity < self.config.get("ceiling_texture_thresh", 0.4) and \ - self.config.get("ceiling_brightness_min", 60) < sky_region_avg_brightness < self.config.get("ceiling_brightness_max", 230): # 放寬亮度上限 - ceiling_likelihood += 0.45 # 稍微提高基礎分 - - # 條件2: 頂部區域存在水平線條 (可能是天花板邊緣或結構) - top_horizontal_lines_strength = np.mean(np.abs(gx[:small_gray.shape[0]//3, :])) - if top_horizontal_lines_strength > avg_abs_gx * self.config.get("ceiling_horizontal_line_factor", 1.15): # 稍微降低因子 - ceiling_likelihood += 0.35 # 稍微提高貢獻 - - # 條件3: 中央區域比周圍亮 (可能是吊燈,暗示天花板) - 針對室內光源 - # 這個條件對於 room_02.jpg 可能比較重要,因為它有一個中央吊燈 - center_y_sm, center_x_sm = small_gray.shape[0]//2, small_gray.shape[1]//2 - # 定義一個更小的中心區域來檢測吊燈類型的亮點 - lamp_check_radius_y = small_gray.shape[0] // 8 - lamp_check_radius_x = small_gray.shape[1] // 8 - center_bright_spot_region = small_gray[max(0, center_y_sm - lamp_check_radius_y) : min(small_gray.shape[0], center_y_sm + lamp_check_radius_y), - max(0, center_x_sm - lamp_check_radius_x) : min(small_gray.shape[1], center_x_sm + lamp_check_radius_x)] - - if center_bright_spot_region.size > 0 and np.mean(center_bright_spot_region) > avg_brightness * self.config.get("ceiling_center_bright_factor", 1.25): # 提高中心亮度要求 - ceiling_likelihood += 0.30 # 顯著提高吊燈對天花板的貢獻 - - # 條件4: 如果頂部區域藍色成分不高,且不是特別亮(排除天空),則增加天花板可能性 - # 這個條件有助於區分多雲天空和室內天花板 - if sky_region_blue_dominance < self.config.get("ceiling_max_sky_blue_thresh", 0.08) and \ - sky_region_brightness_ratio < self.config.get("ceiling_max_sky_brightness_ratio", 1.15): # 頂部不能太亮 - ceiling_likelihood += 0.15 - - # 懲罰項: 
如果有強烈天空信號,大幅降低天花板可能性 - if sky_region_blue_dominance > self.config.get("sky_blue_dominance_strong_thresh", 0.25) and \ - sky_region_brightness_ratio > self.config.get("sky_brightness_strong_thresh", 1.25): - ceiling_likelihood *= self.config.get("ceiling_sky_override_factor", 0.1) # 大幅降低 - - ceiling_likelihood = min(1.0, ceiling_likelihood) - - - # 邊界感的,通常室內邊界較強 - # Using Sobel on edges of the small_gray image - edge_width_sm = max(1, small_gray.shape[1] // 10) # 10% for edge - edge_height_sm = max(1, small_gray.shape[0] // 10) - - left_edge_grad_x = np.mean(np.abs(gx[:, :edge_width_sm])) if small_gray.shape[1] > edge_width_sm else 0 - right_edge_grad_x = np.mean(np.abs(gx[:, -edge_width_sm:])) if small_gray.shape[1] > edge_width_sm else 0 - top_edge_grad_y = np.mean(np.abs(gy[:edge_height_sm, :])) if small_gray.shape[0] > edge_height_sm else 0 - - # Normalize these gradients (e.g. against average gradient) - boundary_clarity = (left_edge_grad_x + right_edge_grad_x + top_edge_grad_y) / (3 * max(avg_abs_gx, avg_abs_gy, 1e-5)) - boundary_clarity = min(1.0, boundary_clarity / 1.5) # Normalize, 1.5 is a heuristic factor - - - # 判斷頂部邊緣是否開放(例如天空),室外的特徵比較明顯 - # Low vertical gradient at the very top edge suggests openness (sky) - top_edge_strip_gy = np.mean(np.abs(gy[:max(1,small_gray.shape[0]//20), :])) # Very top 5% - openness_top_edge = 1.0 - min(1.0, top_edge_strip_gy / max(avg_abs_gy, 1e-5) / 0.5 ) # Normalize, 0.5 factor, less grad = more open - - top_region = v_channel[:height//4, :] # Full res top region - top_region_std_fullres = np.std(top_region) if top_region.size > 0 else 0 - ceiling_uniformity_old = 1.0 - min(1, top_region_std_fullres / max(np.mean(top_region) if top_region.size >0 else 1e-5, 1e-5)) - - top_gradients_old = np.abs(cv2.Sobel(gray_img[:height//4, :], cv2.CV_32F, 0, 1, ksize=3)) # Full res top gradients for gy - horizontal_lines_strength_old = np.mean(top_gradients_old) if top_gradients_old.size > 0 else 0 - horizontal_line_ratio_old = min(1, horizontal_lines_strength_old / 40) # Original normalization - - # Light source detection (simplified, as in original) - sampled_v = v_channel[::scale_factor*2, ::scale_factor*2] # Already calculated - light_threshold = min(self.config.get("light_source_abs_thresh", 220), avg_brightness + 2*brightness_std) - is_bright_spots = sampled_v > light_threshold - bright_spot_count_old = np.sum(is_bright_spots) - circular_light_score_old = 0 - indoor_light_score_old = 0.0 # Default to float - light_distribution_uniformity_old = 0.5 - if 1 < bright_spot_count_old < 20: - bright_y, bright_x = np.where(is_bright_spots) - if len(bright_y) > 1: - mean_x, mean_y = np.mean(bright_x), np.mean(bright_y) - dist_from_center = np.sqrt((bright_x - mean_x)**2 + (bright_y - mean_y)**2) - if np.std(dist_from_center) < np.mean(dist_from_center): # Concentrated - circular_light_score_old = min(3, len(bright_y) // 2) - light_distribution_uniformity_old = 0.7 - if np.mean(bright_y) < sampled_v.shape[0] / 2: # Lights in upper half - indoor_light_score_old = 0.6 - else: - indoor_light_score_old = 0.3 - - - # Boundary edge score - # Using small_gray for consistency with other gradient features - left_edge_sm = small_gray[:, :small_gray.shape[1]//6] - right_edge_sm = small_gray[:, 5*small_gray.shape[1]//6:] - top_edge_sm = small_gray[:small_gray.shape[0]//6, :] - - left_gradient_old = np.mean(np.abs(cv2.Sobel(left_edge_sm, cv2.CV_32F, 1, 0, ksize=3))) if left_edge_sm.size >0 else 0 - right_gradient_old = np.mean(np.abs(cv2.Sobel(right_edge_sm, cv2.CV_32F, 1, 0, 
ksize=3))) if right_edge_sm.size >0 else 0 - top_gradient_old = np.mean(np.abs(cv2.Sobel(top_edge_sm, cv2.CV_32F, 0, 1, ksize=3))) if top_edge_sm.size >0 else 0 - boundary_edge_score_old = (min(1, left_gradient_old/50) + min(1, right_gradient_old/50) + min(1, top_gradient_old/50)) / 3 - - edges_density_old = min(1, (avg_abs_gx + avg_abs_gy) / 100) # Using already computed avg_abs_gx, avg_abs_gy - - # Street line score (original) - street_line_score_old = 0 - bottom_half_sm = small_gray[small_gray.shape[0]//2:, :] - if bottom_half_sm.size > 0: - bottom_vert_gradient = cv2.Sobel(bottom_half_sm, cv2.CV_32F, 0, 1, ksize=3) - strong_vert_lines = np.abs(bottom_vert_gradient) > 50 - if np.sum(strong_vert_lines) > (bottom_half_sm.size * 0.05): - street_line_score_old = 0.7 - - - features = { - # Brightness - "avg_brightness": avg_brightness, - "brightness_std": brightness_std, - "dark_pixel_ratio": dark_pixel_ratio, - "bright_pixel_ratio": bright_pixel_ratio, - - # Color - "blue_ratio": blue_ratio, - "sky_like_blue_ratio": sky_like_blue_ratio, - "yellow_orange_ratio": yellow_orange_ratio, - "gray_ratio": gray_ratio, - "avg_saturation": avg_saturation, - "color_atmosphere": color_atmosphere, - "warm_ratio": warm_ratio, - "cool_ratio": cool_ratio, - - # Sky Region Specific - "sky_region_brightness_ratio": sky_region_brightness_ratio, - "sky_region_saturation": sky_region_saturation, - "sky_region_blue_dominance": sky_region_blue_dominance, - - # Texture / Gradient - "gradient_ratio_vertical_horizontal": gradient_ratio_vertical_horizontal, - "top_region_texture_complexity": top_region_texture_complexity, - "shadow_clarity_score": shadow_clarity_score, - - # Structure - "ceiling_likelihood": ceiling_likelihood, - "boundary_clarity": boundary_clarity, - "openness_top_edge": openness_top_edge, - - # color distribution - "sky_blue_ratio": sky_like_blue_ratio, - "sky_brightness": sky_region_avg_brightness, - "gradient_ratio": gradient_ratio_vertical_horizontal, - "brightness_uniformity": 1 - min(1, brightness_std / max(avg_brightness, 1e-5)), - "vertical_strength": avg_abs_gy, - "horizontal_strength": avg_abs_gx, - "ceiling_uniformity": ceiling_uniformity_old, - "horizontal_line_ratio": horizontal_line_ratio_old, - "bright_spot_count": bright_spot_count_old, - "indoor_light_score": indoor_light_score_old, - "circular_light_count": circular_light_score_old, - "light_distribution_uniformity": light_distribution_uniformity_old, - "boundary_edge_score": boundary_edge_score_old, - "top_region_std": top_region_std_fullres, - "edges_density": edges_density_old, - "street_line_score": street_line_score_old, - } - return features + Preprocess input image to ensure consistent format for analysis. + Args: + image: Input image in various possible formats. - def _analyze_indoor_outdoor(self, features: Dict[str, Any], places365_info: Optional[Dict] = None) -> Dict[str, Any]: - """ - Analyzes features and Places365 info to determine if the scene is indoor or outdoor. - Places365 info is used to strongly influence the decision if its confidence is high. + Returns: + Preprocessed image as RGB numpy array, or None if preprocessing failed. 
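The docstring above describes the preprocessing contract: accept a PIL image or a NumPy array, and hand back an RGB uint8 array (or None when the input cannot be interpreted). A minimal standalone sketch of that contract follows; the helper name `to_rgb_uint8` is illustrative and is not part of this diff.

```python
from typing import Optional
import numpy as np
import cv2

def to_rgb_uint8(image) -> Optional[np.ndarray]:
    """Sketch of the PIL/NumPy -> RGB uint8 contract described above."""
    arr = np.array(image) if not isinstance(image, np.ndarray) else image.copy()
    if arr.ndim < 2 or arr.shape[0] == 0 or arr.shape[1] == 0:
        return None
    if arr.ndim == 2:
        # Grayscale to RGB
        arr = cv2.cvtColor(arr, cv2.COLOR_GRAY2RGB)
    elif arr.shape[2] == 4:
        # Drop the alpha channel; cv2-style arrays are assumed BGRA, PIL arrays RGBA
        code = cv2.COLOR_BGRA2RGB if isinstance(image, np.ndarray) else cv2.COLOR_RGBA2RGB
        arr = cv2.cvtColor(arr, code)
    elif isinstance(image, np.ndarray):
        # cv2-style 3-channel arrays are assumed BGR
        arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
    if arr.dtype != np.uint8:
        # Treat floats in [0, 1] as normalized; otherwise just cast
        if arr.dtype in (np.float32, np.float64) and arr.max() <= 1.0:
            arr = (arr * 255).astype(np.uint8)
        else:
            arr = arr.astype(np.uint8)
    return arr

# Example: to_rgb_uint8(cv2.imread("photo.jpg")) returns an RGB uint8 array.
```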
""" - # Use a copy of weights if they might be modified, otherwise direct access is fine - weights = self.config.get("indoor_outdoor_weights", {}) - visual_indoor_score = 0.0 # Score based purely on visual features - feature_contributions = {} - diagnostics = {} - - # Internal Thresholds and Definitions for this function - P365_HIGH_CONF_THRESHOLD = 0.65 # Confidence threshold for P365 to strongly influence/override - P365_MODERATE_CONF_THRESHOLD = 0.4 # Confidence threshold for P365 to moderately influence - - # Simplified internal lists for definitely indoor/outdoor based on P365 mapped_scene_type - DEFINITELY_OUTDOOR_KEYWORDS_P365 = [ - "street", "road", "highway", "park", "beach", "mountain", "forest", "field", - "outdoor", "sky", "coast", "courtyard", "square", "plaza", "bridge", - "parking_lot", "playground", "stadium", "construction_site", "river", "ocean", "desert", "garden", "trail" - ] - DEFINITELY_INDOOR_KEYWORDS_P365 = [ - "bedroom", "office", "kitchen", "library", "classroom", "conference_room", "living_room", - "bathroom", "hospital", "hotel_room", "cabin", "interior", "museum", "gallery", - "mall", "market_indoor", "basement", "corridor", "lobby", "restaurant_indoor", "bar_indoor", "shop_indoor", "gym_indoor" - ] - - # Extract key info from places365_info - p365_mapped_scene = "unknown" - p365_is_indoor_from_classification = None - p365_attributes = [] - p365_confidence = 0.0 - - if places365_info: - p365_mapped_scene = places365_info.get('mapped_scene_type', 'unknown').lower() - p365_attributes = [attr.lower() for attr in places365_info.get('attributes', [])] - p365_confidence = places365_info.get('confidence', 0.0) - p365_is_indoor_from_classification = places365_info.get('is_indoor_from_classification', None) - - diagnostics["p365_context_received"] = ( - f"P365 Scene: {p365_mapped_scene}, P365 SceneConf: {p365_confidence:.2f}, " - f"P365 DirectIndoor: {p365_is_indoor_from_classification}, P365 Attrs: {p365_attributes}" - ) + try: + # Convert to numpy array if needed + if not isinstance(image, np.ndarray): + image_np = np.array(image) + else: + image_np = image.copy() - # Step 1: Calculate visual_indoor_score based on its own features - sky_evidence_score_visual = 0.0 - strong_sky_signal_visual = False - sky_blue_dominance_val = features.get("sky_region_blue_dominance", 0.0) - sky_region_brightness_ratio_val = features.get("sky_region_brightness_ratio", 1.0) - top_texture_complexity_val = features.get("top_region_texture_complexity", 0.5) - openness_top_edge_val = features.get("openness_top_edge", 0.5) - - # Condition 1: Visual Strong blue sky signal - if sky_blue_dominance_val > self.config.get("sky_blue_dominance_strong_thresh", 0.35): - sky_evidence_score_visual -= weights.get("sky_blue_dominance_w", 3.5) * sky_blue_dominance_val - diagnostics["sky_detection_reason_visual"] = f"Visual: Strong sky-like blue ({sky_blue_dominance_val:.2f})" - strong_sky_signal_visual = True - - elif sky_region_brightness_ratio_val > self.config.get("sky_brightness_ratio_strong_thresh", 1.35) and \ - top_texture_complexity_val < self.config.get("sky_texture_complexity_clear_thresh", 0.25): - outdoor_push = weights.get("sky_brightness_ratio_w", 3.0) * (sky_region_brightness_ratio_val - 1.0) - sky_evidence_score_visual -= outdoor_push - sky_evidence_score_visual -= weights.get("sky_texture_w", 2.0) - diagnostics["sky_detection_reason_visual"] = f"Visual: Top brighter (ratio:{sky_region_brightness_ratio_val:.2f}) & low texture." 
- strong_sky_signal_visual = True - - elif openness_top_edge_val > self.config.get("openness_top_strong_thresh", 0.80): - sky_evidence_score_visual -= weights.get("openness_top_w", 2.8) * openness_top_edge_val - diagnostics["sky_detection_reason_visual"] = f"Visual: Very high top edge openness ({openness_top_edge_val:.2f})." - strong_sky_signal_visual = True - - elif not strong_sky_signal_visual and \ - top_texture_complexity_val < self.config.get("sky_texture_complexity_cloudy_thresh", 0.20) and \ - sky_region_brightness_ratio_val > self.config.get("sky_brightness_ratio_cloudy_thresh", 0.95): - sky_evidence_score_visual -= weights.get("sky_texture_w", 2.0) * (1.0 - top_texture_complexity_val) * 0.5 - diagnostics["sky_detection_reason_visual"] = f"Visual: Weak sky signal (low texture, brightish top: {top_texture_complexity_val:.2f}), less weight." - - if abs(sky_evidence_score_visual) > 0.01: - visual_indoor_score += sky_evidence_score_visual - feature_contributions["sky_openness_features_visual"] = round(sky_evidence_score_visual, 2) - if strong_sky_signal_visual: - diagnostics["strong_sky_signal_visual_detected"] = True - - # Indoor Indicators (Visual): Ceiling, Enclosure - enclosure_evidence_score_visual = 0.0 - ceiling_likelihood_val = features.get("ceiling_likelihood", 0.0) - boundary_clarity_val = features.get("boundary_clarity", 0.0) - - # Get base weights for modification - effective_ceiling_weight = weights.get("ceiling_likelihood_w", 1.5) - effective_boundary_weight = weights.get("boundary_clarity_w", 1.2) - - if ceiling_likelihood_val > self.config.get("ceiling_likelihood_thresh", 0.38): - current_ceiling_score = effective_ceiling_weight * ceiling_likelihood_val - if strong_sky_signal_visual: - current_ceiling_score *= self.config.get("sky_override_factor_ceiling", 0.1) - enclosure_evidence_score_visual += current_ceiling_score - diagnostics["indoor_reason_ceiling_visual"] = f"Visual Ceiling: {ceiling_likelihood_val:.2f}, ScoreCont: {current_ceiling_score:.2f}" - - if boundary_clarity_val > self.config.get("boundary_clarity_thresh", 0.38): - current_boundary_score = effective_boundary_weight * boundary_clarity_val - if strong_sky_signal_visual: - current_boundary_score *= self.config.get("sky_override_factor_boundary", 0.2) - enclosure_evidence_score_visual += current_boundary_score - diagnostics["indoor_reason_boundary_visual"] = f"Visual Boundary: {boundary_clarity_val:.2f}, ScoreCont: {current_boundary_score:.2f}" - - if not strong_sky_signal_visual and top_texture_complexity_val > 0.7 and \ - openness_top_edge_val < 0.3 and ceiling_likelihood_val < 0.35: - diagnostics["complex_urban_top_visual"] = True - if boundary_clarity_val > 0.5: - enclosure_evidence_score_visual *= 0.5 - diagnostics["reduced_enclosure_for_urban_top_visual"] = True - - if abs(enclosure_evidence_score_visual) > 0.01: - visual_indoor_score += enclosure_evidence_score_visual - feature_contributions["enclosure_features"] = round(enclosure_evidence_score_visual, 2) - - # Brightness Uniformity (Visual) - brightness_uniformity_val = 1.0 - min(1.0, features.get("brightness_std", 50.0) / max(features.get("avg_brightness", 100.0), 1e-5)) - uniformity_contribution_visual = 0.0 - if brightness_uniformity_val > self.config.get("brightness_uniformity_thresh_indoor", 0.6): - uniformity_contribution_visual = weights.get("brightness_uniformity_w", 0.6) * brightness_uniformity_val - if strong_sky_signal_visual: uniformity_contribution_visual *= self.config.get("sky_override_factor_uniformity", 0.15) - elif 
brightness_uniformity_val < self.config.get("brightness_uniformity_thresh_outdoor", 0.40): - if features.get("shadow_clarity_score", 0.5) > 0.65: - uniformity_contribution_visual = -weights.get("brightness_non_uniformity_outdoor_w", 1.0) * (1.0 - brightness_uniformity_val) - elif not strong_sky_signal_visual: - uniformity_contribution_visual = weights.get("brightness_non_uniformity_indoor_penalty_w", 0.1) * (1.0 - brightness_uniformity_val) - if abs(uniformity_contribution_visual) > 0.01: - visual_indoor_score += uniformity_contribution_visual - feature_contributions["brightness_uniformity_contribution"] = round(uniformity_contribution_visual, 2) - - # Light Sources (Visual) - indoor_light_score_val = features.get("indoor_light_score", 0.0) - circular_light_count_val = features.get("circular_light_count", 0) - bright_spot_count_val = features.get("bright_spot_count", 0) - avg_brightness_val = features.get("avg_brightness", 100.0) - light_source_contribution_visual = 0.0 - - if circular_light_count_val >= 1 and not strong_sky_signal_visual: - light_source_contribution_visual += weights.get("circular_lights_w", 1.2) * circular_light_count_val - elif indoor_light_score_val > 0.55 and not strong_sky_signal_visual: - light_source_contribution_visual += weights.get("indoor_light_score_w", 0.8) * indoor_light_score_val - elif bright_spot_count_val > self.config.get("many_bright_spots_thresh", 6) and \ - avg_brightness_val < self.config.get("dim_scene_for_spots_thresh", 115) and \ - not strong_sky_signal_visual: - light_source_contribution_visual += weights.get("many_bright_spots_indoor_w", 0.3) * min(bright_spot_count_val / 10.0, 1.5) - - grad_ratio_val = features.get("gradient_ratio_vertical_horizontal", 1.0) - is_likely_street_structure_visual = (0.7 < grad_ratio_val < 1.5) and features.get("edges_density", 0.0) > 0.15 - - if is_likely_street_structure_visual and bright_spot_count_val > 3 and not strong_sky_signal_visual: - light_source_contribution_visual *= 0.2 - diagnostics["street_lights_heuristic_visual"] = True - elif strong_sky_signal_visual: - light_source_contribution_visual *= self.config.get("sky_override_factor_lights", 0.05) - - if abs(light_source_contribution_visual) > 0.01: - visual_indoor_score += light_source_contribution_visual - feature_contributions["light_source_features"] = round(light_source_contribution_visual, 2) - - # Color Atmosphere (Visual) - color_atmosphere_contribution_visual = 0.0 - if features.get("color_atmosphere") == "warm" and \ - avg_brightness_val < self.config.get("warm_indoor_max_brightness_thresh", 135): - if not strong_sky_signal_visual and \ - not diagnostics.get("complex_urban_top_visual", False) and \ - not (is_likely_street_structure_visual and avg_brightness_val > 80) and \ - features.get("avg_saturation", 100.0) < 160: - if light_source_contribution_visual > 0.05: - color_atmosphere_contribution_visual = weights.get("warm_atmosphere_indoor_w", 0.15) - visual_indoor_score += color_atmosphere_contribution_visual - if abs(color_atmosphere_contribution_visual) > 0.01: - feature_contributions["warm_atmosphere_indoor_visual_contrib"] = round(color_atmosphere_contribution_visual, 2) # New key - - # Home Environment Pattern (Visual) - home_env_score_contribution_visual = 0.0 - if not strong_sky_signal_visual: - bedroom_indicators = 0 - if features.get("brightness_uniformity",0.0) > 0.65 and features.get("boundary_clarity",0.0) > 0.40 : bedroom_indicators+=1.1 - if features.get("ceiling_likelihood",0.0) > 0.35 and (bright_spot_count_val > 0 or 
circular_light_count_val > 0) : bedroom_indicators+=1.1 - if features.get("warm_ratio", 0.0) > 0.55 and features.get("brightness_uniformity",0.0) > 0.65 : bedroom_indicators+=1.0 - if features.get("brightness_uniformity",0.0) > 0.70 and features.get("avg_saturation",100.0) < 60 : bedroom_indicators+=0.7 - - if bedroom_indicators >= self.config.get("home_pattern_thresh_strong", 2.0) : - home_env_score_contribution_visual = weights.get("home_env_strong_w", 1.5) - elif bedroom_indicators >= self.config.get("home_pattern_thresh_moderate", 1.0): - home_env_score_contribution_visual = weights.get("home_env_moderate_w", 0.7) - if bedroom_indicators > 0: - diagnostics["home_environment_pattern_visual_indicators"] = round(bedroom_indicators,1) - else: - diagnostics["skipped_home_env_visual_due_to_sky"] = True - - if abs(home_env_score_contribution_visual) > 0.01: - visual_indoor_score += home_env_score_contribution_visual - feature_contributions["home_environment_pattern_visual"] = round(home_env_score_contribution_visual, 2) - - # Aerial View of Streets (Visual Heuristic) - if features.get("sky_region_brightness_ratio", 1.0) < self.config.get("aerial_top_dark_ratio_thresh", 0.9) and \ - top_texture_complexity_val > self.config.get("aerial_top_complex_thresh", 0.60) and \ - avg_brightness_val > self.config.get("aerial_min_avg_brightness_thresh", 65) and \ - not strong_sky_signal_visual: - aerial_street_outdoor_push_visual = -weights.get("aerial_street_w", 2.5) - visual_indoor_score += aerial_street_outdoor_push_visual - feature_contributions["aerial_street_pattern_visual"] = round(aerial_street_outdoor_push_visual, 2) - diagnostics["aerial_street_pattern_visual_detected"] = True - if "enclosure_features" in feature_contributions and feature_contributions["enclosure_features"] > 0: # Check if positive - reduction_factor = self.config.get("aerial_enclosure_reduction_factor", 0.75) - # Only reduce the positive part of enclosure_evidence_score_visual - positive_enclosure_score = max(0, enclosure_evidence_score_visual) - reduction_amount = positive_enclosure_score * reduction_factor - visual_indoor_score -= reduction_amount - feature_contributions["enclosure_features_reduced_by_aerial"] = round(-reduction_amount, 2) - # Update the main enclosure_features contribution - feature_contributions["enclosure_features"] = round(enclosure_evidence_score_visual - reduction_amount, 2) - - diagnostics["visual_indoor_score_subtotal"] = round(visual_indoor_score, 3) - - # Step 2: Incorporate Places365 Influence - final_indoor_score = visual_indoor_score # Start with the visual score - p365_influence_score = 0.0 # Score component specifically from P365 - - # 處理所有Places365資訊 - if places365_info: - # Define internal (non-config) weights for P365 influence to keep it self-contained - P365_DIRECT_INDOOR_WEIGHT = 3.5 # Strong influence for P365's direct classification - P365_DIRECT_OUTDOOR_WEIGHT = 4.0 # Slightly stronger for outdoor to counter visual enclosure bias - P365_SCENE_CONTEXT_INDOOR_WEIGHT = 2.0 - P365_SCENE_CONTEXT_OUTDOOR_WEIGHT = 2.5 - P365_ATTRIBUTE_INDOOR_WEIGHT = 1.0 - P365_ATTRIBUTE_OUTDOOR_WEIGHT = 1.5 - - # 場景關鍵字定義,包含十字路口相關詞彙 - DEFINITELY_OUTDOOR_KEYWORDS_P365 = [ - "street", "road", "highway", "park", "beach", "mountain", "forest", "field", - "outdoor", "sky", "coast", "courtyard", "square", "plaza", "bridge", - "parking_lot", "playground", "stadium", "construction_site", "river", "ocean", - "desert", "garden", "trail", "intersection", "crosswalk", "sidewalk", "pathway", - "avenue", "boulevard", 
"downtown", "city_center", "market_outdoor" - ] - - DEFINITELY_INDOOR_KEYWORDS_P365 = [ - "bedroom", "office", "kitchen", "library", "classroom", "conference_room", "living_room", - "bathroom", "hospital", "hotel_room", "cabin", "interior", "museum", "gallery", - "mall", "market_indoor", "basement", "corridor", "lobby", "restaurant_indoor", - "bar_indoor", "shop_indoor", "gym_indoor" - ] - - # A. Influence from P365's direct indoor/outdoor classification (is_indoor_from_classification) - if p365_is_indoor_from_classification is not None and \ - p365_confidence >= P365_MODERATE_CONF_THRESHOLD: - - current_p365_direct_contrib = 0.0 - if p365_is_indoor_from_classification is True: - current_p365_direct_contrib = P365_DIRECT_INDOOR_WEIGHT * p365_confidence - diagnostics["p365_influence_source"] = f"P365_DirectIndoor(True,Conf:{p365_confidence:.2f},Scene:{p365_mapped_scene})" - else: # P365 says outdoor - current_p365_direct_contrib = -P365_DIRECT_OUTDOOR_WEIGHT * p365_confidence - diagnostics["p365_influence_source"] = f"P365_DirectIndoor(False,Conf:{p365_confidence:.2f},Scene:{p365_mapped_scene})" - - # Modulate P365's indoor push if strong VISUAL sky signal exists from LA - if strong_sky_signal_visual and current_p365_direct_contrib > 0: - sky_override_factor = self.config.get("sky_override_factor_p365_indoor_decision", 0.3) - current_p365_direct_contrib *= sky_override_factor - diagnostics["p365_indoor_push_reduced_by_visual_sky"] = f"Reduced to {current_p365_direct_contrib:.2f}" - - p365_influence_score += current_p365_direct_contrib - - # B. Influence from P365's mapped scene type context (修改:適用於所有信心度情況) - elif p365_confidence >= P365_MODERATE_CONF_THRESHOLD: - current_p365_context_contrib = 0.0 - is_def_indoor = any(kw in p365_mapped_scene for kw in DEFINITELY_INDOOR_KEYWORDS_P365) - is_def_outdoor = any(kw in p365_mapped_scene for kw in DEFINITELY_OUTDOOR_KEYWORDS_P365) - - if is_def_indoor and not is_def_outdoor: # Clearly an indoor scene type from P365 - current_p365_context_contrib = P365_SCENE_CONTEXT_INDOOR_WEIGHT * p365_confidence - diagnostics["p365_influence_source"] = f"P365_SceneContext(Indoor: {p365_mapped_scene}, Conf:{p365_confidence:.2f})" - elif is_def_outdoor and not is_def_indoor: # Clearly an outdoor scene type from P365 - current_p365_context_contrib = -P365_SCENE_CONTEXT_OUTDOOR_WEIGHT * p365_confidence - diagnostics["p365_influence_source"] = f"P365_SceneContext(Outdoor: {p365_mapped_scene}, Conf:{p365_confidence:.2f})" - - if strong_sky_signal_visual and current_p365_context_contrib > 0: - sky_override_factor = self.config.get("sky_override_factor_p365_indoor_decision", 0.3) - current_p365_context_contrib *= sky_override_factor - diagnostics["p365_context_indoor_push_reduced_by_visual_sky"] = f"Reduced to {current_p365_context_contrib:.2f}" - - p365_influence_score += current_p365_context_contrib - - # C. 
Influence from P365 attributes - if p365_attributes and p365_confidence > self.config.get("places365_attribute_confidence_thresh", 0.5): - attr_contrib = 0.0 - if "indoor" in p365_attributes and "outdoor" not in p365_attributes: # Prioritize "indoor" if both somehow appear - attr_contrib += P365_ATTRIBUTE_INDOOR_WEIGHT * (p365_confidence * 0.5) # Attributes usually less direct - diagnostics["p365_attr_influence"] = f"+{attr_contrib:.2f} (indoor attr)" - elif "outdoor" in p365_attributes and "indoor" not in p365_attributes: - attr_contrib -= P365_ATTRIBUTE_OUTDOOR_WEIGHT * (p365_confidence * 0.5) - diagnostics["p365_attr_influence"] = f"{attr_contrib:.2f} (outdoor attr)" - - if strong_sky_signal_visual and attr_contrib > 0: - attr_contrib *= self.config.get("sky_override_factor_p365_indoor_decision", 0.3) # Reduce if LA sees sky - - p365_influence_score += attr_contrib - - # 針對高信心度戶外場景的額外處理 - if p365_confidence >= 0.85 and any(kw in p365_mapped_scene for kw in ["intersection", "crosswalk", "street", "road"]): - # 當Places365強烈指示戶外街道場景時,額外增加戶外影響分數 - additional_outdoor_push = -3.0 * p365_confidence - p365_influence_score += additional_outdoor_push - diagnostics["p365_street_scene_boost"] = f"Additional outdoor push: {additional_outdoor_push:.2f} for street scene: {p365_mapped_scene}" - print(f"DEBUG: High confidence street scene detected - {p365_mapped_scene} with confidence {p365_confidence:.3f}") - - if abs(p365_influence_score) > 0.01: - feature_contributions["places365_influence_score"] = round(p365_influence_score, 2) - - final_indoor_score = visual_indoor_score + p365_influence_score - - diagnostics["final_indoor_score_value"] = round(final_indoor_score, 3) - diagnostics["final_score_breakdown"] = f"VisualScore: {visual_indoor_score:.2f}, P365Influence: {p365_influence_score:.2f}" - - # Step 3: Final probability and decision - sigmoid_scale = self.config.get("indoor_score_sigmoid_scale", 0.30) - indoor_probability = 1 / (1 + np.exp(-final_indoor_score * sigmoid_scale)) - decision_threshold = self.config.get("indoor_decision_threshold", 0.5) - is_indoor = indoor_probability > decision_threshold - - # Places365 高信心度強制覆蓋(在 sigmoid 計算之後才執行) - print(f"DEBUG_OVERRIDE: Pre-override -> is_indoor: {is_indoor} (type: {type(is_indoor)}), p365_conf: {p365_confidence}, p365_raw_is_indoor: {places365_info.get('is_indoor', 'N/A') if places365_info else 'N/A'}") - - # Places365 Model 信心大於0.5時候直接覆蓋結果 - if places365_info and p365_confidence >= 0.5: - p365_is_indoor_decision = places365_info.get('is_indoor', None) # 這應該是 Python bool (True, False) 或 None - - print(f"DEBUG_OVERRIDE: Override condition p365_conf >= 0.8 MET. p365_is_indoor_decision: {p365_is_indoor_decision} (type: {type(p365_is_indoor_decision)})") - - # 使用 '==' 進行比較以增加對 NumPy bool 型別的兼容性 - # 並且明確檢查 p365_is_indoor_decision 不是 None - if p365_is_indoor_decision == False and p365_is_indoor_decision is not None: - print(f"DEBUG_OVERRIDE: Path for p365_is_indoor_decision == False taken. 
Original is_indoor: {is_indoor}")
-                original_decision_str = f"Indoor:{is_indoor}, Prob:{indoor_probability:.3f}, Score:{final_indoor_score:.2f}"
-
-                is_indoor = False
-                indoor_probability = 0.02  # Force an extremely low indoor probability; effectively treat the scene as outdoor
-                final_indoor_score = -8.0  # Force an extremely low indoor score
-                feature_contributions["places365_influence_score"] = final_indoor_score  # Update the contribution score
-
-                diagnostics["p365_force_override_applied"] = f"P365 FORCED OUTDOOR (is_indoor: {p365_is_indoor_decision}, Conf: {p365_confidence:.3f})"
-                diagnostics["p365_override_original_decision"] = original_decision_str
-                print(f"INFO: Places365 FORCED OUTDOOR override applied. New is_indoor: {is_indoor}")
-
-            elif p365_is_indoor_decision == True and p365_is_indoor_decision is not None:
-                print(f"DEBUG_OVERRIDE: Path for p365_is_indoor_decision == True taken. Original is_indoor: {is_indoor}")
-                original_decision_str = f"Indoor:{is_indoor}, Prob:{indoor_probability:.3f}, Score:{final_indoor_score:.2f}"
-
-                is_indoor = True
-                indoor_probability = 0.98  # Force an extremely high indoor probability; effectively treat the scene as indoor
-                final_indoor_score = 8.0  # Force an extremely high indoor score
-                feature_contributions["places365_influence_score"] = final_indoor_score  # Update the contribution score
-
-                diagnostics["p365_force_override_applied"] = f"P365 FORCED INDOOR (is_indoor: {p365_is_indoor_decision}, Conf: {p365_confidence:.3f})"
-                diagnostics["p365_override_original_decision"] = original_decision_str
-                print(f"INFO: Places365 FORCED INDOOR override applied. New is_indoor: {is_indoor}")
+            # Validate basic image properties
+            if len(image_np.shape) < 2:
+                self.logger.error("Image must have at least 2 dimensions")
+                return None
+
+            height, width = image_np.shape[:2]
+            if height == 0 or width == 0:
+                self.logger.error(f"Invalid image dimensions: {height}x{width}")
+                return None
+
+            # Handle different color formats and convert to RGB
+            if len(image_np.shape) == 2:
+                # Grayscale to RGB
+                image_rgb = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGB)
+            elif image_np.shape[2] == 3:
+                # Handle BGR vs RGB
+                if not isinstance(image, np.ndarray):
+                    # PIL images are typically RGB
+                    image_rgb = image_np
+                else:
+                    # OpenCV arrays are typically BGR, convert to RGB
+                    image_rgb = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+            elif image_np.shape[2] == 4:
+                # RGBA to RGB
+                if not isinstance(image, np.ndarray):
+                    # PIL RGBA to RGB
+                    image_rgb = cv2.cvtColor(image_np, cv2.COLOR_RGBA2RGB)
+                else:
+                    # OpenCV BGRA to RGB
+                    image_rgb = cv2.cvtColor(image_np, cv2.COLOR_BGRA2RGB)
             else:
-                print(f"DEBUG_OVERRIDE: No P365 True/False override. 
p365_is_indoor_decision was: {p365_is_indoor_decision}") + self.logger.error(f"Unsupported image format with shape: {image_np.shape}") + return None + + # Ensure uint8 data type + if image_rgb.dtype != np.uint8: + if image_rgb.dtype in [np.float32, np.float64]: + # Assume normalized float values + if image_rgb.max() <= 1.0: + image_rgb = (image_rgb * 255).astype(np.uint8) + else: + image_rgb = image_rgb.astype(np.uint8) + else: + image_rgb = image_rgb.astype(np.uint8) - # 確保 diagnostics 反映的是覆蓋後的 is_indoor 值 - diagnostics["final_indoor_probability_calculated"] = round(indoor_probability, 3) # 使用可能已被覆蓋的 indoor_probability - diagnostics["final_is_indoor_decision"] = bool(is_indoor) # 避免 np.True_ + self.logger.debug(f"Preprocessed image: {image_rgb.shape}, dtype: {image_rgb.dtype}") + return image_rgb - print(f"DEBUG_OVERRIDE: Returning from _analyze_indoor_outdoor -> is_indoor: {is_indoor} (type: {type(is_indoor)}), final_indoor_score: {final_indoor_score}, indoor_probability: {indoor_probability}") + except Exception as e: + self.logger.error(f"Error preprocessing image: {str(e)}") + return None - for key in ["sky_openness_features", "enclosure_features", "brightness_uniformity_contribution", "light_source_features"]: - if key not in feature_contributions: - feature_contributions[key] = 0.0 # Default to 0 if not specifically calculated by visual or P365 parts + def _consolidate_analysis_results(self, lighting_result: Dict[str, Any], + indoor_outdoor_result: Dict[str, Any], + features: Dict[str, Any]) -> Dict[str, Any]: + """ + Consolidate results from all analysis components into final output format. - return { - "is_indoor": is_indoor, - "indoor_probability": indoor_probability, - "indoor_score_raw": final_indoor_score, - "feature_contributions": feature_contributions, # Contains visual contributions and P365 influence - "diagnostics": diagnostics - } + Args: + lighting_result: Results from lighting condition analysis. + indoor_outdoor_result: Results from indoor/outdoor classification. + features: Extracted image features. - def _determine_lighting_conditions(self, features: Dict[str, Any], is_indoor: bool, places365_info: Optional[Dict] = None) -> Dict[str, Any]: - """ - Determines specific lighting conditions based on features, the (Places365-influenced) is_indoor status, - and Places365 scene context. + Returns: + Consolidated analysis results in the expected output format. 
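For reference, this is roughly the shape of the consolidated dictionary a caller gets back from `analyze()`; the values below are made up, and only keys visible in this diff are shown.

```python
# Illustrative output shape only; not produced by running this code against a real image.
example_result = {
    "time_of_day": "indoor_bright_artificial",
    "confidence": 0.78,
    "is_indoor": True,
    "indoor_probability": 0.91,
    "brightness": {"average": 142.3, "std_dev": 38.1, "dark_ratio": 0.04, "bright_ratio": 0.07},
    "color_info": {
        "blue_ratio": 0.05, "sky_like_blue_ratio": 0.01, "yellow_orange_ratio": 0.22,
        "gray_ratio": 0.18, "avg_saturation": 63.0,
        "sky_region_brightness_ratio": 1.02, "sky_region_saturation": 41.0,
        "sky_region_blue_dominance": 0.01, "color_atmosphere": "warm",
        "warm_ratio": 0.34, "cool_ratio": 0.09,
    },
    # texture_info, structure_info, and optional diagnostics follow the same pattern
    # (see the analyze() docstring earlier in this diff).
}

def describe(result: dict) -> str:
    """Small helper showing how downstream code might read the consolidated dict."""
    place = "indoor" if result["is_indoor"] else "outdoor"
    return f"{result['time_of_day']} ({place}, confidence {result['confidence']:.2f})"

print(describe(example_result))
```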
""" - time_of_day = "unknown" - confidence = 0.5 # Base confidence for visual feature analysis - diagnostics = {} - - # Internal Thresholds and Definitions for this function - P365_ATTRIBUTE_CONF_THRESHOLD = 0.60 # Min P365 scene confidence to trust its attributes for lighting - P365_SCENE_MODERATE_CONF_THRESHOLD = 0.45 # Min P365 scene confidence for its type to influence lighting - P365_SCENE_HIGH_CONF_THRESHOLD = 0.70 # Min P365 scene confidence for strong influence - - # Keywords for P365 mapped scene types (lowercase) - P365_OUTDOOR_SCENE_KEYWORDS = [ - "street", "road", "highway", "park", "beach", "mountain", "forest", "field", - "outdoor", "sky", "coast", "courtyard", "square", "plaza", "bridge", - "parking", "playground", "stadium", "construction", "river", "ocean", "desert", "garden", "trail", - "natural_landmark", "airport_outdoor", "train_station_outdoor", "bus_station_outdoor", "intersection", "crosswalk", "sidewalk", "pathway" - ] - P365_INDOOR_RESTAURANT_KEYWORDS = ["restaurant", "bar", "cafe", "dining_room", "pub", "bistro", "eatery"] - - # Extract key info from places365_info - Initialize all variables first - p365_mapped_scene = "unknown" - p365_attributes = [] - p365_confidence = 0.0 - - if places365_info: - p365_mapped_scene = places365_info.get('mapped_scene_type', 'unknown').lower() - p365_attributes = [attr.lower() for attr in places365_info.get('attributes', [])] - p365_confidence = places365_info.get('confidence', 0.0) - diagnostics["p365_context_for_lighting"] = ( - f"P365 Scene: {p365_mapped_scene}, Attrs: {p365_attributes}, Conf: {p365_confidence:.2f}" - ) + # Extract core results + time_of_day = lighting_result["time_of_day"] + confidence = lighting_result["confidence"] + is_indoor = indoor_outdoor_result["is_indoor"] + indoor_probability = indoor_outdoor_result["indoor_probability"] + + # Organize brightness information + brightness_info = { + "average": float(features.get("avg_brightness", 0.0)), + "std_dev": float(features.get("brightness_std", 0.0)), + "dark_ratio": float(features.get("dark_pixel_ratio", 0.0)), + "bright_ratio": float(features.get("bright_pixel_ratio", 0.0)) + } - # Extract visual features (using .get with defaults for safety) - avg_brightness = features.get("avg_brightness", 128.0) - yellow_orange_ratio = features.get("yellow_orange_ratio", 0.0) - gray_ratio = features.get("gray_ratio", 0.0) - sky_like_blue_in_sky_region = features.get("sky_region_blue_dominance", 0.0) - sky_region_brightness_ratio = features.get("sky_region_brightness_ratio", 1.0) - sky_region_is_brighter = sky_region_brightness_ratio > 1.05 - top_texture_complexity_val = features.get("top_region_texture_complexity", 0.5) - bright_spots_overall = features.get("bright_spot_count", 0) - circular_lights = features.get("circular_light_count", 0) - is_likely_home_environment = features.get("home_environment_pattern", 0.0) > self.config.get("home_pattern_thresh_moderate", 1.0) * 0.7 - light_dist_uniformity = features.get("light_distribution_uniformity", 0.5) - - # Config thresholds - config_thresholds = self.config - - # Priority 1: Use Places365 Attributes if highly confident and consistent with `is_indoor` - determined_by_p365_attr = False - if p365_attributes and p365_confidence > P365_ATTRIBUTE_CONF_THRESHOLD: - if not is_indoor: # Apply outdoor attributes only if current `is_indoor` decision is False - if "sunny" in p365_attributes or "clear sky" in p365_attributes: - time_of_day = "day_clear" - confidence = 0.85 + (p365_confidence - P365_ATTRIBUTE_CONF_THRESHOLD) * 0.25 - 
diagnostics["reason"] = "P365 attribute: sunny/clear sky (Outdoor)." - determined_by_p365_attr = True - - elif ("nighttime" in p365_attributes or "night" in p365_attributes): - # Further refine based on lights if P365 confirms it's a typically lit outdoor night scene - if ("artificial lighting" in p365_attributes or "man-made lighting" in p365_attributes or \ - any(kw in p365_mapped_scene for kw in ["street", "city", "road", "urban", "downtown"])): - time_of_day = "night_with_lights" - confidence = 0.82 + (p365_confidence - P365_ATTRIBUTE_CONF_THRESHOLD) * 0.20 - diagnostics["reason"] = "P365 attribute: nighttime with artificial/street lights (Outdoor)." - - else: # General dark night - time_of_day = "night_dark" - confidence = 0.78 + (p365_confidence - P365_ATTRIBUTE_CONF_THRESHOLD) * 0.20 - diagnostics["reason"] = "P365 attribute: nighttime, dark (Outdoor)." - determined_by_p365_attr = True - - elif "cloudy" in p365_attributes or "overcast" in p365_attributes: - time_of_day = "day_cloudy_overcast" - confidence = 0.80 + (p365_confidence - P365_ATTRIBUTE_CONF_THRESHOLD) * 0.25 - diagnostics["reason"] = "P365 attribute: cloudy/overcast (Outdoor)." - determined_by_p365_attr = True - - elif is_indoor: # Apply indoor attributes only if current `is_indoor` decision is True - if "artificial lighting" in p365_attributes or "man-made lighting" in p365_attributes: - base_indoor_conf = 0.70 + (p365_confidence - P365_ATTRIBUTE_CONF_THRESHOLD) * 0.20 - if avg_brightness > config_thresholds.get("indoor_bright_thresh", 130): - time_of_day = "indoor_bright_artificial" - confidence = base_indoor_conf + 0.10 - - elif avg_brightness > config_thresholds.get("indoor_moderate_thresh", 95): - time_of_day = "indoor_moderate_artificial" - confidence = base_indoor_conf + # Organize color information + color_info = { + "blue_ratio": float(features.get("blue_ratio", 0.0)), + "sky_like_blue_ratio": float(features.get("sky_like_blue_ratio", 0.0)), + "yellow_orange_ratio": float(features.get("yellow_orange_ratio", 0.0)), + "gray_ratio": float(features.get("gray_ratio", 0.0)), + "avg_saturation": float(features.get("avg_saturation", 0.0)), + "sky_region_brightness_ratio": float(features.get("sky_region_brightness_ratio", 1.0)), + "sky_region_saturation": float(features.get("sky_region_saturation", 0.0)), + "sky_region_blue_dominance": float(features.get("sky_region_blue_dominance", 0.0)), + "color_atmosphere": features.get("color_atmosphere", "neutral"), + "warm_ratio": float(features.get("warm_ratio", 0.0)), + "cool_ratio": float(features.get("cool_ratio", 0.0)) + } - else: - time_of_day = "indoor_dim_artificial" # More specific than _general - confidence = base_indoor_conf - 0.05 - diagnostics["reason"] = f"P365 attribute: artificial lighting (Indoor), brightness based category: {time_of_day}." - determined_by_p365_attr = True - - elif "natural lighting" in p365_attributes and \ - (is_likely_home_environment or any(kw in p365_mapped_scene for kw in ["living_room", "bedroom", "sunroom"])): - time_of_day = "indoor_residential_natural" - confidence = 0.80 + (p365_confidence - P365_ATTRIBUTE_CONF_THRESHOLD) * 0.20 - diagnostics["reason"] = "P365 attribute: natural lighting in residential/applicable indoor scene." - determined_by_p365_attr = True - - # Step 2: If P365 attributes didn't make a high-confidence decision - # proceed with visual feature analysis, but now refined by P365 scene context. 
- if not determined_by_p365_attr or confidence < 0.75: # If P365 attributes didn't strongly decide - - # Store the initial P365-attribute based tod and conf if they existed - initial_tod_by_attr = time_of_day if determined_by_p365_attr else "unknown" - initial_conf_by_attr = confidence if determined_by_p365_attr else 0.5 - - # Reset for visual analysis, but keep P365 context in diagnostics - time_of_day = "unknown" - confidence = 0.5 # Base for visual - current_visual_reason = "" # For diagnostics from visual features - - if is_indoor: # `is_indoor` is already P365-influenced from _analyze_indoor_outdoor - natural_light_hints = 0 - if sky_like_blue_in_sky_region > 0.05 and sky_region_is_brighter: natural_light_hints += 1.0 - if features.get("brightness_uniformity", 0.0) > 0.65 and features.get("brightness_std", 100.0) < 70: natural_light_hints += 1.0 - if features.get("warm_ratio", 0.0) > 0.15 and avg_brightness > 110: natural_light_hints += 0.5 - - is_designer_lit_flag = (circular_lights > 0 or bright_spots_overall > 2) and \ - features.get("brightness_uniformity", 0.0) > 0.6 and \ - features.get("warm_ratio", 0.0) > 0.2 and \ - avg_brightness > 90 - - if avg_brightness > config_thresholds.get("indoor_bright_thresh", 130): - if natural_light_hints >= 1.5 and (is_likely_home_environment or any(kw in p365_mapped_scene for kw in ["home", "residential", "living", "bedroom"])): - time_of_day = "indoor_residential_natural" - confidence = 0.82 - current_visual_reason = "Visual: Bright residential, natural window light hints." - elif is_designer_lit_flag and (is_likely_home_environment or any(kw in p365_mapped_scene for kw in ["home", "designer", "modern_interior"])): - time_of_day = "indoor_designer_residential" - confidence = 0.85 - current_visual_reason = "Visual: Bright, designer-lit residential." - elif sky_like_blue_in_sky_region > 0.03 and sky_region_is_brighter: - time_of_day = "indoor_bright_natural_mix" - confidence = 0.78 - current_visual_reason = "Visual: Bright indoor, mixed natural/artificial (window)." - else: - time_of_day = "indoor_bright_artificial" - confidence = 0.75 - current_visual_reason = "Visual: High brightness, artificial indoor." - elif avg_brightness > config_thresholds.get("indoor_moderate_thresh", 95): - if is_designer_lit_flag and (is_likely_home_environment or any(kw in p365_mapped_scene for kw in ["home", "designer"])): - time_of_day = "indoor_designer_residential" - confidence = 0.78 - current_visual_reason = "Visual: Moderately bright, designer-lit residential." - elif features.get("warm_ratio", 0.0) > 0.35 and yellow_orange_ratio > 0.1: - - if any(kw in p365_mapped_scene for kw in P365_INDOOR_RESTAURANT_KEYWORDS) and \ - p365_confidence > P365_SCENE_MODERATE_CONF_THRESHOLD : - time_of_day = "indoor_restaurant_bar" - confidence = 0.80 + p365_confidence * 0.15 # Boost with P365 context - current_visual_reason = "Visual: Moderate warm tones. P365 context confirms restaurant/bar." - elif any(kw in p365_mapped_scene for kw in P365_OUTDOOR_SCENE_KEYWORDS) and \ - p365_confidence > P365_SCENE_MODERATE_CONF_THRESHOLD : - # This shouldn't happen if `is_indoor` was correctly set to False by P365 in `_analyze_indoor_outdoor` - # But as a fallback, if is_indoor=True but P365 scene context says strongly outdoor - time_of_day = "indoor_moderate_artificial" # Fallback to general indoor - confidence = 0.55 # Lower confidence due to strong conflict - current_visual_reason = "Visual: Moderate warm. CONFLICT: LA says indoor but P365 scene is outdoor. 
-                            diagnostics["conflict_is_indoor_vs_p365_scene_for_restaurant_bar"] = True
-                        else: # P365 context is neutral, or not strongly conflicting restaurant/bar
-                            time_of_day = "indoor_restaurant_bar"
-                            confidence = 0.70 # Standard confidence without strong P365 confirmation
-                            current_visual_reason = "Visual: Moderate warm tones, typical of restaurant/bar. P365 context neutral or weak."
-                    else:
-                        time_of_day = "indoor_moderate_artificial"
-                        confidence = 0.70
-                        current_visual_reason = "Visual: Moderate brightness, standard artificial indoor."
-                else: # Dimmer indoor
-                    if features.get("warm_ratio", 0.0) > 0.45 and yellow_orange_ratio > 0.15:
-                        time_of_day = "indoor_dim_warm"
-                        confidence = 0.75
-                        current_visual_reason = "Visual: Dim indoor with very warm tones."
-                    else:
-                        time_of_day = "indoor_dim_general"
-                        confidence = 0.70
-                        current_visual_reason = "Visual: Low brightness indoor."
-
-                # Refined commercial check (indoor)
-                if "residential" not in time_of_day and "restaurant" not in time_of_day and "bar" not in time_of_day and \
-                   not (any(kw in p365_mapped_scene for kw in P365_INDOOR_RESTAURANT_KEYWORDS)): # Avoid reclassifying if P365 already said restaurant/bar
-                    if avg_brightness > config_thresholds.get("commercial_min_brightness_thresh", 105) and \
-                       bright_spots_overall > config_thresholds.get("commercial_min_spots_thresh", 3) and \
-                       (light_dist_uniformity > 0.5 or features.get("ceiling_likelihood",0) > 0.4):
-                        if not (any(kw in p365_mapped_scene for kw in ["home", "residential"])): # Don't call commercial if P365 suggests home
-                            time_of_day = "indoor_commercial"
-                            confidence = 0.70 + min(0.2, bright_spots_overall * 0.02)
-                            current_visual_reason = "Visual: Multiple/structured light sources in non-residential/restaurant setting."
-
-                diagnostics["visual_analysis_reason"] = current_visual_reason
-
-            else: # Outdoor (is_indoor is False, influenced by P365 in the previous step)
-                current_visual_reason = ""
-
-                if (any(kw in p365_mapped_scene for kw in P365_OUTDOOR_SCENE_KEYWORDS) and any(kw in p365_mapped_scene for kw in ["street", "city", "road", "urban", "downtown", "intersection"])) and \
-                   p365_confidence > P365_SCENE_MODERATE_CONF_THRESHOLD and \
-                   features.get("color_atmosphere") == "warm" and \
-                   avg_brightness < config_thresholds.get("outdoor_dusk_dawn_thresh_brightness", 135): # Not bright daytime
-
-                    if avg_brightness < config_thresholds.get("outdoor_night_thresh_brightness", 85) and \
-                       bright_spots_overall > config_thresholds.get("outdoor_night_lights_thresh", 2):
-                        time_of_day = "night_with_lights"
-                        confidence = 0.88 + p365_confidence * 0.1 # High confidence
-                        current_visual_reason = f"P365 outdoor scene '{p365_mapped_scene}' + visual low-warm light with spots -> night_with_lights."
-                    elif avg_brightness >= config_thresholds.get("outdoor_night_thresh_brightness", 85) : # Dusk/Dawn range
-                        time_of_day = "sunset_sunrise"
-                        confidence = 0.88 + p365_confidence * 0.1 # High confidence
-                        current_visual_reason = f"P365 outdoor scene '{p365_mapped_scene}' + visual moderate-warm light -> sunset/sunrise."
-                    else: # Too dark for sunset, but not enough spots for "night_with_lights" based on pure visual
-                        time_of_day = "night_dark" # Fallback if P365 indicates night but visual light spots are few
-                        confidence = 0.75 + p365_confidence * 0.1
-                        current_visual_reason = f"P365 outdoor scene '{p365_mapped_scene}' + visual very low light -> night_dark."
-
-                # Fallback to your original visual logic if P365 street context isn't strong enough for above
-                elif avg_brightness < config_thresholds.get("outdoor_night_thresh_brightness", 85):
-                    if bright_spots_overall > config_thresholds.get("outdoor_night_lights_thresh", 2):
-                        time_of_day = "night_with_lights"
-                        confidence = 0.82 + min(0.13, features.get("dark_pixel_ratio", 0.0) / 2.5)
-                        current_visual_reason = "Visual: Low brightness with light sources (street/car lights)."
-                    else:
-                        time_of_day = "night_dark"
-                        confidence = 0.78 + min(0.17, features.get("dark_pixel_ratio", 0.0) / 1.8)
-                        current_visual_reason = "Visual: Very low brightness outdoor, deep night."
-
-                elif avg_brightness < config_thresholds.get("outdoor_dusk_dawn_thresh_brightness", 135) and \
-                     yellow_orange_ratio > config_thresholds.get("outdoor_dusk_dawn_color_thresh", 0.10) and \
-                     features.get("color_atmosphere") == "warm" and \
-                     sky_region_brightness_ratio < 1.5 :
-                    time_of_day = "sunset_sunrise"
-                    confidence = 0.75 + min(0.20, yellow_orange_ratio / 1.5)
-                    current_visual_reason = "Visual: Moderate brightness, warm tones -> sunset/sunrise."
-                    if any(kw in p365_mapped_scene for kw in ["beach", "mountain", "lake", "ocean", "desert", "field", "natural_landmark", "sky"]) and \
-                       p365_confidence > P365_SCENE_MODERATE_CONF_THRESHOLD:
-                        confidence = min(0.95, confidence + 0.15)
-                        current_visual_reason += f" P365 natural scene '{p365_mapped_scene}' supports."
-
-                elif avg_brightness > config_thresholds.get("outdoor_day_bright_thresh", 140) and \
-                     (sky_like_blue_in_sky_region > config_thresholds.get("outdoor_day_blue_thresh", 0.05) or \
-                      (sky_region_is_brighter and top_texture_complexity_val < 0.4) ):
-                    time_of_day = "day_clear"
-                    confidence = 0.80 + min(0.15, sky_like_blue_in_sky_region * 2 + (sky_like_blue_in_sky_region*1.5 if sky_region_is_brighter else 0) ) # Corrected feature name
-                    current_visual_reason = "Visual: High brightness with blue/sky tones or bright smooth top."
-
-                elif avg_brightness > config_thresholds.get("outdoor_day_cloudy_thresh", 120):
-                    if sky_region_is_brighter and top_texture_complexity_val < 0.45 and features.get("avg_saturation", 100) < 70:
-                        time_of_day = "day_cloudy_overcast"
-                        confidence = 0.75 + min(0.20, gray_ratio / 1.5 + (features.get("brightness_uniformity",0.0)-0.5)/1.5)
-                        current_visual_reason = "Visual: Good brightness, uniform bright top, lower saturation -> overcast."
-                    elif gray_ratio > config_thresholds.get("outdoor_day_gray_thresh", 0.18):
-                        time_of_day = "day_cloudy_gray"
-                        confidence = 0.72 + min(0.23, gray_ratio / 1.8)
-                        current_visual_reason = "Visual: Good brightness with higher gray tones."
-                    else:
-                        time_of_day = "day_bright_general"
-                        confidence = 0.68
-                        current_visual_reason = "Visual: Bright outdoor, specific type less clear."
-                else: # Fallback for outdoor
-                    if features.get("color_atmosphere") == "warm" and yellow_orange_ratio > 0.08:
-                        time_of_day = "sunset_sunrise_low_confidence"
-                        confidence = 0.62
-                    elif sky_like_blue_in_sky_region > 0.02 or features.get("sky_region_blue_dominance",0) > 0.03 :
-                        time_of_day = "day_hazy_or_partly_cloudy"
-                        confidence = 0.62
-                    else:
-                        time_of_day = "outdoor_unknown_daylight"
-                        confidence = 0.58
-                    current_visual_reason = "Visual: Outdoor, specific conditions less clear; broader visual cues."
-
-                # Visual check for stadium/floodlit (only if is_indoor is false)
-                if avg_brightness > 150 and \
-                   features.get("brightness_uniformity",0.0) > 0.70 and \
-                   bright_spots_overall > config_thresholds.get("stadium_min_spots_thresh", 6):
-                    time_of_day = "stadium_or_floodlit_area"
-                    confidence = 0.78
-                    current_visual_reason = "Visual: Very bright, uniform lighting with multiple sources, suggests floodlights (Outdoor)."
-
-                diagnostics["visual_analysis_reason"] = current_visual_reason
-
-            # If P365 attributes made a decision, and visual analysis refined it or provided a different one,
-            # we need to decide which one to trust or how to blend.
-            # If P365 attributes were strong (determined_by_p365_attr=True and initial_p365_confidence >=0.8), we stick with it.
-            # Otherwise, the visual analysis (now also P365 scene-context-aware) takes over.
-            if determined_by_p365_attr and initial_conf_by_attr >= 0.80 and initial_tod_by_attr != "unknown":
-                # time_of_day and confidence are already set from P365 attributes.
-                diagnostics["final_decision_source"] = "High-confidence P365 attribute."
-            else:
-                # If P365 attribute was not decisive, or visual analysis provided a different and
-                # potentially more nuanced result (especially if P365 scene context was used in visual path),
-                diagnostics["final_decision_source"] = "Visual features (potentially P365-context-refined)."
-                if initial_tod_by_attr != "unknown" and initial_tod_by_attr != time_of_day:
-                    diagnostics["p365_attr_overridden_by_visual"] = f"P365 Attr ToD {initial_tod_by_attr} (Conf {initial_conf_by_attr:.2f}) was less certain or overridden by visual logic result {time_of_day} (Conf {confidence:.2f})."
-
-        # Neon/Sodium Vapor Night (can apply to either indoor if bar-like, or outdoor street)
-        # This refinement can apply *after* the main decision.
-        is_current_night_or_dim_warm = "night" in time_of_day or time_of_day == "indoor_dim_warm"
-
-        # Define these thresholds here if not in self.config or use self.config.get()
-        neon_yellow_orange_thresh = self.config.get("neon_yellow_orange_thresh", 0.12)
-        neon_bright_spots_thresh = self.config.get("neon_bright_spots_thresh", 4)
-        neon_avg_saturation_thresh = self.config.get("neon_avg_saturation_thresh", 60)
-
-        if is_current_night_or_dim_warm and \
-           yellow_orange_ratio > neon_yellow_orange_thresh and \
-           bright_spots_overall > neon_bright_spots_thresh and \
-           features.get("color_atmosphere") == "warm" and \
-           features.get("avg_saturation",0) > neon_avg_saturation_thresh:
-
-            old_time_of_day_for_neon_check = time_of_day
-            old_confidence_for_neon_check = confidence
-
-            # Check P365 context for "neon" related scenes
-            is_p365_neon_context = any(kw in p365_mapped_scene for kw in ["neon", "nightclub", "bar_neon"]) or \
-                                   "neon" in p365_attributes
-
-            if is_indoor:
-                if is_p365_neon_context or any(kw in p365_mapped_scene for kw in P365_INDOOR_RESTAURANT_KEYWORDS): # e.g. bar with neon
-                    time_of_day = "indoor_neon_lit"
-                    confidence = max(confidence, 0.80) # Boost confidence if P365 supports
-                else: # Generic indoor dim warm with neon characteristics
-                    time_of_day = "indoor_dim_warm_neon_accent" # A more nuanced category
-                    confidence = max(confidence, 0.77)
-            else: # outdoor street neon
-                if is_p365_neon_context or any(kw in p365_mapped_scene for kw in ["street_night", "city_night", "downtown_night"]):
-                    time_of_day = "neon_or_sodium_vapor_night"
-                    confidence = max(confidence, 0.82) # Boost confidence
-                else: # Generic outdoor night with neon characteristics
-                    time_of_day = "night_with_neon_lights" # A more nuanced category
-                    confidence = max(confidence, 0.79)
-
-            diagnostics["special_lighting_detected"] = (
-                f"Refined from {old_time_of_day_for_neon_check} (Conf:{old_confidence_for_neon_check:.2f}) "
-                f"to {time_of_day} (Conf:{confidence:.2f}) due to neon/sodium vapor light characteristics. "
-                f"P365 Context: {p365_mapped_scene if is_p365_neon_context else 'N/A'}."
-            )
+            # Organize texture information
+            texture_info = {
+                "gradient_ratio_vertical_horizontal": float(features.get("gradient_ratio_vertical_horizontal", 0.0)),
+                "top_region_texture_complexity": float(features.get("top_region_texture_complexity", 0.0)),
+                "shadow_clarity_score": float(features.get("shadow_clarity_score", 0.5))
+            }
-        # Final confidence clamp
-        confidence = min(0.95, max(0.50, confidence))
-        diagnostics["final_lighting_time_of_day"] = time_of_day
-        diagnostics["final_lighting_confidence"] = round(confidence,3)
+            # Organize structure information
+            structure_info = {
+                "ceiling_likelihood": float(features.get("ceiling_likelihood", 0.0)),
+                "boundary_clarity": float(features.get("boundary_clarity", 0.0)),
+                "openness_top_edge": float(features.get("openness_top_edge", 0.5))
+            }
-        return {
+            # Compile final result
+            result = {
                 "time_of_day": time_of_day,
-            "confidence": confidence,
-            "diagnostics": diagnostics
+                "confidence": float(confidence),
+                "is_indoor": is_indoor,
+                "indoor_probability": float(indoor_probability),
+                "brightness": brightness_info,
+                "color_info": color_info,
+                "texture_info": texture_info,
+                "structure_info": structure_info
             }
-    def _get_default_config(self) -> Dict[str, Any]:
+            # Add diagnostic information if enabled
+            if self.config_manager.algorithm_parameters.include_diagnostics:
+                diagnostics = {}
+
+                # Combine diagnostics from all components
+                if "diagnostics" in lighting_result:
+                    diagnostics["lighting_diagnostics"] = lighting_result["diagnostics"]
+
+                if "diagnostics" in indoor_outdoor_result:
+                    diagnostics["indoor_outdoor_diagnostics"] = indoor_outdoor_result["diagnostics"]
+
+                if "feature_contributions" in indoor_outdoor_result:
+                    diagnostics["feature_contributions"] = indoor_outdoor_result["feature_contributions"]
+
+                result["diagnostics"] = diagnostics
+
+            return result
+
+    def _get_error_result(self, error_message: str) -> Dict[str, Any]:
         """
-        Returns default configuration parameters, with adjustments for better balance.
+        Generate standardized error result format.
+
+        Args:
+            error_message: Description of the error that occurred.
+
+        Returns:
+            Dictionary containing error result with safe default values.
""" return { - # Thresholds for feature calculation (from _compute_basic_features) - "dark_pixel_threshold": 50, - "bright_pixel_threshold": 220, - "sky_blue_hue_min": 95, - "sky_blue_hue_max": 135, - "sky_blue_sat_min": 40, - "sky_blue_val_min": 90, - "gray_sat_max": 70, - "gray_val_min": 60, - "gray_val_max": 220, - "light_source_abs_thresh": 220, # For old bright_spot_count compatibility if used - - "warm_hue_ranges": [(0, 50), (330, 360)], - "cool_hue_ranges": [(90, 270)], - - # Thresholds for _analyze_indoor_outdoor logic - "sky_blue_dominance_thresh": 0.18, - "sky_brightness_ratio_thresh": 1.25, - "openness_top_thresh": 0.68, - "sky_texture_complexity_thresh": 0.35, - "ceiling_likelihood_thresh": 0.4, - "boundary_clarity_thresh": 0.38, - "brightness_uniformity_thresh_indoor": 0.6, - "brightness_uniformity_thresh_outdoor": 0.40, - "many_bright_spots_thresh": 6, - "dim_scene_for_spots_thresh": 115, - "home_pattern_thresh_strong": 2.0, - "home_pattern_thresh_moderate": 1.0, - "warm_indoor_max_brightness_thresh": 135, - "aerial_top_dark_ratio_thresh": 0.9, - "aerial_top_complex_thresh": 0.60, - "aerial_min_avg_brightness_thresh": 65, - - # Factors to reduce indoor cues if strong sky signal - "sky_override_factor_ceiling": 0.1, - "sky_override_factor_boundary": 0.2, - "sky_override_factor_uniformity": 0.15, - "sky_override_factor_lights": 0.05, - - # Factor to reduce enclosure score if aerial street pattern detected - "aerial_enclosure_reduction_factor": 0.75, - - # Weights for _analyze_indoor_outdoor scoring (positive = indoor, negative = outdoor) - "indoor_outdoor_weights": { - # Sky/Openness (Negative values push towards outdoor) - "sky_blue_dominance_w": 3.5, - "sky_brightness_ratio_w": 3, - "openness_top_w": 2.8, - "sky_texture_w": 2, - - # Ceiling/Enclosure (Positive values push towards indoor) - "ceiling_likelihood_w": 1.5, - "boundary_clarity_w": 1.2, - - # Brightness - "brightness_uniformity_w": 0.6, - "brightness_non_uniformity_outdoor_w": 1.0, - "brightness_non_uniformity_indoor_penalty_w": 0.1, - - # Light Sources - "circular_lights_w": 1.2, - "indoor_light_score_w": 0.8, - "many_bright_spots_indoor_w": 0.3, - - # Color Atmosphere - "warm_atmosphere_indoor_w": 0.15, - - # Home Environment Pattern (structural cues for indoor) - "home_env_strong_w": 1.5, - "home_env_moderate_w": 0.7, - - # Aerial street pattern (negative pushes to outdoor) - "aerial_street_w": 2.5, - - "places365_outdoor_scene_w": 4.0, # Places365 明確判斷為室外場景時的強烈負(室外)權重 - "places365_indoor_scene_w": 3.0, # Places365 明確判斷為室內場景時的正面(室內)權重 - "places365_attribute_w": 1.5, - - "blue_ratio": 0.0, "gradient_ratio": 0.0, "bright_spots": 0.0, - "color_tone": 0.0, "sky_brightness": 0.0, "ceiling_features": 0.0, - "light_features": 0.0, "boundary_features": 0.0, - "street_features": 0.0, "building_features": 0.0, + "time_of_day": "unknown", + "confidence": 0.0, + "is_indoor": False, + "indoor_probability": 0.5, + "brightness": { + "average": 100.0, + "std_dev": 50.0, + "dark_ratio": 0.0, + "bright_ratio": 0.0 + }, + "color_info": { + "blue_ratio": 0.0, + "sky_like_blue_ratio": 0.0, + "yellow_orange_ratio": 0.0, + "gray_ratio": 0.0, + "avg_saturation": 100.0, + "sky_region_brightness_ratio": 1.0, + "sky_region_saturation": 0.0, + "sky_region_blue_dominance": 0.0, + "color_atmosphere": "neutral", + "warm_ratio": 0.0, + "cool_ratio": 0.0 + }, + "texture_info": { + "gradient_ratio_vertical_horizontal": 1.0, + "top_region_texture_complexity": 0.5, + "shadow_clarity_score": 0.5 + }, + "structure_info": { + "ceiling_likelihood": 
+                "boundary_clarity": 0.0,
+                "openness_top_edge": 0.5
             },
-            "indoor_score_sigmoid_scale": 0.3,
-            "indoor_decision_threshold": 0.5,
-
-            # Places365 相關閾值
-            "places365_high_confidence_thresh": 0.75, # Places365 判斷結果被視為高信心度的閾值
-            "places365_moderate_confidence_thresh": 0.5, # Places365 中等信心度閾值
-            "places365_attribute_confidence_thresh": 0.6, # Places365 屬性判斷的置信度閾值
-
-            "p365_outdoor_reduces_enclosure_factor": 0.3, # 如果P365認為是室外,圍合特徵的影響降低到30%
-            "p365_indoor_boosts_ceiling_factor": 1.5,
-
-            # Thresholds for _determine_lighting_conditions (outdoor)
-            "outdoor_night_thresh_brightness": 80,
-            "outdoor_night_lights_thresh": 2,
-            "outdoor_dusk_dawn_thresh_brightness": 130,
-            "outdoor_dusk_dawn_color_thresh": 0.10,
-            "outdoor_day_bright_thresh": 140,
-            "outdoor_day_blue_thresh": 0.05,
-            "outdoor_day_cloudy_thresh": 120,
-            "outdoor_day_gray_thresh": 0.18,
-
-            "include_diagnostics": True,
-
-            "ceiling_likelihood_thresh_indoor": 0.38,
-            "sky_blue_dominance_strong_thresh": 0.35,
-            "sky_brightness_ratio_strong_thresh": 1.35,
-            "sky_texture_complexity_clear_thresh": 0.25,
-            "openness_top_strong_thresh": 0.80,
-            "sky_texture_complexity_cloudy_thresh": 0.20,
-            "sky_brightness_ratio_cloudy_thresh": 0.95,
-
-            "ceiling_texture_thresh": 0.4,
-            "ceiling_brightness_min": 60,
-            "ceiling_brightness_max": 230,
-            "ceiling_horizontal_line_factor": 1.15,
-            "ceiling_center_bright_factor": 1.25,
-            "ceiling_max_sky_blue_thresh": 0.08,
-            "ceiling_max_sky_brightness_ratio": 1.15,
-            "ceiling_sky_override_factor": 0.1,
-
-            "stadium_min_spots_thresh": 6
+            "error": error_message
         }
+
+    def get_configuration(self) -> Dict[str, Any]:
+        """
+        Get current configuration as dictionary for backward compatibility.
+
+        Returns:
+            Dictionary containing all current configuration parameters.
+        """
+        return self.config_manager.get_legacy_config_dict()
+
+    def update_configuration(self, config_updates: Dict[str, Any]) -> None:
+        """
+        Update configuration parameters.
+
+        Args:
+            config_updates: Dictionary containing configuration parameters to update.
+        """
+        try:
+            self.config_manager._update_from_dict(config_updates)
+            # Update legacy config reference
+            self.config = self.config_manager.get_legacy_config_dict()
+            self.logger.info("Configuration updated successfully")
+
+        except Exception as e:
+            self.logger.error(f"Error updating configuration: {str(e)}")
+            raise
+
+    def validate_configuration(self) -> bool:
+        """
+        Validate current configuration for logical consistency.
+
+        Returns:
+            True if configuration is valid, False otherwise.
+        """
+        try:
+            validation_errors = self.config_manager.validate_configuration()
+
+            if validation_errors:
+                self.logger.error("Configuration validation failed:")
+                for error in validation_errors:
+                    self.logger.error(f"  - {error}")
+                return False
+
+            self.logger.info("Configuration validation passed")
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Error during configuration validation: {str(e)}")
+            return False
+
+    def save_configuration(self, filepath: str) -> None:
+        """
+        Save current configuration to file.
+
+        Args:
+            filepath: Path where to save the configuration file.
+        """
+        try:
+            self.config_manager.save_to_file(filepath)
+            self.logger.info(f"Configuration saved to {filepath}")
+
+        except Exception as e:
+            self.logger.error(f"Error saving configuration: {str(e)}")
+            raise
+
+    def load_configuration(self, filepath: str) -> None:
+        """
+        Load configuration from file.
+
+        Args:
+            filepath: Path to the configuration file to load.
+ """ + try: + self.config_manager.load_from_file(filepath) + # Update legacy config reference + self.config = self.config_manager.get_legacy_config_dict() + self.logger.info(f"Configuration loaded from {filepath}") + + except Exception as e: + self.logger.error(f"Error loading configuration: {str(e)}") + raise