import pandas as pd
import h5py
import numpy as np
# from scipy.stats import skew, kurtosis  # No longer needed for features
import umap
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import seaborn as sns
import os
import concurrent.futures
from functools import partial

# --- Configuration ---
TRUNCATE_M_POINTS = 2048  # Default truncation length for raw signals
SAMPLE_K_SEGMENTS = 200  # Default number of segments to sample per group for UMAP
DEFAULT_NUM_DATASETS_TO_DISPLAY = 18  # Default number of datasets for plotting if selection is used


# --- 1. Metadata Loading and Dataset Selection ---
def load_and_select_metadata(excel_path, num_datasets_to_display=None, reports_dir="reports"):
    """
    Loads metadata, extracts Dataset_Name, sorts by Id, and optionally selects
    a subset of datasets. Saves the selected metadata.
    """
    try:
        metadata_df = pd.read_excel(excel_path)
        print(f"Successfully loaded metadata: {excel_path}")
    except FileNotFoundError:
        print(f"ERROR: Metadata file not found {excel_path}")
        return None
    except Exception as e:
        print(f"ERROR loading metadata: {e}")
        return None

    if 'Id' not in metadata_df.columns:
        print("ERROR: 'Id' column not found in metadata.")
        return None
    if 'Name' not in metadata_df.columns:
        print("WARNING: 'Name' column not found. Using 'Dataset_id' as 'Dataset_Name'.")
        metadata_df['Dataset_Name'] = metadata_df['Dataset_id'].astype(str)
    else:
        metadata_df['Dataset_Name'] = metadata_df['Name'].apply(
            lambda x: x.split('_')[-1] if isinstance(x, str) and '_' in x else str(x)
        )

    metadata_df['Id'] = pd.to_numeric(metadata_df['Id'], errors='coerce')
    metadata_df.dropna(subset=['Id'], inplace=True)
    metadata_df['Id'] = metadata_df['Id'].astype(int)
    metadata_df.sort_values(by='Id', inplace=True)

    selected_metadata_df = metadata_df.copy()
    if num_datasets_to_display is not None and num_datasets_to_display > 0:
        # Simple selection: take datasets with the most entries, up to num_datasets_to_display.
        # More sophisticated selection might be needed (e.g., specific names).
        top_datasets = selected_metadata_df['Dataset_Name'].value_counts().nlargest(num_datasets_to_display).index
        selected_metadata_df = selected_metadata_df[selected_metadata_df['Dataset_Name'].isin(top_datasets)]
        print(f"Selected top {len(top_datasets)} datasets for processing: {top_datasets.tolist()}")

    os.makedirs(reports_dir, exist_ok=True)
    selected_metadata_path = os.path.join(reports_dir, "selected_metadata.csv")
    selected_metadata_df.to_csv(selected_metadata_path, index=False)
    print(f"Selected metadata saved to: {selected_metadata_path}")
    return selected_metadata_df
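
# Illustrative usage of load_and_select_metadata (a minimal sketch; the relative
# path and the preview columns shown here are assumptions for demonstration,
# not part of the pipeline, which wires the real paths up in main()):
#
#   meta = load_and_select_metadata("metadata_6_1.xlsx", num_datasets_to_display=5)
#   if meta is not None:
#       print(meta[['Id', 'Dataset_Name']].head())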
""" h5_key, meta_info = h5_key_meta_tuple try: with h5py.File(h5_filepath, 'r') as h5f_proc: raw_data = h5f_proc[h5_key][()] if not np.issubdtype(raw_data.dtype, np.number): return None raw_data = raw_data.flatten() if raw_data.size == 0: return None if raw_data.size > truncate_m_points: processed_data = raw_data[:truncate_m_points] elif raw_data.size < truncate_m_points: # Pad with zeros if shorter processed_data = np.pad(raw_data, (0, truncate_m_points - raw_data.size), 'constant') else: processed_data = raw_data return { 'Id': meta_info['Id'], 'H5_Key': h5_key, 'Dataset_id': meta_info.get('Dataset_id', 'N/A'), 'Dataset_Name': meta_info.get('Dataset_Name', 'N/A'), 'Label': meta_info.get('Label', 'N/A'), 'Domain_id': meta_info.get('Domain_id', 'N/A'), 'Fault_level': meta_info.get('Fault_level', 'N/A'), 'raw_segment': processed_data } except Exception as e: print(f" ERROR processing raw segment Id {meta_info.get('Id', 'Unknown')} (H5 key: {h5_key}): {e}") return None def load_process_sample_raw_data(h5_filepath, selected_metadata_df, cache_dir, truncate_m_points=TRUNCATE_M_POINTS, sample_k_segments=SAMPLE_K_SEGMENTS, max_workers=None): """ Loads raw data based on selected_metadata_df, truncates/pads, samples k segments per group, and prepares data for UMAP. Caches results. """ if selected_metadata_df is None or selected_metadata_df.empty: print("No metadata provided for raw data processing."); return None, None os.makedirs(cache_dir, exist_ok=True) raw_segments_matrix_cache_path = os.path.join(cache_dir, f"sampled_raw_segments_m{truncate_m_points}_k{sample_k_segments}.npy") raw_segments_labels_cache_path = os.path.join(cache_dir, f"sampled_raw_segments_labels_m{truncate_m_points}_k{sample_k_segments}.csv") if os.path.exists(raw_segments_matrix_cache_path) and os.path.exists(raw_segments_labels_cache_path): try: print("Loading sampled raw data from cache...") sampled_raw_segments_matrix = np.load(raw_segments_matrix_cache_path) sampled_labels_df = pd.read_csv(raw_segments_labels_cache_path) for col in ['Dataset_Name', 'Label', 'Domain_id', 'Fault_level', 'Id', 'H5_Key', 'Dataset_id']: if col in sampled_labels_df.columns: sampled_labels_df[col] = sampled_labels_df[col].astype(str) print("Successfully loaded sampled raw data from cache.") return sampled_raw_segments_matrix, sampled_labels_df except Exception as e: print(f"Error loading from cache: {e}. 
Re-processing.") if os.path.exists(raw_segments_matrix_cache_path): os.remove(raw_segments_matrix_cache_path) if os.path.exists(raw_segments_labels_cache_path): os.remove(raw_segments_labels_cache_path) print("\nProcessing raw data: Loading, Truncating/Padding...") all_processed_segments_info = [] selected_metadata_df['Id_str'] = selected_metadata_df['Id'].astype(str) items_to_process_raw = [] h5_keys_not_found_in_h5 = [] try: with h5py.File(h5_filepath, 'r') as h5f: h5_keys_in_file = set(h5f.keys()) for _, meta_row in selected_metadata_df.iterrows(): h5_key_candidate = meta_row['Id_str'] if h5_key_candidate in h5_keys_in_file: items_to_process_raw.append((h5_key_candidate, meta_row.to_dict())) else: h5_keys_not_found_in_h5.append(h5_key_candidate) if h5_keys_not_found_in_h5: print(f"WARNING: {len(h5_keys_not_found_in_h5)} Ids from metadata not found as keys in HDF5: {h5_keys_not_found_in_h5[:5]}...") except FileNotFoundError: print(f"ERROR: HDF5 file {h5_filepath} not found."); return None, None except Exception as e: print(f"ERROR opening HDF5 file {h5_filepath}: {e}"); return None, None if not items_to_process_raw: print("No items to process from HDF5 based on selected metadata."); return None, None with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor: task_fn = partial(_process_single_raw_segment, h5_filepath=h5_filepath, truncate_m_points=truncate_m_points) results = list(executor.map(task_fn, items_to_process_raw)) all_processed_segments_info = [res for res in results if res is not None] if not all_processed_segments_info: print("No valid raw segments processed."); return None, None full_segments_df = pd.DataFrame(all_processed_segments_info) full_segments_df['Id'] = pd.to_numeric(full_segments_df['Id']) # For sorting full_segments_df.sort_values(by='Id', inplace=True) print(f"\nSampling {sample_k_segments} segments per (Dataset_Name, Domain_id, Label) group...") grouping_cols_sample = [col for col in ['Dataset_Name', 'Domain_id', 'Label'] if col in full_segments_df.columns] if not grouping_cols_sample: print("WARNING: Not enough columns for grouped sampling. Using all processed segments.") sampled_df = full_segments_df else: def sample_group_k(group): return group.sample(n=min(len(group), sample_k_segments), random_state=1) # Consistent sampling sampled_df = full_segments_df.groupby(grouping_cols_sample, group_keys=False).apply(sample_group_k) sampled_df.sort_values(by='Id', inplace=True) # Ensure final sort by Id print(f"Sampling complete. Selected {len(sampled_df)} segments for UMAP.") if sampled_df.empty: print("No segments remaining after sampling."); return None, None sampled_raw_segments_matrix = np.array(sampled_df['raw_segment'].tolist()) sampled_labels_df = sampled_df.drop(columns=['raw_segment']) # Cache the sampled data try: np.save(raw_segments_matrix_cache_path, sampled_raw_segments_matrix) # Ensure string types for labels before saving for col in ['Dataset_Name', 'Label', 'Domain_id', 'Fault_level', 'Id', 'H5_Key', 'Dataset_id']: if col in sampled_labels_df.columns: sampled_labels_df[col] = sampled_labels_df[col].astype(str) sampled_labels_df.to_csv(raw_segments_labels_cache_path, index=False) print("Sampled raw data cached successfully.") except Exception as e: print(f"Error caching sampled raw data: {e}") return sampled_raw_segments_matrix, sampled_labels_df # --- 3. 

# --- 3. UMAP Processing ---
def run_umap_reduction(data_matrix, cache_dir, umap_params, filename_prefix="umap_embedding"):
    """Runs UMAP and caches the embedding."""
    n_neighbors = umap_params.get('n_neighbors', 15)
    min_dist = umap_params.get('min_dist', 0.1)
    n_components = umap_params.get('n_components', 2)
    metric = umap_params.get('metric', 'euclidean')  # Common for raw signals
    random_state = umap_params.get('random_state', 42)

    embedding_cache_path = os.path.join(
        cache_dir, f"{filename_prefix}_nn{n_neighbors}_md{min_dist}_c{n_components}_{metric}.npy")
    if os.path.exists(embedding_cache_path):
        try:
            print("Loading UMAP embedding from cache...")
            embedding = np.load(embedding_cache_path)
            print("Successfully loaded UMAP embedding from cache.")
            return embedding
        except Exception as e:
            print(f"Error loading UMAP embedding from cache: {e}. Re-calculating.")
            if os.path.exists(embedding_cache_path):
                os.remove(embedding_cache_path)

    print(f"\nRunning UMAP (n_neighbors={n_neighbors}, min_dist={min_dist}, metric={metric})...")
    if data_matrix.shape[0] <= n_neighbors:
        print(f"WARNING: Number of samples ({data_matrix.shape[0]}) is less than or equal to "
              f"n_neighbors ({n_neighbors}). Adjusting n_neighbors.")
        if data_matrix.shape[0] <= 1:  # Cannot run UMAP on a single sample
            print("ERROR: Cannot run UMAP on a single sample.")
            return None
        n_neighbors = max(1, data_matrix.shape[0] - 1)  # UMAP needs n_neighbors < n_samples

    reducer = umap.UMAP(
        n_neighbors=n_neighbors,
        min_dist=min_dist,
        n_components=n_components,
        metric=metric,
        random_state=random_state,
        verbose=True,
    )
    try:
        embedding = reducer.fit_transform(data_matrix)
        print("UMAP reduction complete.")
        np.save(embedding_cache_path, embedding)
        print(f"UMAP embedding cached: {embedding_cache_path}")
        return embedding
    except Exception as e:
        print(f"ERROR during UMAP: {e}")
        return None
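
# Illustrative parameter dict and call (a sketch; the values mirror the defaults
# read via umap_params.get above, and 'matrix'/'embedding' are hypothetical names):
#
#   params = {'n_neighbors': 15, 'min_dist': 0.1, 'n_components': 2,
#             'metric': 'euclidean', 'random_state': 42}
#   embedding = run_umap_reduction(matrix, cache_dir="cache", umap_params=params)
#   # embedding has shape (matrix.shape[0], params['n_components'])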

# --- 4. Plotting ---
def save_plot_multiformat(fig, output_dir, filename_base, high_dpi=300):
    """Saves the given figure in multiple formats."""
    formats = {"png": {"dpi": high_dpi}, "svg": {}, "pdf": {}}
    for ext, options in formats.items():
        try:
            save_path = os.path.join(output_dir, f"{filename_base}.{ext}")
            fig.savefig(save_path, **options, bbox_inches='tight')
            print(f"Plot saved: {save_path}")
        except Exception as e:
            print(f"ERROR saving plot {save_path}: {e}")


def plot_umap_global(umap_df, hue_col, output_dir, filename_base, title_suffix):
    """Plots the global UMAP, colored by hue_col, and returns the palette."""
    print(f"\nPlotting Global UMAP colored by {hue_col}...")
    if umap_df is None or umap_df.empty:
        print("No data for global UMAP plot.")
        return None

    unique_hues = sorted(umap_df[hue_col].unique())
    palette = sns.color_palette("husl", n_colors=len(unique_hues))  # husl is good for many distinct colors
    # palette = sns.color_palette("viridis", n_colors=len(unique_hues))
    color_map = dict(zip(unique_hues, palette))

    fig, ax = plt.subplots(figsize=(16, 12))
    sns.set_style("whitegrid")
    sns.scatterplot(
        x='UMAP1', y='UMAP2',
        hue=hue_col, hue_order=unique_hues, palette=color_map,
        data=umap_df, s=50, alpha=0.7, ax=ax,
        legend="auto" if len(unique_hues) <= 20 else False
    )
    ax.set_title(f'Global UMAP: {title_suffix}', fontsize=18)
    ax.set_xlabel('UMAP Component 1', fontsize=14)
    ax.set_ylabel('UMAP Component 2', fontsize=14)
    if len(unique_hues) > 1 and len(unique_hues) <= 20:
        ax.legend(title=hue_col, bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0.)
    save_plot_multiformat(fig, output_dir, filename_base)
    plt.show()
    plt.close(fig)
    return color_map


def plot_umap_faceted_by_dataset(umap_df, facet_col, color_col, style_col, dataset_color_map,
                                 output_dir, filename_base_prefix, title_prefix):
    """
    Plots UMAP data faceted by facet_col (e.g., Dataset_Name).
    Markers are colored by color_col (e.g., Domain_id) and styled by style_col (e.g., Label).
    Subplot frames are colored based on dataset_color_map.
    """
    print(f"\nPlotting Faceted UMAP: Facet by {facet_col}, Color by {color_col}, Style by {style_col}...")
    if umap_df is None or umap_df.empty:
        print(f"No data for faceted UMAP plot ({title_prefix}).")
        return
    if not all(c in umap_df.columns for c in [facet_col, color_col, style_col]):
        print(f"ERROR: Required columns '{facet_col}', '{color_col}', or '{style_col}' not in DataFrame for faceted plot.")
        return

    unique_facets = sorted(umap_df[facet_col].unique())
    if not unique_facets:
        print("No unique facets found.")
        return

    # Define consistent color and style mappings across all facets
    all_unique_colors = sorted(umap_df[color_col].astype(str).unique())
    color_palette_for_plot = sns.color_palette("Set2", n_colors=len(all_unique_colors))
    all_unique_styles = sorted(umap_df[style_col].astype(str).unique())

    # Define a list of markers - MODIFIED TO USE ONLY FILLED MARKERS or compatible ones.
    # Removed '+', 'X', 'P', '.' which can cause issues;
    # 'X' and 'P' are often filled but can be treated differently.
    # 'p' is pentagon, '*' is star, 'h'/'H' are hexagons.
    available_markers = ['o', 's', 'D', '^', 'v', '<', '>', 'p', '*', 'h', 'H']
    if len(all_unique_styles) > len(available_markers):
        print(f"WARNING: More unique styles ({len(all_unique_styles)}) than available distinct markers "
              f"({len(available_markers)}). Markers will repeat.")
    markers_for_plot = {style_val: available_markers[j % len(available_markers)]
                        for j, style_val in enumerate(all_unique_styles)}

    n_facets = len(unique_facets)
    n_cols = min(3, n_facets)
    n_rows = (n_facets + n_cols - 1) // n_cols
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(6 * n_cols, 5 * n_rows),
                             squeeze=False, sharex=True, sharey=True)
    axes_flat = axes.flatten()
    sns.set_style("whitegrid")

    for i, facet_value in enumerate(unique_facets):
        if i >= len(axes_flat):
            break
        ax = axes_flat[i]
        facet_data = umap_df[umap_df[facet_col] == facet_value].copy()
        facet_data[color_col] = facet_data[color_col].astype(str)
        facet_data[style_col] = facet_data[style_col].astype(str)
        if facet_data.empty:
            ax.text(0.5, 0.5, "No data", ha='center', va='center', transform=ax.transAxes)
            ax.set_title(f"{facet_value}", fontsize=12, color='grey')
            ax.tick_params(labelbottom=False, labelleft=False)
            continue

        # Ensure style_order matches the keys in markers_for_plot for consistency
        current_style_order = [s for s in all_unique_styles if s in facet_data[style_col].unique()]
        sns.scatterplot(
            x='UMAP1', y='UMAP2',
            hue=color_col, hue_order=all_unique_colors, palette=color_palette_for_plot,
            style=style_col, style_order=current_style_order,  # Use the order of styles present in the current facet_data
            markers=markers_for_plot,
            data=facet_data, s=70, alpha=0.85, ax=ax,  # Increased marker size slightly
            legend='auto'
        )
        ax.set_title(f"{facet_value}", fontsize=14, fontweight='bold',
                     color=dataset_color_map.get(facet_value, 'black'))
        if n_rows > 1 and i < (n_rows - 1) * n_cols:
            ax.set_xlabel('')
            ax.tick_params(labelbottom=False)
        if n_cols > 1 and i % n_cols != 0:
            ax.set_ylabel('')
            ax.tick_params(labelleft=False)

        spine_color = dataset_color_map.get(facet_value, 'grey')
        for spine in ax.spines.values():
            spine.set_edgecolor(spine_color)
            spine.set_linewidth(2.5)

        try:
            handles, labels = ax.get_legend_handles_labels()
            if handles:
                legend_title = f"{color_col} (Color)\n{style_col} (Marker)"
                # Filter legend items to only those present in the current facet_data.
                # This is a bit more complex as seaborn might already do this.
                # The primary goal is to ensure the legend isn't overly crowded.
                num_legend_items = len(handles)
                if num_legend_items > 10:
                    ax.legend(title=legend_title, fontsize='xx-small', loc='best',
                              ncol=2 if num_legend_items > 5 else 1)
                    if num_legend_items > 20:
                        ax.legend().set_visible(False)
                elif num_legend_items > 0:  # Only show legend if there are items
                    ax.legend(title=legend_title, fontsize='x-small', loc='best')
            else:
                # No items, hide legend explicitly
                if ax.get_legend() is not None:
                    ax.get_legend().set_visible(False)
        except AttributeError:
            pass

    for j in range(i + 1, len(axes_flat)):
        fig.delaxes(axes_flat[j])

    fig.suptitle(f"{title_prefix}\n(Faceted by {facet_col})", fontsize=20, y=0.99)
    if n_rows > 0 and n_cols > 0:
        fig.supxlabel("UMAP Component 1", fontsize=16, y=0.01 if n_rows > 1 else -0.02)
        fig.supylabel("UMAP Component 2", fontsize=16, x=0.01 if n_cols > 1 else -0.02)
    plt.tight_layout(rect=[0.03, 0.03, 0.97, 0.96])
    save_plot_multiformat(fig, output_dir,
                          f"{filename_base_prefix}_facet_{facet_col}_color_{color_col}_style_{style_col}")
    plt.show()
    plt.close(fig)


# --- 5. Main Orchestration ---
def main():
    # --- File Paths & Directories ---
    excel_filepath = "/home/user/data/PHMbenchdata/PHM-Vibench/metadata_6_1.xlsx"  # Ensure this is correct
    h5_filepath = "/home/user/data/PHMbenchdata/PHM-Vibench/metadata_6_1.h5"  # Ensure this is correct
    base_output_dir = f"analysis_raw_m{TRUNCATE_M_POINTS}_k{SAMPLE_K_SEGMENTS}"
    cache_dir = os.path.join(base_output_dir, "00_cache")
    reports_dir = os.path.join(base_output_dir, "01_reports")
    umap_plots_dir = os.path.join(base_output_dir, "02_umap_plots")
    for d in [cache_dir, reports_dir, umap_plots_dir]:
        os.makedirs(d, exist_ok=True)

    # --- Parameters ---
    num_datasets_to_plot = DEFAULT_NUM_DATASETS_TO_DISPLAY  # Or None for all, or specific list
    max_parallel_workers = os.cpu_count() // 2 if os.cpu_count() and os.cpu_count() > 1 else 1
    umap_parameters = {
        'n_neighbors': 200,  # Might need tuning for raw signals
        'min_dist': 0.1,
        'n_components': 2,
        'metric': 'correlation',  # 'correlation' or 'dtw' could be alternatives for time series
        'random_state': 42
    }

    # --- Step 1: Load and Select Metadata ---
    print("--- Step 1: Loading and Selecting Metadata ---")
    selected_metadata = load_and_select_metadata(excel_filepath,
                                                 num_datasets_to_display=num_datasets_to_plot,
                                                 reports_dir=reports_dir)
    if selected_metadata is None:
        print("Analysis halted due to metadata loading issues.")
        return

    # --- Step 2: Load, Process, and Sample Raw Data ---
    print("\n--- Step 2: Loading, Processing, and Sampling Raw Data ---")
    sampled_raw_matrix, sampled_labels = load_process_sample_raw_data(
        h5_filepath, selected_metadata, cache_dir,
        truncate_m_points=TRUNCATE_M_POINTS,
        sample_k_segments=SAMPLE_K_SEGMENTS,
        max_workers=max_parallel_workers
    )
    if sampled_raw_matrix is None or sampled_labels is None or sampled_raw_matrix.shape[0] == 0:
        print("Analysis halted: No usable raw data segments after processing/sampling.")
        return
    print(f"Shape of sampled raw data matrix for UMAP: {sampled_raw_matrix.shape}")
    print(f"Number of labels for UMAP: {len(sampled_labels)}")

    # --- Step 3: Run UMAP ---
    print("\n--- Step 3: Running UMAP ---")
    umap_embedding = run_umap_reduction(sampled_raw_matrix, cache_dir, umap_parameters,
                                        filename_prefix=f"raw_m{TRUNCATE_M_POINTS}_k{SAMPLE_K_SEGMENTS}")
    if umap_embedding is None:
        print("Analysis halted: UMAP embedding failed.")
        return

    # Create DataFrame for plotting
    umap_plot_df = sampled_labels.copy()
    umap_plot_df['UMAP1'] = umap_embedding[:, 0]
    umap_plot_df['UMAP2'] = umap_embedding[:, 1]

    # Save UMAP results with labels
    umap_results_path = os.path.join(reports_dir, f"umap_results_m{TRUNCATE_M_POINTS}_k{SAMPLE_K_SEGMENTS}.csv")
    umap_plot_df.to_csv(umap_results_path, index=False)
    print(f"UMAP results with labels saved to: {umap_results_path}")

    # --- Step 4: Plotting ---
    print("\n--- Step 4: Generating UMAP Plots ---")

    # Global plot by Dataset_Name
    dataset_color_palette = plot_umap_global(
        umap_plot_df,
        hue_col='Dataset_Name',
        output_dir=umap_plots_dir,
        filename_base="global_umap_by_dataset_name",
        title_suffix="Colored by Dataset Name"
    )
    if dataset_color_palette is None:
        dataset_color_palette = {}  # Fallback

    # Combined Faceted plot:
    # Facet by Dataset_Name, Color by Domain_id, Marker Style by Label
    plot_umap_faceted_by_dataset(
        umap_plot_df,
        facet_col='Dataset_Name',
        color_col='Domain_id',  # Color by Domain_id
        style_col='Label',  # Marker style by Label
        dataset_color_map=dataset_color_palette,  # For subplot frame color
        output_dir=umap_plots_dir,
        filename_base_prefix="faceted_umap_combined",
        title_prefix="UMAP: Domain ID (Color) & Label (Marker)"
    )

    print("\n--- Analysis Complete ---")


if __name__ == '__main__':
    main()
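
# Expected on-disk layout after a successful run (illustrative, derived from the
# directory names and default constants configured above; exact filenames depend
# on the m/k and UMAP parameters used):
#
#   analysis_raw_m2048_k200/
#       00_cache/         cached sampled-segment .npy/.csv files and UMAP embeddings
#       01_reports/       selected_metadata.csv, umap_results_*.csv
#       02_umap_plots/    global and faceted UMAP figures (.png / .svg / .pdf)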