mustafa2ak committed
Commit a0358cb · verified · 1 Parent(s): 3ca9a8a

Update debug_wrapper.py

Files changed (1)
  1. debug_wrapper.py +249 -174
debug_wrapper.py CHANGED
@@ -1,11 +1,17 @@
- # debug_wrapper.py
  import os
  import time
  import json
  import numpy as np
  from collections import deque, Counter
  from pathlib import Path
- from typing import Any

  # Disable Gradio analytics (avoids pandas OptionError bug)
  try:
@@ -14,207 +20,276 @@ try:
  except Exception:
      pass

- # Create debug dir
  DEBUG_DIR = Path("debug_samples")
  DEBUG_DIR.mkdir(exist_ok=True)

- def _save_image_crop(img_bgr, prefix, idx):
-     try:
-         import cv2
-         fname = DEBUG_DIR / f"{prefix}_{idx:04d}.jpg"
-         cv2.imwrite(str(fname), img_bgr)
-         return str(fname)
-     except Exception:
-         return None
-
- def add_debugging(app: Any, keep_last_logs: int = 200):
-     """
-     Monkey-patch an existing DogMonitoringApp instance to collect
-     detailed debugging info without editing original modules.
-     """
-     # state containers
-     app._debug = {}
-     app._debug['start_time'] = time.time()
-     app._debug['frame'] = 0
-     app._debug['per_frame'] = []  # list of dicts
-     app._debug['counters'] = Counter()
-     app._debug['similarities'] = deque(maxlen=2000)
-     app._debug['similarity_hist_bins'] = np.linspace(0.0, 1.0, 21).tolist()
-     app._debug['recent_logs'] = deque(maxlen=keep_last_logs)
-     app._debug['bad_samples'] = []  # saved filenames for inspection
-     app._debug['last_summary'] = ""
-     app._debug['match_records'] = []  # store last N (score, dog_id, track_id)
-
-     def _log(msg: str, show_time: bool = True):
          ts = time.strftime("%H:%M:%S")
-         entry = f"[{ts}] {msg}" if show_time else msg
          print(entry, flush=True)
-         app._debug['recent_logs'].append(entry)
-         app._debug['last_summary'] = entry
-
-     def get_debug_output(lines: int = 10) -> str:
-         """Return recent debug log lines for UI display"""
-         return "\n".join(list(app._debug['recent_logs'])[-lines:])
-
-     def get_debug_summary() -> str:
-         """Return a compact summary (counts, rates, avg similarity)"""
-         c = app._debug['counters']
          total_matches = c['reid_matches']
          total_attempts = c['reid_attempts'] or 1
          match_rate = total_matches / total_attempts
-         avg_sim = np.mean(app._debug['similarities']) if len(app._debug['similarities']) > 0 else 0.0
-         elapsed = time.time() - app._debug['start_time']
-         s = (
-             f"Frames processed: {app._debug['frame']}\n"
-             f"Detections total: {c['detections']} ; Tracks created: {c['tracks_created']} ; Tracks confirmed: {c['tracks_confirmed']}\n"
-             f"ReID attempts: {total_attempts} ; Matches: {total_matches} ; Match rate: {match_rate:.2%}\n"
-             f"Avg similarity (last {len(app._debug['similarities'])}): {avg_sim:.3f}\n"
-             f"Feature extraction fails: {c['feature_fail']}\n"
-             f"Saved bad samples: {len(app._debug['bad_samples'])}\n"
-             f"Elapsed: {elapsed:.1f}s"
-         )
-         return s

-     # --- Monkey-patch detector.detect ---
      if hasattr(app, "detector") and hasattr(app.detector, "detect"):
          original_detect = app.detector.detect
-
          def wrapped_detect(frame, *args, **kwargs):
-             detections = original_detect(frame, *args, **kwargs)
-             n = len(detections) if detections is not None else 0
-             app._debug['counters']['detections'] += n
-             return detections
-
          app.detector.detect = wrapped_detect
-         _log("Injected debug wrapper for detector.detect()")
-
-     # --- Monkey-patch tracker.update ---
      if hasattr(app, "tracker") and hasattr(app.tracker, "update"):
          original_tracker_update = app.tracker.update
-
          def wrapped_tracker_update(detections, *args, **kwargs):
-             before_ids = set([t.track_id for t in getattr(app.tracker, "tracks", [])])
-             tracks = original_tracker_update(detections, *args, **kwargs)
-             after_ids = set([t.track_id for t in getattr(app.tracker, "tracks", [])])
-             new_ids = after_ids - before_ids
-             created = len(new_ids)
-             confirmed = len(tracks)
-             app._debug['counters']['tracks_created'] += created
-             app._debug['counters']['tracks_confirmed'] += confirmed
-             return tracks
-
          app.tracker.update = wrapped_tracker_update
-         _log("Injected debug wrapper for tracker.update()")
-
-     # --- Monkey-patch reid.match_or_register ---
      if hasattr(app, "reid") and hasattr(app.reid, "match_or_register"):
          original_match = app.reid.match_or_register
-
          def wrapped_match(track, *args, **kwargs):
-             app._debug['counters']['reid_attempts'] += 1
              try:
                  dog_id, confidence = original_match(track, *args, **kwargs)
              except Exception as e:
-                 app._debug['counters']['feature_fail'] += 1
-                 _log(f"ReID exception: {e}")
                  return 0, 0.0
-
-             sim = float(confidence or 0.0)
-             app._debug['similarities'].append(sim)
-             app._debug['match_records'].append({
-                 'sim': sim,
-                 'dog_id': int(dog_id or 0),
-                 'track_id': getattr(track, 'track_id', None),
-                 'ts': time.time()
-             })
-
-             # Save bad crop if similarity suspiciously low
-             try:
-                 latest_crop = None
-                 for d in reversed(track.detections):
-                     if d.image_crop is not None:
-                         latest_crop = d.image_crop
-                         break
-                 if latest_crop is not None:
-                     if sim < (getattr(app, 'reid_threshold', 0.7) - 0.15):
-                         idx = len(app._debug['bad_samples']) + 1
-                         saved = _save_image_crop(latest_crop, "bad_sample", idx)
-                         if saved:
-                             app._debug['bad_samples'].append({
-                                 'file': saved,
-                                 'sim': sim,
-                                 'dog_id': dog_id,
-                                 'track_id': getattr(track, 'track_id', None)
-                             })
-                             _log(f"Saved bad sample #{idx} sim={sim:.3f} dog_id={dog_id} track={getattr(track,'track_id',None)}")
-             except Exception as e:
-                 _log(f"Error saving sample: {e}")
-
-             # Count match vs miss
-             if sim >= getattr(app.reid, 'similarity_threshold', getattr(app, 'reid_threshold', 0.7)):
-                 app._debug['counters']['reid_matches'] += 1
-             else:
-                 app._debug['counters']['reid_misses'] += 1
-
-             return int(dog_id), sim
-
          app.reid.match_or_register = wrapped_match
-         _log("Injected debug wrapper for reid.match_or_register()")
-
-     # --- Wrap app.process_video ---
      if hasattr(app, "process_video"):
          original_process_video = app.process_video
-
          def wrapped_process_video(video_path: str, progress=None):
-             # Ensure progress is callable
-             if progress is None:
-                 def progress_stub(val, desc=None):
-                     return None
-                 progress = progress_stub
-
              # Reset debug state
-             app._debug['start_time'] = time.time()
-             app._debug['frame'] = 0
-             app._debug['per_frame'].clear()
-             app._debug['counters'] = Counter()
-             app._debug['similarities'].clear()
-             app._debug['bad_samples'].clear()
-             app._debug['match_records'].clear()
-             _log(f"DEBUG: Starting wrapped process_video for {video_path}")
-
-             result = original_process_video(video_path, progress)
-
-             # Save summary
-             summary = {
-                 'frames': app._debug['frame'],
-                 'counters': dict(app._debug['counters']),
-                 'avg_similarity': float(np.mean(app._debug['similarities']) if len(app._debug['similarities'])>0 else 0.0),
-                 'bad_samples': app._debug['bad_samples'][:20],
-                 'match_records_last': app._debug['match_records'][-20:]
-             }
              try:
-                 with open("debug_summary.json", "w") as f:
-                     json.dump(summary, f, indent=2)
-                 _log("Saved debug_summary.json")
              except Exception as e:
-                 _log(f"Failed to save debug summary: {e}")
-             _log("DEBUG: process_video finished")
-             return result
-
          app.process_video = wrapped_process_video
-         _log("Injected debug wrapper for app.process_video()")
-
-     # Expose tools
-     app.get_debug_output = get_debug_output
-     app.get_debug_summary = get_debug_summary
-     app._log_debug = _log
-
-     def print_recent_matches(n=10):
-         recs = list(app._debug['match_records'])[-n:]
-         for r in recs:
-             _log(json.dumps(r, default=str), show_time=False)
-
-     app.print_recent_matches = print_recent_matches
-
-     _log("Debug wrapper installed. Use app.get_debug_output(), app.get_debug_summary(), and inspect debug_samples/ for crops.")
-     return app

+ """
+ debug_wrapper.py - Debug Monitoring Module
+ Injects debugging capabilities without modifying core modules
+ """
+
  import os
  import time
  import json
  import numpy as np
+ import cv2
  from collections import deque, Counter
  from pathlib import Path
+ from typing import Any, Dict, List, Optional
+ import traceback

  # Disable Gradio analytics (avoids pandas OptionError bug)
  try:

  except Exception:
      pass

+ # Create debug directory
  DEBUG_DIR = Path("debug_samples")
  DEBUG_DIR.mkdir(exist_ok=True)

+ class DebugMonitor:
+     """Centralized debug monitoring and logging"""
+
+     def __init__(self, keep_last_logs: int = 200):
+         self.keep_last_logs = keep_last_logs
+         self.reset()
+
+     def reset(self):
+         """Reset all debug state"""
+         self.start_time = time.time()
+         self.frame_count = 0
+         self.counters = Counter()
+         self.similarities = deque(maxlen=2000)
+         self.recent_logs = deque(maxlen=self.keep_last_logs)
+         self.bad_samples = []
+         self.match_records = []
+         self.live_stats = {}  # Live statistics for display
+
+     def log(self, msg: str, level: str = "INFO"):
+         """Add log entry with timestamp"""
          ts = time.strftime("%H:%M:%S")
+         entry = f"[{ts}] [{level}] {msg}"
          print(entry, flush=True)
+         self.recent_logs.append(entry)
+
+     def save_bad_sample(self, img_bgr: np.ndarray, similarity: float, dog_id: int, track_id: int) -> Optional[str]:
+         """Save problematic detection sample"""
+         try:
+             idx = len(self.bad_samples) + 1
+             fname = DEBUG_DIR / f"bad_sample_{idx:04d}_sim{similarity:.2f}_dog{dog_id}.jpg"
+             cv2.imwrite(str(fname), img_bgr)
+
+             self.bad_samples.append({
+                 'file': str(fname),
+                 'similarity': similarity,
+                 'dog_id': dog_id,
+                 'track_id': track_id,
+                 'timestamp': time.time()
+             })
+
+             self.log(f"Saved bad sample #{idx}: sim={similarity:.3f}, dog_id={dog_id}, track={track_id}", "WARNING")
+             return str(fname)
+         except Exception as e:
+             self.log(f"Error saving bad sample: {e}", "ERROR")
+             return None
+
+     def update_live_stats(self):
+         """Update live statistics for display"""
+         total_matches = self.counters['reid_matches']
+         total_attempts = self.counters['reid_attempts'] or 1
+
+         self.live_stats = {
+             'frame': self.frame_count,
+             'dogs_detected': len(set(r['dog_id'] for r in self.match_records if r['dog_id'] > 0)),
+             'active_tracks': self.counters['active_tracks'],
+             'match_rate': total_matches / total_attempts,
+             'avg_similarity': np.mean(self.similarities) if self.similarities else 0.0,
+             'fps': self.frame_count / (time.time() - self.start_time + 1)
+         }
+
+     def get_debug_output(self, lines: int = 10) -> str:
+         """Get recent log lines for UI display"""
+         return "\n".join(list(self.recent_logs)[-lines:])
+
+     def get_debug_summary(self) -> str:
+         """Get comprehensive debug summary"""
+         c = self.counters
          total_matches = c['reid_matches']
          total_attempts = c['reid_attempts'] or 1
          match_rate = total_matches / total_attempts
+         avg_sim = np.mean(self.similarities) if self.similarities else 0.0
+         elapsed = time.time() - self.start_time
+
+         # Get unique dogs
+         unique_dogs = len(set(r['dog_id'] for r in self.match_records if r['dog_id'] > 0))
+
+         summary = f"""📊 Debug Summary
+ ━━━━━━━━━━━━━━━━━━━━━━
+ Frames: {self.frame_count} | Time: {elapsed:.1f}s | FPS: {self.frame_count/(elapsed+1):.1f}
+ Detections: {c['detections']} | Tracks: {c['tracks_created']} | Confirmed: {c['tracks_confirmed']}
+ Unique Dogs: {unique_dogs} | Active Tracks: {c['active_tracks']}
+ ReID Attempts: {total_attempts} | Matches: {total_matches} ({match_rate:.1%})
+ Avg Similarity: {avg_sim:.3f} | Feature Fails: {c['feature_fail']}
+ Bad Samples Saved: {len(self.bad_samples)}"""
+
+         return summary
+
+     def save_debug_report(self, filepath: str = "debug_report.json"):
+         """Save comprehensive debug report"""
+         try:
+             report = {
+                 'timestamp': time.strftime("%Y-%m-%d %H:%M:%S"),
+                 'frames_processed': self.frame_count,
+                 'elapsed_time': time.time() - self.start_time,
+                 'counters': dict(self.counters),
+                 'avg_similarity': float(np.mean(self.similarities)) if self.similarities else 0.0,
+                 'similarity_distribution': {
+                     'min': float(min(self.similarities)) if self.similarities else 0,
+                     'max': float(max(self.similarities)) if self.similarities else 0,
+                     'percentiles': {
+                         '25%': float(np.percentile(list(self.similarities), 25)) if self.similarities else 0,
+                         '50%': float(np.percentile(list(self.similarities), 50)) if self.similarities else 0,
+                         '75%': float(np.percentile(list(self.similarities), 75)) if self.similarities else 0,
+                         '90%': float(np.percentile(list(self.similarities), 90)) if self.similarities else 0
+                     }
+                 },
+                 'bad_samples': self.bad_samples[:20],
+                 'match_records_last': self.match_records[-50:]
+             }
+
+             with open(filepath, "w") as f:
+                 json.dump(report, f, indent=2, default=str)
+
+             self.log(f"Saved debug report to {filepath}", "INFO")
+         except Exception as e:
+             self.log(f"Failed to save debug report: {e}", "ERROR")

+ def add_debugging(app: Any) -> Any:
+     """
+     Inject debug monitoring into DogMonitoringApp
+
+     Args:
+         app: DogMonitoringApp instance
+
+     Returns:
+         Modified app with debug capabilities
+     """
+     # Create debug monitor
+     app.debug_monitor = DebugMonitor()
+
+     # ============== Wrap detector.detect ==============
      if hasattr(app, "detector") and hasattr(app.detector, "detect"):
          original_detect = app.detector.detect
+
          def wrapped_detect(frame, *args, **kwargs):
+             try:
+                 detections = original_detect(frame, *args, **kwargs)
+                 n = len(detections) if detections else 0
+                 app.debug_monitor.counters['detections'] += n
+                 if n > 0:
+                     app.debug_monitor.log(f"Detected {n} dogs in frame {app.debug_monitor.frame_count}", "DEBUG")
+                 return detections
+             except Exception as e:
+                 app.debug_monitor.log(f"Detection error: {e}", "ERROR")
+                 return []
+
          app.detector.detect = wrapped_detect
+         app.debug_monitor.log("Injected debug wrapper for detector.detect()")
+
+     # ============== Wrap tracker.update ==============
      if hasattr(app, "tracker") and hasattr(app.tracker, "update"):
          original_tracker_update = app.tracker.update
+
          def wrapped_tracker_update(detections, *args, **kwargs):
+             try:
+                 before_ids = set([t.track_id for t in getattr(app.tracker, "tracks", [])])
+                 tracks = original_tracker_update(detections, *args, **kwargs)
+                 after_ids = set([t.track_id for t in getattr(app.tracker, "tracks", [])])
+
+                 new_ids = after_ids - before_ids
+                 app.debug_monitor.counters['tracks_created'] += len(new_ids)
+                 app.debug_monitor.counters['tracks_confirmed'] += len(tracks)
+                 app.debug_monitor.counters['active_tracks'] = len(tracks)
+
+                 if new_ids:
+                     app.debug_monitor.log(f"Created {len(new_ids)} new tracks", "DEBUG")
+
+                 return tracks
+             except Exception as e:
+                 app.debug_monitor.log(f"Tracker error: {e}", "ERROR")
+                 return []
+
          app.tracker.update = wrapped_tracker_update
+         app.debug_monitor.log("Injected debug wrapper for tracker.update()")
+
+     # ============== Wrap reid.match_or_register ==============
      if hasattr(app, "reid") and hasattr(app.reid, "match_or_register"):
          original_match = app.reid.match_or_register
+
          def wrapped_match(track, *args, **kwargs):
+             app.debug_monitor.counters['reid_attempts'] += 1
              try:
                  dog_id, confidence = original_match(track, *args, **kwargs)
+                 dog_id = int(dog_id)
+                 sim = float(confidence)
+
+                 # Record similarity
+                 app.debug_monitor.similarities.append(sim)
+                 app.debug_monitor.match_records.append({
+                     'sim': sim,
+                     'dog_id': dog_id,
+                     'track_id': getattr(track, 'track_id', None),
+                     'frame': app.debug_monitor.frame_count
+                 })
+
+                 # Check for problematic matches
+                 threshold = getattr(app.reid, 'similarity_threshold', getattr(app, 'reid_threshold', 0.7))
+
+                 # Save bad samples (very low similarity)
+                 if sim < (threshold - 0.2) and track.detections:
+                     latest_crop = None
+                     for det in reversed(track.detections):
+                         if det.image_crop is not None:
+                             latest_crop = det.image_crop
+                             break
+
+                     if latest_crop is not None:
+                         app.debug_monitor.save_bad_sample(
+                             latest_crop, sim, dog_id,
+                             getattr(track, 'track_id', 0)
+                         )
+
+                 # Count match/miss
+                 if sim >= threshold:
+                     app.debug_monitor.counters['reid_matches'] += 1
+                 else:
+                     app.debug_monitor.counters['reid_misses'] += 1
+
+                 return dog_id, sim
+
              except Exception as e:
+                 app.debug_monitor.log(f"ReID error: {traceback.format_exc()}", "ERROR")
+                 app.debug_monitor.counters['feature_fail'] += 1
                  return 0, 0.0
+
          app.reid.match_or_register = wrapped_match
+         app.debug_monitor.log("Injected debug wrapper for reid.match_or_register()")
+
+     # ============== Wrap main process_video ==============
      if hasattr(app, "process_video"):
          original_process_video = app.process_video
+
          def wrapped_process_video(video_path: str, progress=None):
              # Reset debug state
+             app.debug_monitor.reset()
+             app.debug_monitor.log(f"Starting video processing: {video_path}")
+
+             # Wrap progress callback to count frames
+             if progress:
+                 original_progress = progress
+                 def wrapped_progress(val, desc=None):
+                     app.debug_monitor.frame_count = int(val * 100)  # Approximate frame count
+                     app.debug_monitor.update_live_stats()
+                     return original_progress(val, desc)
+                 progress = wrapped_progress
+
              try:
+                 result = original_process_video(video_path, progress)
+
+                 # Save debug report
+                 app.debug_monitor.save_debug_report()
+                 app.debug_monitor.log("Video processing completed successfully")
+
+                 return result
+
              except Exception as e:
+                 app.debug_monitor.log(f"Process video error: {traceback.format_exc()}", "ERROR")
+                 raise
+
          app.process_video = wrapped_process_video
+         app.debug_monitor.log("Injected debug wrapper for process_video()")
+
+     # ============== Add convenience methods ==============
+     app.get_debug_output = lambda lines=10: app.debug_monitor.get_debug_output(lines)
+     app.get_debug_summary = lambda: app.debug_monitor.get_debug_summary()
+     app.get_live_stats = lambda: app.debug_monitor.live_stats
+
+     app.debug_monitor.log("✓ Debug monitoring system initialized")
+     return app
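
For reference, a minimal usage sketch of the API this commit introduces. The `DogMonitoringApp` import path and constructor are assumptions about the surrounding project; `add_debugging`, the convenience methods, and the `debug_report.json` output come from the diff above.

from debug_wrapper import add_debugging
from app import DogMonitoringApp  # hypothetical import path for the main app

app = add_debugging(DogMonitoringApp())    # injects app.debug_monitor and the wrappers

result = app.process_video("sample.mp4")   # also writes debug_report.json on success
print(app.get_debug_summary())             # frame/track/ReID counters and rates
print(app.get_debug_output(lines=20))      # last 20 log lines
stats = app.get_live_stats()               # dict refreshed via the wrapped progress callback

Because each wrapper is installed only after a hasattr() check, calling add_debugging on an app that lacks a detector, tracker, reid, or process_video attribute simply skips that wrapper instead of failing.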