mustafa2ak committed
Commit 1b0864d · verified · 1 Parent(s): 688d84b

Update reid.py

Files changed (1)
  1. reid.py +97 -372
reid.py CHANGED
@@ -1,290 +1,104 @@
  """
- reid.py - Multi-Component ReID System with ResNet, Statistics, and Heuristics
- Includes persistent ID management and configurable component weights
  """
  import numpy as np
  import cv2
  import torch
  import torch.nn as nn
- import torchvision.models as models
- import torchvision.transforms as transforms
  from sklearn.metrics.pairwise import cosine_similarity
  from typing import Dict, List, Optional, Tuple
  from dataclasses import dataclass, field
- import json
- from pathlib import Path
  import warnings
  warnings.filterwarnings('ignore')


  @dataclass
  class DogFeatures:
-     """Enhanced container for dog features"""
-     resnet_features: np.ndarray
-     color_histogram: np.ndarray = None
-     size_info: Dict = field(default_factory=dict)
-     velocity: np.ndarray = None
      confidence: float = 0.5
-     quality_score: float = 0.5
      frame_num: int = 0
-     bbox: List[float] = field(default_factory=list)
-     track_id: int = 0


- class MultiComponentReID:
      """
-     Three-component ReID system with ResNet, Statistics, and Heuristics
      """

-     def __init__(self, device: str = 'cuda', id_offset_file: str = 'reid_state.json'):
          self.device = device if torch.cuda.is_available() else 'cpu'
-         self.id_offset_file = Path(id_offset_file)
-
-         # Component weights (will be set by sliders)
-         self.component_weights = {
-             'resnet': 0.70, # 70% default
-             'statistics': 0.20, # 20% default
-             'heuristics': 0.10 # 10% default
-         }
-
-         # Similarity thresholds
          self.base_threshold = 0.60
-         self.adaptive_threshold = True

-         # Dog database
          self.dog_database = {} # dog_id -> list of DogFeatures
-         self.next_dog_id = self._load_id_offset()
-
-         # Tracking maps
-         self.track_to_dog = {}
-         self.dog_statistics = {} # dog_id -> statistical features
-         self.dog_last_seen = {}
-         self.dog_entrance_point = {} # dog_id -> first seen location
-         self.dog_trajectory = {} # dog_id -> list of positions
-
          self.current_frame = 0
-         self.frame_width = 640
-         self.frame_height = 480

-         # Initialize ResNet
-         self._initialize_resnet()

-         print(f"✅ Multi-Component ReID initialized")
-         print(f" Device: {self.device}")
-         print(f" Starting Dog ID: {self.next_dog_id}")
-         print(f" Components: ResNet, Statistics, Heuristics")

-     def _load_id_offset(self) -> int:
-         """Load the last used dog ID from persistent storage"""
-         if self.id_offset_file.exists():
-             try:
-                 with open(self.id_offset_file, 'r') as f:
-                     data = json.load(f)
-                     return data.get('next_dog_id', 1)
-             except:
-                 pass
-         return 1
-
-     def _save_id_offset(self):
-         """Save the current dog ID counter"""
-         try:
-             with open(self.id_offset_file, 'w') as f:
-                 json.dump({'next_dog_id': self.next_dog_id}, f)
-         except:
-             pass
-
-     def _initialize_resnet(self):
-         """Initialize ResNet50 for feature extraction"""
          try:
-             resnet = models.resnet50(weights='IMAGENET1K_V1')
-             self.resnet_model = nn.Sequential(*list(resnet.children())[:-1])
-             self.resnet_model.to(self.device).eval()

-             self.transform = transforms.Compose([
-                 transforms.ToPILImage(),
-                 transforms.Resize((224, 224)),
-                 transforms.ToTensor(),
-                 transforms.Normalize(
-                     mean=[0.485, 0.456, 0.406],
-                     std=[0.229, 0.224, 0.225]
-                 )
-             ])
          except Exception as e:
-             print(f"❌ ResNet initialization error: {e}")
-             self.resnet_model = None

-     def set_component_weights(self, resnet: float, statistics: float, heuristics: float):
-         """
-         Set component weights from UI sliders (0-100 scale)
-         Normalizes to ensure they sum to 1.0
-         """
-         total = resnet + statistics + heuristics
-         if total > 0:
-             self.component_weights['resnet'] = resnet / total
-             self.component_weights['statistics'] = statistics / total
-             self.component_weights['heuristics'] = heuristics / total
-         else:
-             # Default if all are zero
-             self.component_weights = {'resnet': 0.7, 'statistics': 0.2, 'heuristics': 0.1}
-
-         print(f"📊 Component weights updated:")
-         print(f" ResNet: {self.component_weights['resnet']:.2%}")
-         print(f" Statistics: {self.component_weights['statistics']:.2%}")
-         print(f" Heuristics: {self.component_weights['heuristics']:.2%}")
-
-     def extract_features(self, image: np.ndarray, bbox: List[float] = None,
-                          track_info: Dict = None) -> Optional[DogFeatures]:
-         """Extract multi-component features"""
-         if image is None or image.size == 0:
              return None

-         features = DogFeatures(
-             resnet_features=np.zeros(2048),
-             bbox=bbox if bbox else [0, 0, 100, 100]
-         )
-
-         # 1. ResNet features
-         if self.resnet_model is not None:
-             try:
-                 img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-                 img_tensor = self.transform(img_rgb).unsqueeze(0).to(self.device)
-
-                 with torch.no_grad():
-                     resnet_feat = self.resnet_model(img_tensor)
-                 resnet_feat = resnet_feat.squeeze().cpu().numpy()
-                 features.resnet_features = resnet_feat / (np.linalg.norm(resnet_feat) + 1e-7)
-             except:
-                 pass
-
-         # 2. Color histogram (for heuristics)
          try:
-             # Calculate color histogram in HSV space
-             hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
-             hist_h = cv2.calcHist([hsv], [0], None, [30], [0, 180])
-             hist_s = cv2.calcHist([hsv], [1], None, [32], [0, 256])
-             hist_v = cv2.calcHist([hsv], [2], None, [32], [0, 256])

-             color_hist = np.concatenate([hist_h.flatten(), hist_s.flatten(), hist_v.flatten()])
-             features.color_histogram = color_hist / (np.sum(color_hist) + 1e-7)
-         except:
-             features.color_histogram = np.zeros(94) # 30+32+32
-
-         # 3. Size information (for statistics)
-         if bbox:
-             features.size_info = {
-                 'width': bbox[2] - bbox[0],
-                 'height': bbox[3] - bbox[1],
-                 'aspect_ratio': (bbox[2] - bbox[0]) / max(1, bbox[3] - bbox[1]),
-                 'area': (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
-             }
-
-         # 4. Motion information (if available)
-         if track_info:
-             features.velocity = track_info.get('velocity', np.array([0, 0]))
-             features.track_id = track_info.get('track_id', 0)
-
-         features.frame_num = self.current_frame
-
-         return features
-
-     def _calculate_resnet_similarity(self, feat1: DogFeatures, feat2: DogFeatures) -> float:
-         """Calculate ResNet-based similarity"""
-         if feat1.resnet_features is None or feat2.resnet_features is None:
-             return 0.0
-
-         return cosine_similarity(
-             feat1.resnet_features.reshape(1, -1),
-             feat2.resnet_features.reshape(1, -1)
-         )[0, 0]
-
-     def _calculate_statistical_similarity(self, dog_id: int, new_features: DogFeatures) -> float:
-         """Calculate statistics-based similarity"""
-         if dog_id not in self.dog_statistics:
-             return 0.5 # Neutral score if no statistics
-
-         stats = self.dog_statistics[dog_id]
-         score = 0.0
-         weights_sum = 0.0
-
-         # Size consistency
-         if 'avg_size' in stats and new_features.size_info:
-             size_diff = abs(new_features.size_info['area'] - stats['avg_size'])
-             size_score = max(0, 1 - size_diff / (stats['avg_size'] + 1e-7))
-             score += size_score * 0.3
-             weights_sum += 0.3
-
-         # Aspect ratio consistency
-         if 'avg_aspect' in stats and new_features.size_info:
-             aspect_diff = abs(new_features.size_info['aspect_ratio'] - stats['avg_aspect'])
-             aspect_score = max(0, 1 - aspect_diff / 2)
-             score += aspect_score * 0.2
-             weights_sum += 0.2
-
-         # Velocity consistency
-         if 'avg_velocity' in stats and new_features.velocity is not None:
-             vel_magnitude_old = np.linalg.norm(stats['avg_velocity'])
-             vel_magnitude_new = np.linalg.norm(new_features.velocity)
-             vel_diff = abs(vel_magnitude_new - vel_magnitude_old)
-             vel_score = max(0, 1 - vel_diff / 50) # 50 pixels/frame max expected
-             score += vel_score * 0.3
-             weights_sum += 0.3
-
-         # Confidence pattern
-         if 'avg_confidence' in stats:
-             conf_diff = abs(new_features.confidence - stats['avg_confidence'])
-             conf_score = max(0, 1 - conf_diff)
-             score += conf_score * 0.2
-             weights_sum += 0.2
-
-         return score / max(weights_sum, 0.1)
-
-     def _calculate_heuristic_similarity(self, dog_id: int, new_features: DogFeatures) -> float:
-         """Calculate heuristic-based similarity"""
-         score = 0.0
-         weights_sum = 0.0
-
-         # Temporal proximity (recently seen dogs more likely)
-         if dog_id in self.dog_last_seen:
-             frames_since = self.current_frame - self.dog_last_seen[dog_id]
-             if frames_since < 30: # Within 1 second at 30fps
-                 temporal_score = 1.0 - frames_since / 30
-                 score += temporal_score * 0.3
-                 weights_sum += 0.3
-
-         # Spatial coherence (can't teleport)
-         if dog_id in self.dog_trajectory and len(self.dog_trajectory[dog_id]) > 0:
-             last_pos = self.dog_trajectory[dog_id][-1]
-             new_pos = [(new_features.bbox[0] + new_features.bbox[2])/2,
-                        (new_features.bbox[1] + new_features.bbox[3])/2]

-             distance = np.linalg.norm(np.array(new_pos) - np.array(last_pos))
-             max_reasonable_dist = 100 # pixels per frame

-             if distance < max_reasonable_dist:
-                 spatial_score = 1.0 - distance / max_reasonable_dist
-                 score += spatial_score * 0.4
-                 weights_sum += 0.4
-
-         # Color similarity
-         if dog_id in self.dog_database and new_features.color_histogram is not None:
-             dog_features_list = self.dog_database[dog_id]
-             if dog_features_list and dog_features_list[-1].color_histogram is not None:
-                 color_sim = cosine_similarity(
-                     new_features.color_histogram.reshape(1, -1),
-                     dog_features_list[-1].color_histogram.reshape(1, -1)
-                 )[0, 0]
-                 score += color_sim * 0.3
-                 weights_sum += 0.3
-
-         return score / max(weights_sum, 0.1)

      def match_or_register(self, track, image_crop=None) -> Tuple[int, float]:
-         """
-         Main matching function using all three components
-         """
          self.current_frame += 1

-         # Get detection and extract features
          detection = None
          for det in reversed(track.detections[-3:]):
              if det.image_crop is not None:
@@ -296,15 +110,9 @@ class MultiComponentReID:
              return 0, 0.0

          # Extract features
-         track_info = {
-             'track_id': track.track_id,
-             'velocity': track.velocity if hasattr(track, 'velocity') else np.array([0, 0])
-         }
-
          features = self.extract_features(
              image_crop,
-             detection.bbox if hasattr(detection, 'bbox') else None,
-             track_info
          )

          if features is None:
@@ -312,120 +120,49 @@

          features.confidence = detection.confidence if hasattr(detection, 'confidence') else 0.5

-         # Calculate similarities with all dogs
          best_dog_id = None
          best_score = -1.0

-         for dog_id in self.dog_database:
-             # ResNet similarity
-             resnet_scores = []
-             for dog_feat in self.dog_database[dog_id][-5:]:
-                 resnet_scores.append(self._calculate_resnet_similarity(features, dog_feat))
-             resnet_sim = np.mean(resnet_scores) if resnet_scores else 0
-
-             # Statistical similarity
-             stat_sim = self._calculate_statistical_similarity(dog_id, features)
-
-             # Heuristic similarity
-             heur_sim = self._calculate_heuristic_similarity(dog_id, features)
-
-             # Weighted combination
-             total_score = (
-                 self.component_weights['resnet'] * resnet_sim +
-                 self.component_weights['statistics'] * stat_sim +
-                 self.component_weights['heuristics'] * heur_sim
-             )

-             if total_score > best_score:
-                 best_score = total_score
-                 best_dog_id = dog_id
-
-         # Determine threshold
-         threshold = self.base_threshold
-         if self.adaptive_threshold:
-             # Adjust based on number of dogs
-             n_dogs = len(self.dog_database)
-             if n_dogs > 5:
-                 threshold += 0.05 * min(1, (n_dogs - 5) / 10)
-
-         # Make decision
-         if best_dog_id is not None and best_score >= threshold:
-             # Match found
-             self._update_dog(best_dog_id, features)
              return best_dog_id, best_score
          else:
              # New dog
-             new_dog_id = self._register_new_dog(features)
              return new_dog_id, 1.0

-     def _update_dog(self, dog_id: int, features: DogFeatures):
-         """Update existing dog with new features"""
-         # Add to database
-         self.dog_database[dog_id].append(features)
-         if len(self.dog_database[dog_id]) > 10:
-             self.dog_database[dog_id] = self.dog_database[dog_id][-10:]
-
-         # Update statistics
-         if dog_id not in self.dog_statistics:
-             self.dog_statistics[dog_id] = {}
-
-         stats = self.dog_statistics[dog_id]
-
-         # Update running averages
-         if features.size_info:
-             stats['avg_size'] = stats.get('avg_size', 0) * 0.9 + features.size_info['area'] * 0.1
-             stats['avg_aspect'] = stats.get('avg_aspect', 1) * 0.9 + features.size_info['aspect_ratio'] * 0.1
-
-         if features.velocity is not None:
-             if 'avg_velocity' not in stats:
-                 stats['avg_velocity'] = features.velocity
-             else:
-                 stats['avg_velocity'] = stats['avg_velocity'] * 0.9 + features.velocity * 0.1
-
-         stats['avg_confidence'] = stats.get('avg_confidence', 0.5) * 0.9 + features.confidence * 0.1
-
-         # Update tracking
-         self.dog_last_seen[dog_id] = self.current_frame
-
-         if dog_id not in self.dog_trajectory:
-             self.dog_trajectory[dog_id] = []
-
-         if features.bbox:
-             center = [(features.bbox[0] + features.bbox[2])/2,
-                       (features.bbox[1] + features.bbox[3])/2]
-             self.dog_trajectory[dog_id].append(center)
-             if len(self.dog_trajectory[dog_id]) > 30:
-                 self.dog_trajectory[dog_id] = self.dog_trajectory[dog_id][-30:]
-
-     def _register_new_dog(self, features: DogFeatures) -> int:
-         """Register a new dog"""
-         new_dog_id = self.next_dog_id
-         self.next_dog_id += 1
-         self._save_id_offset() # Save the updated counter
-
-         self.dog_database[new_dog_id] = [features]
-         self.dog_statistics[new_dog_id] = {}
-         self.dog_last_seen[new_dog_id] = self.current_frame
-
-         if features.bbox:
-             center = [(features.bbox[0] + features.bbox[2])/2,
-                       (features.bbox[1] + features.bbox[3])/2]
-             self.dog_entrance_point[new_dog_id] = center
-             self.dog_trajectory[new_dog_id] = [center]
-
-         print(f" 🆕 New dog registered: Dog {new_dog_id}")
-
-         return new_dog_id
-
      def match_or_register_all(self, track) -> Dict:
-         """Compatible interface for existing code"""
          dog_id, confidence = self.match_or_register(track)
-
          return {
-             'ResNet50': {
                  'dog_id': dog_id,
-                 'confidence': confidence,
-                 'processing_time': 0
              }
          }

@@ -435,33 +172,21 @@
          print(f"📊 ReID threshold updated to: {self.base_threshold:.2f}")

      def reset_all(self):
-         """Reset for new video but preserve ID counter"""
-         # Clear temporary data
          self.dog_database.clear()
-         self.track_to_dog.clear()
-         self.dog_statistics.clear()
-         self.dog_last_seen.clear()
-         self.dog_trajectory.clear()
-         self.dog_entrance_point.clear()
          self.current_frame = 0
-
-         # DO NOT reset next_dog_id - preserve it!
-         print(f"🔄 ReID reset - Next dog ID: {self.next_dog_id}")

      def get_statistics(self) -> Dict:
          """Get current statistics"""
          return {
-             'ResNet50': {
-                 'total_dogs': len(self.dog_database),
-                 'next_dog_id': self.next_dog_id,
-                 'threshold': self.base_threshold,
-                 'weights': self.component_weights,
-                 'current_frame': self.current_frame
-             }
          }

- # Compatibility aliases
- SingleModelReID = MultiComponentReID
- ImprovedResNet50ReID = MultiComponentReID
- DualModelReID = MultiComponentReID
  """
+ Simplified ReID with MegaDescriptor-B-224
  """
  import numpy as np
  import cv2
  import torch
  import torch.nn as nn
+ import timm
  from sklearn.metrics.pairwise import cosine_similarity
  from typing import Dict, List, Optional, Tuple
  from dataclasses import dataclass, field
  import warnings
  warnings.filterwarnings('ignore')


  @dataclass
  class DogFeatures:
+     """Container for dog features"""
+     features: np.ndarray
+     bbox: List[float] = field(default_factory=list)
      confidence: float = 0.5
      frame_num: int = 0


+ class MegaDescriptorReID:
      """
+     Simplified ReID using MegaDescriptor-B-224
      """

+     def __init__(self, device: str = 'cuda'):
          self.device = device if torch.cuda.is_available() else 'cpu'
          self.base_threshold = 0.60

+         # Dog database (temporary only)
          self.dog_database = {} # dog_id -> list of DogFeatures
+         self.next_dog_id = 1
          self.current_frame = 0

+         # Initialize MegaDescriptor
+         self._initialize_megadescriptor()

+         print(f"✅ MegaDescriptor ReID initialized on {self.device}")

+     def _initialize_megadescriptor(self):
+         """Initialize MegaDescriptor-B-224"""
          try:
+             # Load MegaDescriptor-B-224 (balanced model)
+             self.model = timm.create_model(
+                 'hf-hub:BVRA/MegaDescriptor-B-224',
+                 pretrained=True
+             )
+             self.model.to(self.device).eval()
+
+             # Get the preprocessing config
+             data_config = timm.data.resolve_model_data_config(self.model)
+             self.transform = timm.data.create_transform(**data_config, is_training=False)
+
+             print("✅ MegaDescriptor-B-224 loaded successfully")
          except Exception as e:
+             print(f"❌ MegaDescriptor initialization error: {e}")
+             self.model = None

+     def extract_features(self, image: np.ndarray, bbox: List[float] = None) -> Optional[DogFeatures]:
+         """Extract features using MegaDescriptor"""
+         if image is None or image.size == 0 or self.model is None:
              return None

          try:
+             # Convert BGR to RGB
+             img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

+             # Convert to PIL Image for transform
+             from PIL import Image
+             pil_img = Image.fromarray(img_rgb)

+             # Apply MegaDescriptor transforms
+             img_tensor = self.transform(pil_img).unsqueeze(0).to(self.device)

+             # Extract features
+             with torch.no_grad():
+                 features = self.model(img_tensor)
+             features = features.squeeze().cpu().numpy()
+             # L2 normalize
+             features = features / (np.linalg.norm(features) + 1e-7)
+
+             return DogFeatures(
+                 features=features,
+                 bbox=bbox if bbox else [0, 0, 100, 100],
+                 frame_num=self.current_frame
+             )
+
+         except Exception as e:
+             print(f"Feature extraction error: {e}")
+             return None

      def match_or_register(self, track, image_crop=None) -> Tuple[int, float]:
+         """Match or register a dog"""
          self.current_frame += 1

+         # Get detection with image
          detection = None
          for det in reversed(track.detections[-3:]):
              if det.image_crop is not None:
              return 0, 0.0

          # Extract features
          features = self.extract_features(
              image_crop,
+             detection.bbox if hasattr(detection, 'bbox') else None
          )

          if features is None:

          features.confidence = detection.confidence if hasattr(detection, 'confidence') else 0.5

+         # Find best match
          best_dog_id = None
          best_score = -1.0

+         for dog_id, dog_features_list in self.dog_database.items():
+             # Calculate similarity with stored features
+             similarities = []
+             for stored_feat in dog_features_list[-5:]: # Use last 5 features
+                 sim = cosine_similarity(
+                     features.features.reshape(1, -1),
+                     stored_feat.features.reshape(1, -1)
+                 )[0, 0]
+                 similarities.append(sim)

+             if similarities:
+                 avg_similarity = np.mean(similarities)
+                 if avg_similarity > best_score:
+                     best_score = avg_similarity
+                     best_dog_id = dog_id
+
+         # Decision: match or new dog
+         if best_dog_id is not None and best_score >= self.base_threshold:
+             # Match found - update database
+             self.dog_database[best_dog_id].append(features)
+             # Keep only last 10 features per dog
+             if len(self.dog_database[best_dog_id]) > 10:
+                 self.dog_database[best_dog_id] = self.dog_database[best_dog_id][-10:]
              return best_dog_id, best_score
          else:
              # New dog
+             new_dog_id = self.next_dog_id
+             self.next_dog_id += 1
+             self.dog_database[new_dog_id] = [features]
+             print(f" 🆕 New dog registered: Dog {new_dog_id}")
              return new_dog_id, 1.0

      def match_or_register_all(self, track) -> Dict:
+         """Compatible interface"""
          dog_id, confidence = self.match_or_register(track)
          return {
+             'MegaDescriptor': {
                  'dog_id': dog_id,
+                 'confidence': confidence
              }
          }

          print(f"📊 ReID threshold updated to: {self.base_threshold:.2f}")

      def reset_all(self):
+         """Reset for new video"""
          self.dog_database.clear()
+         self.next_dog_id = 1
          self.current_frame = 0
+         print("🔄 ReID reset")

      def get_statistics(self) -> Dict:
          """Get current statistics"""
          return {
+             'total_dogs': len(self.dog_database),
+             'threshold': self.base_threshold
          }


+ # Compatibility aliases for existing code
+ MultiComponentReID = MegaDescriptorReID
+ SingleModelReID = MegaDescriptorReID
+ ImprovedResNet50ReID = MegaDescriptorReID
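
A minimal usage sketch of the new MegaDescriptorReID class, not part of the commit. The SimpleNamespace track/detection stand-ins are hypothetical and only mimic the attributes match_or_register() reads (detections, image_crop, bbox, confidence); it assumes reid.py above is importable and the BVRA/MegaDescriptor-B-224 weights can be fetched from the Hugging Face Hub via timm.

# Usage sketch (assumptions noted above); falls back to CPU if CUDA is unavailable.
import numpy as np
from types import SimpleNamespace

from reid import MegaDescriptorReID

reid = MegaDescriptorReID(device='cuda')

# Hypothetical stand-ins for a tracker's detection and track objects.
crop = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)  # dummy BGR crop
det = SimpleNamespace(image_crop=crop, bbox=[10.0, 20.0, 120.0, 220.0], confidence=0.9)
track = SimpleNamespace(track_id=1, detections=[det])

# First call registers Dog 1; later calls with similar crops should match it.
dog_id, score = reid.match_or_register(track, image_crop=crop)
print(f"Dog {dog_id}, score {score:.2f}")
print(reid.get_statistics())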