"""
Adaptive Strategies for Neural Reranking.
This module provides query-type aware reranking strategies that can
adapt model selection and parameters based on query characteristics
to optimize relevance and performance.
Migrated from reranking/ module and simplified for integration with
the enhanced neural reranker in the rerankers/ component.
"""
import logging
import re
import time
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
logger = logging.getLogger(__name__)


@dataclass
class QueryAnalysis:
    """Results of query analysis."""

    query_type: str
    confidence: float
    features: Dict[str, Any]
    recommended_model: str


class QueryTypeDetector:
    """
    Detects query types to enable adaptive reranking strategies.

    Analyzes queries and classifies them into categories such as technical,
    procedural, or comparative so that the optimal model can be selected
    for each query.
    """

    def __init__(self, confidence_threshold: float = 0.7):
        """
        Initialize the query type detector.

        Args:
            confidence_threshold: Minimum confidence required to accept a
                classification; below this, queries fall back to "general".
        """
        self.confidence_threshold = confidence_threshold
        self.stats = {
            "classifications": 0,
            "type_counts": {},
            "high_confidence": 0,
            "low_confidence": 0
        }

        # Model strategies for different query types
        self.strategies = {
            "technical": "technical_model",
            "general": "default_model",
            "comparative": "technical_model",
            "procedural": "default_model",
            "factual": "default_model"
        }
        # Regex patterns that characterize each query type. Alternations are
        # grouped so word boundaries apply to every alternative.
        self.patterns = {
            "technical": [
                r'\b(api|protocol|implementation|configuration|architecture)\b',
                r'\b(install|setup|configure|deploy)\b',
                r'\b(error|exception|debug|troubleshoot)\b',
                r'\b(version|compatibility|requirement)\b'
            ],
            "procedural": [
                r'\bhow to\b',
                r'\bstep by step\b',
                r'\b(guide|tutorial|walkthrough)\b',
                r'\b(process|procedure|workflow)\b'
            ],
            "comparative": [
                r'\b(vs|versus)\b',
                r'\bdifference between\b',
                r'\b(compare|comparison)\b',
                r'\b(better|best|worse|worst)\b'
            ],
            "factual": [
                r'\b(what|who|where) is\b',
                r'\b(define|definition)\b',
                r'\b(explain|describe)\b'
            ],
            "general": []  # Catch-all for queries that match no other pattern
        }

        logger.info("QueryTypeDetector initialized with built-in patterns")

    def classify_query(self, query: str) -> QueryAnalysis:
        """
        Classify a query into a type category.

        Args:
            query: The search query to classify.

        Returns:
            Query analysis with type, confidence, features, and the
            recommended model.
        """
        query_lower = query.lower()
        type_scores = {}

        # Score each query type by the fraction of its patterns that match,
        # so types with more patterns are not unfairly favored
        for query_type, patterns in self.patterns.items():
            if not patterns:  # Skip empty pattern lists (e.g. "general")
                continue
            matches = sum(1 for pattern in patterns if re.search(pattern, query_lower))
            type_scores[query_type] = matches / len(patterns)

        # Find the best-matching type
        if type_scores:
            best_type = max(type_scores, key=type_scores.get)
            confidence = type_scores[best_type]
        else:
            best_type = "general"
            confidence = 0.5  # Default confidence for general queries

        # Fall back to "general" when confidence is below the threshold
        if confidence < self.confidence_threshold:
            best_type = "general"
            confidence = 0.5

        # Extract additional features and look up the recommended model
        features = self._extract_features(query)
        recommended_model = self.strategies.get(best_type, "default_model")

        self._update_stats(best_type, confidence)

        return QueryAnalysis(
            query_type=best_type,
            confidence=confidence,
            features=features,
            recommended_model=recommended_model
        )

    def _extract_features(self, query: str) -> Dict[str, Any]:
        """Extract additional features from the query."""
        query_words = query.lower().split()
        return {
            "length": len(query),
            "word_count": len(query_words),
            "has_question_mark": "?" in query,
            "has_quotes": '"' in query or "'" in query,
            "is_uppercase": query.isupper(),
            "starts_with_question_word": query.lower().startswith(
                ('what', 'how', 'when', 'where', 'why', 'who')
            ),
            "technical_terms": sum(
                1 for w in query_words if w in {'api', 'protocol', 'config', 'setup'}
            )
        }
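    # Note: the extracted features ride along on QueryAnalysis for downstream
    # consumers; classification itself relies only on the pattern scores above.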

    def _update_stats(self, query_type: str, confidence: float) -> None:
        """Update classification statistics."""
        self.stats["classifications"] += 1
        self.stats["type_counts"][query_type] = (
            self.stats["type_counts"].get(query_type, 0) + 1
        )
        if confidence >= 0.8:
            self.stats["high_confidence"] += 1
        elif confidence < 0.5:
            self.stats["low_confidence"] += 1

    def get_stats(self) -> Dict[str, Any]:
        """Get classification statistics."""
        return self.stats.copy()
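

# Illustrative usage of QueryTypeDetector (the sample query and threshold are
# hypothetical). Note that a type's confidence is the fraction of its patterns
# that match, so with the default threshold of 0.7 a type needs 3 of 4 pattern
# hits to avoid falling back to "general":
#
#     detector = QueryTypeDetector(confidence_threshold=0.25)
#     analysis = detector.classify_query("how to configure the api gateway")
#     analysis.query_type         # "technical" (2 of 4 technical patterns hit)
#     analysis.confidence         # 0.5
#     analysis.recommended_model  # "technical_model"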


class AdaptiveStrategies:
    """
    Adaptive reranking strategies that adjust based on query characteristics.

    This component analyzes queries and selects optimal models and parameters
    to maximize relevance while maintaining performance targets.
    """

    def __init__(
        self,
        enabled: bool = True,
        confidence_threshold: float = 0.7,
        enable_dynamic_switching: bool = False,
        performance_window: int = 100,
        quality_threshold: float = 0.8
    ):
        """
        Initialize adaptive strategies.

        Args:
            enabled: Whether adaptive strategies are enabled.
            confidence_threshold: Minimum confidence for query classification.
            enable_dynamic_switching: Whether to enable performance-based
                model switching.
            performance_window: Number of queries to track for performance.
            quality_threshold: Quality threshold below which models are
                switched.
        """
        self.enabled = enabled
        self.enable_dynamic_switching = enable_dynamic_switching
        self.performance_window = performance_window
        self.quality_threshold = quality_threshold
        self.detector = QueryTypeDetector(confidence_threshold) if enabled else None
        self.stats = {
            "model_selections": 0,
            "adaptations": 0,
            "fallbacks": 0
        }

        # Performance tracking for adaptive adjustments
        self.performance_history: List[Dict[str, Any]] = []

        logger.info(f"AdaptiveStrategies initialized, enabled={enabled}")

    def select_model(
        self,
        query: str,
        available_models: List[str],
        default_model: str
    ) -> str:
        """
        Select the optimal model for a given query.

        Args:
            query: The search query.
            available_models: List of available model names.
            default_model: Default model to fall back to.

        Returns:
            Name of the selected model.
        """
        if not self.enabled or not self.detector:
            return default_model

        try:
            # Classify the query and get the recommended model
            analysis = self.detector.classify_query(query)
            recommended_model = analysis.recommended_model

            # Fall back to the default if the recommendation is unavailable
            if recommended_model in available_models:
                selected_model = recommended_model
            else:
                logger.warning(
                    f"Recommended model {recommended_model} not available, using default"
                )
                selected_model = default_model
                self.stats["fallbacks"] += 1

            # Consider performance-based adaptations
            if self.enable_dynamic_switching:
                selected_model = self._consider_performance_adaptation(
                    selected_model, available_models, default_model
                )

            self.stats["model_selections"] += 1
            logger.debug(
                f"Selected model '{selected_model}' for query type "
                f"'{analysis.query_type}' (confidence: {analysis.confidence:.2f})"
            )
            return selected_model

        except Exception as e:
            logger.error(f"Model selection failed: {e}")
            self.stats["fallbacks"] += 1
            return default_model

    def _consider_performance_adaptation(
        self,
        current_selection: str,
        available_models: List[str],
        default_model: str
    ) -> str:
        """Consider performance-based model adaptation."""
        try:
            # Only adapt once a full window of history is available
            if len(self.performance_history) >= self.performance_window:
                recent_performance = self.performance_history[-self.performance_window:]

                # Average quality for the currently selected model
                current_model_performance = [
                    p for p in recent_performance
                    if p.get("model") == current_selection
                ]
                if current_model_performance:
                    avg_quality = (
                        sum(p.get("quality", 0) for p in current_model_performance)
                        / len(current_model_performance)
                    )

                    # Switch if quality has dropped below the threshold
                    if avg_quality < self.quality_threshold:
                        logger.info(
                            f"Switching from {current_selection} due to low quality: "
                            f"{avg_quality:.2f}"
                        )
                        self.stats["adaptations"] += 1
                        return default_model

            return current_selection

        except Exception as e:
            logger.warning(f"Performance adaptation failed: {e}")
            return current_selection

    def adapt_parameters(
        self,
        query: str,
        model_name: str,
        base_config: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Adapt model parameters based on query characteristics.

        Args:
            query: The search query.
            model_name: Selected model name.
            base_config: Base model configuration.

        Returns:
            Adapted configuration.
        """
        if not self.enabled:
            return base_config

        try:
            adapted_config = base_config.copy()

            # Adapt batch size based on query complexity
            query_complexity = self._assess_query_complexity(query)
            if query_complexity == "high":
                adapted_config["batch_size"] = max(1, adapted_config.get("batch_size", 16) // 2)
            elif query_complexity == "low":
                adapted_config["batch_size"] = min(64, adapted_config.get("batch_size", 16) * 2)

            # Adapt the number of candidates based on query type
            if self.detector:
                analysis = self.detector.classify_query(query)
                if analysis.query_type == "technical":
                    # Technical queries may benefit from more candidates
                    adapted_config["max_candidates"] = min(
                        100, int(adapted_config.get("max_candidates", 50) * 1.5)
                    )
                elif analysis.query_type == "factual":
                    # Factual queries usually need fewer candidates
                    adapted_config["max_candidates"] = max(
                        10, adapted_config.get("max_candidates", 50) // 2
                    )

            return adapted_config

        except Exception as e:
            logger.error(f"Parameter adaptation failed: {e}")
            return base_config

    def _assess_query_complexity(self, query: str) -> str:
        """Assess query complexity for parameter adaptation."""
        word_count = len(query.split())
        if word_count > 10:
            return "high"
        elif word_count < 3:
            return "low"
        return "medium"

    def record_performance(
        self,
        model: str,
        query_type: str,
        latency_ms: float,
        quality_score: float
    ) -> None:
        """
        Record performance metrics for adaptive learning.

        Args:
            model: Model used.
            query_type: Type of query.
            latency_ms: Processing latency in milliseconds.
            quality_score: Quality metric in the range 0-1.
        """
        self.performance_history.append({
            "model": model,
            "query_type": query_type,
            "latency_ms": latency_ms,
            "quality": quality_score,
            "timestamp": time.time()
        })

        # Keep only recent history (twice the performance window)
        max_history = self.performance_window * 2
        if len(self.performance_history) > max_history:
            self.performance_history = self.performance_history[-max_history:]

    def get_stats(self) -> Dict[str, Any]:
        """Get adaptive strategies statistics."""
        stats = self.stats.copy()
        if self.detector:
            stats["detector"] = self.detector.get_stats()

        # Summarize the most recent performance records
        if self.performance_history:
            recent_performance = self.performance_history[-50:]  # Last 50 records
            stats["recent_performance"] = {
                "avg_latency_ms": sum(p["latency_ms"] for p in recent_performance) / len(recent_performance),
                "avg_quality": sum(p["quality"] for p in recent_performance) / len(recent_performance),
                "total_records": len(self.performance_history)
            }
        return stats

    def reset_stats(self) -> None:
        """Reset adaptive strategies statistics."""
        self.stats = {
            "model_selections": 0,
            "adaptations": 0,
            "fallbacks": 0
        }
        if self.detector:
            self.detector.stats = {
                "classifications": 0,
                "type_counts": {},
                "high_confidence": 0,
                "low_confidence": 0
            }
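

# A minimal end-to-end sketch of the adaptive flow. The model names
# ("default_model", "technical_model") come from the strategy table above, but
# the available-model list, sample query, threshold, and the latency/quality
# numbers fed back are illustrative only.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    strategies = AdaptiveStrategies(enabled=True, confidence_threshold=0.3)

    query = "how to debug api version compatibility errors"
    model = strategies.select_model(
        query,
        available_models=["default_model", "technical_model"],
        default_model="default_model",
    )

    # Adapt parameters for the selected model from a base configuration;
    # a technical query raises max_candidates (here 50 -> 75)
    config = strategies.adapt_parameters(
        query, model, base_config={"batch_size": 16, "max_candidates": 50}
    )
    print(f"model={model}, config={config}")

    # Feed back an (illustrative) observed latency and quality score so that
    # dynamic switching has history to draw on
    strategies.record_performance(
        model=model, query_type="technical", latency_ms=42.0, quality_score=0.9
    )
    print(strategies.get_stats())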