"""
Mock LLM adapter for testing without external dependencies.
This adapter returns canned, template-based responses for testing purposes,
eliminating the need for Ollama or other external LLM services.
Architecture Notes:
- Provides consistent test responses with proper citation format
- Supports all required adapter interface methods
- Returns predictable outputs for reliable testing
- No external dependencies or network calls
"""
import logging
from typing import Dict, Any, Optional, Iterator
import random
from .base_adapter import BaseLLMAdapter, LLMError
from ..base import GenerationParams
logger = logging.getLogger(__name__)
class MockLLMAdapter(BaseLLMAdapter):
"""
Mock adapter for testing without external LLM dependencies.
Features:
- Canned, template-based responses for testing (use fixed_response for exact determinism)
- Proper citation formatting
- Configurable response patterns
- No external dependencies
- Fast response times
Configuration:
- response_pattern: Type of response to generate (default: "technical")
- include_citations: Whether to include citations (default: True)
- simulate_errors: Whether to simulate errors (default: False)
- fixed_response: Use a fixed response instead of patterns (optional)
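Example (an illustrative sketch; assumes GenerationParams from ..base accepts
defaults and that production callers go through the public BaseLLMAdapter
entry point rather than these internal hooks):
    adapter = MockLLMAdapter(response_pattern="simple", include_citations=False)
    raw = adapter._make_request("What is branch prediction?", GenerationParams())
    text = adapter._parse_response(raw)
    # e.g. "branch prediction refers to a fundamental concept."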
"""
# Predefined response patterns for different test scenarios
RESPONSE_PATTERNS = {
"technical": {
"template": "Based on the technical documentation, {topic} is {description}. The implementation details show that {detail} [Document {doc_num}].",
"descriptions": [
"a key component of the system architecture",
"an important feature for performance optimization",
"designed for scalability and reliability",
"implemented using industry best practices"
],
"details": [
"it uses advanced algorithms for efficient processing",
"the modular design allows for easy extension",
"performance benchmarks show significant improvements",
"the architecture supports both local and distributed deployment"
]
},
"simple": {
"template": "{topic} refers to {description} [Document {doc_num}].",
"descriptions": [
"a fundamental concept",
"an essential component",
"a critical feature",
"an important aspect"
],
"details": []
},
"detailed": {
"template": "The query about {topic} can be answered as follows:\n\n{description}\n\nKey points include:\n1. {point1} [Document {doc1}]\n2. {point2} [Document {doc2}]\n3. {point3} [Document {doc3}]\n\nIn conclusion, {conclusion} [Document {doc4}].",
"descriptions": [
"This is a comprehensive topic that requires detailed explanation",
"This involves multiple interconnected components",
"This represents a complex system with various considerations"
],
"points": [
"The architecture is designed for modularity",
"Performance optimization is a key consideration",
"The system supports various configuration options",
"Integration with external services is supported",
"Error handling is comprehensive and robust"
],
"conclusions": [
"the implementation provides a solid foundation",
"the system meets all specified requirements",
"the design ensures future scalability"
]
}
}
def __init__(self,
model_name: str = "mock-model",
response_pattern: str = "technical",
include_citations: bool = True,
simulate_errors: bool = False,
fixed_response: Optional[str] = None,
config: Optional[Dict[str, Any]] = None):
"""
Initialize mock adapter.
Args:
model_name: Mock model name for identification
response_pattern: Type of response pattern to use
include_citations: Whether to include document citations
simulate_errors: Whether to simulate random errors
fixed_response: Use this fixed response if provided
config: Additional configuration
"""
adapter_config = {
'response_pattern': response_pattern,
'include_citations': include_citations,
'simulate_errors': simulate_errors,
'fixed_response': fixed_response,
**(config or {})
}
super().__init__(model_name, adapter_config)
self.response_pattern = adapter_config['response_pattern']
self.include_citations = adapter_config['include_citations']
self.simulate_errors = adapter_config['simulate_errors']
self.fixed_response = adapter_config.get('fixed_response')
logger.info(f"Initialized Mock adapter with pattern '{response_pattern}'")
def _make_request(self, prompt: str, params: GenerationParams) -> Dict[str, Any]:
"""
Generate a mock response without external calls.
Args:
prompt: The prompt to respond to
params: Generation parameters (mostly ignored for mock)
Returns:
Mock response in expected format
"""
# Simulate errors if configured (about 10% of calls raise LLMError)
if self.simulate_errors and random.random() < 0.1:
raise LLMError("Simulated mock error for testing")
# Generate response text
if self.fixed_response:
response_text = self.fixed_response
else:
response_text = self._generate_response(prompt, params)
# Build mock response structure
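# Field names and nanosecond durations loosely mirror an Ollama-style
# response (an assumption based on the module docstring); only "response"
# is actually read back by _parse_response.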
response = {
"model": self.model_name,
"response": response_text,
"done": True,
"context": [],
"total_duration": 1000000, # 1ms in nanoseconds
"load_duration": 0,
"prompt_eval_count": len(prompt.split()),
"prompt_eval_duration": 500000,
"eval_count": len(response_text.split()),
"eval_duration": 500000
}
return response
def _parse_response(self, response: Dict[str, Any]) -> str:
"""
Extract text from mock response.
Args:
response: Mock response dict
Returns:
Response text
"""
return response.get("response", "")
def _get_provider_name(self) -> str:
"""Return the provider name."""
return "Mock"
def _validate_model(self) -> bool:
"""Mock models are always valid."""
return True
def _supports_streaming(self) -> bool:
"""Mock adapter can simulate streaming."""
return True
def generate_streaming(self, prompt: str, params: GenerationParams) -> Iterator[str]:
"""
Generate a mock streaming response.
Args:
prompt: The prompt to respond to
params: Generation parameters
Yields:
Response chunks
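Example (illustrative; GenerationParams is assumed to accept defaults):
    for chunk in MockLLMAdapter().generate_streaming("Explain memory management", GenerationParams()):
        print(chunk, end="")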
"""
# Get full response (honor fixed_response, mirroring _make_request)
full_response = self.fixed_response if self.fixed_response else self._generate_response(prompt, params)
# Simulate streaming by yielding word by word
words = full_response.split()
for i, word in enumerate(words):
if i < len(words) - 1:
yield word + " "
else:
yield word
def _generate_response(self, prompt: str, params: GenerationParams) -> str:
"""
Generate a response based on the configured pattern.
Args:
prompt: The user's prompt
params: Generation parameters
Returns:
Generated response text
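For example, with topic "memory management" the "simple" pattern yields
text like "memory management refers to an essential component [Document 2].";
"technical" and "detailed" fill their longer templates the same way.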
"""
# Extract topic from prompt (simple heuristic)
prompt_lower = prompt.lower()
topic = "the requested information"
# Try to extract a more specific topic
if "risc-v" in prompt_lower:
topic = "RISC-V"
elif "pipeline" in prompt_lower:
topic = "pipeline architecture"
elif "memory" in prompt_lower:
topic = "memory management"
elif "what is" in prompt_lower:
# Extract what comes after "what is"
parts = prompt_lower.split("what is")
if len(parts) > 1:
topic = parts[1].strip().rstrip("?").strip()
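# e.g. "What is branch prediction?" -> topic "branch prediction";
# prompts matching none of the heuristics keep "the requested information"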
# Get pattern configuration
pattern_config = self.RESPONSE_PATTERNS.get(
self.response_pattern,
self.RESPONSE_PATTERNS["technical"]
)
# Generate response based on pattern
if self.response_pattern == "simple":
description = random.choice(pattern_config["descriptions"])
response = pattern_config["template"].format(
topic=topic,
description=description,
doc_num=random.randint(1, 5) if self.include_citations else ""
)
elif self.response_pattern == "detailed":
description = random.choice(pattern_config["descriptions"])
points = random.sample(pattern_config["points"], 3)
conclusion = random.choice(pattern_config["conclusions"])
response = pattern_config["template"].format(
topic=topic,
description=description,
point1=points[0],
point2=points[1],
point3=points[2],
conclusion=conclusion,
doc1=1 if self.include_citations else "",
doc2=2 if self.include_citations else "",
doc3=3 if self.include_citations else "",
doc4=4 if self.include_citations else ""
)
else: # technical (default)
description = random.choice(pattern_config["descriptions"])
detail = random.choice(pattern_config["details"])
response = pattern_config["template"].format(
topic=topic,
description=description,
detail=detail,
doc_num=random.randint(1, 5) if self.include_citations else ""
)
# Remove citation markers if not including citations
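# e.g. "... a fundamental concept [Document ]." -> "... a fundamental concept."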
if not self.include_citations:
response = response.replace(" [Document ]", "").replace(" []", "")
return response
def get_model_info(self) -> Dict[str, Any]:
"""
Get mock model information.
Returns:
Model info dict
"""
info = super().get_model_info()
info.update({
'is_mock': True,
'response_pattern': self.response_pattern,
'include_citations': self.include_citations,
'simulate_errors': self.simulate_errors
})
return info
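
# Illustrative pytest-style usage (a sketch, not shipped tests; assumes
# GenerationParams() accepts defaults and that the package-relative imports
# above resolve in the test environment):
#
#     def test_mock_adapter_streams_full_sentence():
#         adapter = MockLLMAdapter(response_pattern="simple", simulate_errors=False)
#         chunks = list(adapter.generate_streaming("What is branch prediction?",
#                                                  GenerationParams()))
#         assert "".join(chunks).endswith(".")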