import time
import logging
from datetime import datetime
from typing import List, Dict, Optional, Union
from core.providers.base import LLMProvider
from utils.config import config
from services.weather import weather_service

logger = logging.getLogger(__name__)

try:
    from openai import OpenAI
    HUGGINGFACE_SDK_AVAILABLE = True
except ImportError:
    HUGGINGFACE_SDK_AVAILABLE = False
    OpenAI = None

class HuggingFaceProvider(LLMProvider):
    """Hugging Face LLM provider implementation with cached validation"""
    
    def __init__(self, model_name: str, timeout: int = 30, max_retries: int = 3):
        super().__init__(model_name, timeout, max_retries)
        logger.info(f"Initializing HuggingFaceProvider with:")
        logger.info(f" HF_API_URL: {config.hf_api_url}")
        logger.info(f" HF_TOKEN SET: {bool(config.hf_token)}")
        
        if not HUGGINGFACE_SDK_AVAILABLE:
            raise ImportError("Hugging Face provider requires 'openai' package")
            
        if not config.hf_token:
            raise ValueError("HF_TOKEN not set - required for Hugging Face provider")
            
        # Note: the OpenAI client does not accept a proxies parameter, so none is passed here
        try:
            self.client = OpenAI(
                base_url=config.hf_api_url,
                api_key=config.hf_token
            )
            logger.info("HuggingFaceProvider initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize HuggingFaceProvider: {e}")
            logger.error(f"Error type: {type(e)}")
            raise
            
        # Cache model-validation results so we don't hit the API on every call
        self._model_validated = False
        self._last_validation = 0
        self._validation_cache_duration = 300  # 5 minutes
            
    def generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
        """Generate a response synchronously"""
        try:
            return self._retry_with_backoff(self._generate_impl, prompt, conversation_history)
        except Exception as e:
            logger.error(f"Hugging Face generation failed: {e}")
            return None
            
    def stream_generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[Union[str, List[str]]]:
        """Generate a response with streaming support"""
        try:
            return self._retry_with_backoff(self._stream_generate_impl, prompt, conversation_history)
        except Exception as e:
            logger.error(f"Hugging Face stream generation failed: {e}")
            return None
            
    def validate_model(self) -> bool:
        """Validate if the model is available with caching"""
        current_time = time.time()
        if (self._model_validated and 
            current_time - self._last_validation < self._validation_cache_duration):
            return True
            
        try:
            self.client.models.list()
            self._model_validated = True
            self._last_validation = current_time
            return True
        except Exception as e:
            logger.warning(f"Hugging Face model validation failed: {e}")
            return False
            
    def _generate_impl(self, prompt: str, conversation_history: List[Dict]) -> str:
        """Synchronous generation with sampling configuration and context injection.

        `prompt` is kept for interface compatibility; the request is built from
        conversation_history, which is expected to already contain it.
        """
        # Inject context message with current time/date/weather
        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
        weather_summary = weather_service.get_weather_summary()
        context_msg = {
            "role": "system",
            "content": f"[Current Context: {current_time} | Weather: {weather_summary}]"
        }
        enhanced_history = [context_msg] + conversation_history
        
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=enhanced_history,
                max_tokens=8192,  # maximum completion length
                temperature=0.7,
                top_p=0.9,
                frequency_penalty=0.1,
                presence_penalty=0.1
            )
            return response.choices[0].message.content
        except Exception as e:
            # Handle scale-to-zero behavior
            if self._is_scale_to_zero_error(e):
                logger.info("Hugging Face endpoint is scaling up, waiting...")
                time.sleep(60)  # Wait for endpoint to initialize
                
                # Retry once after waiting
                response = self.client.chat.completions.create(
                    model=self.model_name,
                    messages=enhanced_history,
                    max_tokens=8192,  # maximum completion length
                    temperature=0.7,
                    top_p=0.9,
                    frequency_penalty=0.1,
                    presence_penalty=0.1
                )
                return response.choices[0].message.content
            else:
                raise
                
    def _stream_generate_impl(self, prompt: str, conversation_history: List[Dict]) -> List[str]:
        """Streaming generation with sampling configuration and context injection.

        `prompt` is kept for interface compatibility; the request is built from
        conversation_history, which is expected to already contain it.
        """
        # Inject context message with current time/date/weather
        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
        weather_summary = weather_service.get_weather_summary()
        context_msg = {
            "role": "system",
            "content": f"[Current Context: {current_time} | Weather: {weather_summary}]"
        }
        enhanced_history = [context_msg] + conversation_history
        
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=enhanced_history,
                max_tokens=8192,  # maximum completion length
                temperature=0.7,
                top_p=0.9,
                frequency_penalty=0.1,
                presence_penalty=0.1,
                stream=True  # Enable streaming
            )
            chunks = []
            for chunk in response:
                if not chunk.choices:
                    continue  # skip keep-alive frames that carry no choices
                content = chunk.choices[0].delta.content
                if content:
                    chunks.append(content)
            return chunks
        except Exception as e:
            # Handle scale-to-zero behavior
            if self._is_scale_to_zero_error(e):
                logger.info("Hugging Face endpoint is scaling up, waiting...")
                time.sleep(60)  # Wait for endpoint to initialize
                
                # Retry once after waiting
                response = self.client.chat.completions.create(
                    model=self.model_name,
                    messages=enhanced_history,
                    max_tokens=8192,  # maximum completion length
                    temperature=0.7,
                    top_p=0.9,
                    frequency_penalty=0.1,
                    presence_penalty=0.1,
                    stream=True  # Enable streaming
                )
                chunks = []
                for chunk in response:
                    if not chunk.choices:
                        continue  # skip keep-alive frames that carry no choices
                    content = chunk.choices[0].delta.content
                    if content:
                        chunks.append(content)
                return chunks
            else:
                raise
                
    def _is_scale_to_zero_error(self, error: Exception) -> bool:
        """Check if the error is related to scale-to-zero initialization"""
        error_str = str(error).lower()
        scale_to_zero_indicators = [
            "503",
            "service unavailable",
            "initializing",
            "cold start"
        ]
        return any(indicator in error_str for indicator in scale_to_zero_indicators)
        
    def _get_weather_summary(self) -> str:
        """Get formatted weather summary"""
        try:
            weather = weather_service.get_current_weather_cached(
                "New York", 
                ttl_hash=weather_service._get_ttl_hash(300)
            )
            if weather:
                return f"{weather.get('temperature', 'N/A')}°C, {weather.get('description', 'Clear skies')}"
            else:
                return "Clear skies"
        except Exception:
            logger.debug("Weather lookup failed; falling back to default summary")
            return "Clear skies"