"""
GPU monitoring service for Video Model Studio.
Tracks NVIDIA GPU resources like utilization, memory, and temperature.
"""

import time
import logging
from typing import Dict, List, Any
from collections import deque
from datetime import datetime

import pandas as pd

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Optional import of pynvml
try:
    import pynvml
    PYNVML_AVAILABLE = True
except ImportError:
    PYNVML_AVAILABLE = False
    logger.info("pynvml not available, GPU monitoring will be limited")

class GPUMonitoringService:
    """Service for monitoring NVIDIA GPU resources"""
    
    def __init__(self, history_minutes: int = 10, sample_interval: int = 5):
        """Initialize the GPU monitoring service
        
        Args:
            history_minutes: How many minutes of history to keep
            sample_interval: How many seconds between samples
        """
        self.history_minutes = history_minutes
        self.sample_interval = sample_interval
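        # e.g. 10 minutes of history at one sample every 5 seconds -> 120 samples kept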
        self.max_samples = (history_minutes * 60) // sample_interval
        
        # Track if the monitoring thread is running
        self.is_running = False
        self.thread = None
        
        # Check if NVIDIA GPUs are available
        self.has_nvidia_gpus = False
        self.gpu_count = 0
        self.device_info = []
        self.history = {}
        
        # Try to initialize NVML
        self._initialize_nvml()
        
        # Initialize history data structures if GPUs are available
        if self.has_nvidia_gpus:
            self._initialize_history()
    
    def _initialize_nvml(self):
        """Initialize NVIDIA Management Library"""
        if not PYNVML_AVAILABLE:
            logger.info("pynvml module not installed, GPU monitoring disabled")
            return
            
        try:
            pynvml.nvmlInit()
            self.gpu_count = pynvml.nvmlDeviceGetCount()
            self.has_nvidia_gpus = self.gpu_count > 0
            
            if self.has_nvidia_gpus:
                logger.info(f"Successfully initialized NVML, found {self.gpu_count} GPU(s)")
                # Get static information about each GPU
                for i in range(self.gpu_count):
                    self.device_info.append(self._get_device_info(i))
            else:
                logger.info("No NVIDIA GPUs found")
                
        except Exception as e:
            logger.warning(f"Failed to initialize NVML: {str(e)}")
            self.has_nvidia_gpus = False
    
    def _initialize_history(self):
        """Initialize data structures for storing metric history"""
        for i in range(self.gpu_count):
            self.history[i] = {
                'timestamps': deque(maxlen=self.max_samples),
                'utilization': deque(maxlen=self.max_samples),
                'memory_used': deque(maxlen=self.max_samples),
                'memory_total': deque(maxlen=self.max_samples),
                'memory_percent': deque(maxlen=self.max_samples),
                'temperature': deque(maxlen=self.max_samples),
                'power_usage': deque(maxlen=self.max_samples),
                'power_limit': deque(maxlen=self.max_samples),
            }
    
    def _get_device_info(self, device_index: int) -> Dict[str, Any]:
        """Get static information about a GPU device
        
        Args:
            device_index: Index of the GPU device
            
        Returns:
            Dictionary with device information
        """
        if not PYNVML_AVAILABLE or not self.has_nvidia_gpus:
            return {"error": "NVIDIA GPUs not available"}
            
        try:
            handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
            
            # Get device name (decode if it's bytes)
            name = pynvml.nvmlDeviceGetName(handle)
            if isinstance(name, bytes):
                name = name.decode('utf-8')
                
            # Get device UUID
            uuid = pynvml.nvmlDeviceGetUUID(handle)
            if isinstance(uuid, bytes):
                uuid = uuid.decode('utf-8')
                
            # Get memory info, compute capability
            memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
            compute_capability = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
            
            # Get power limits if available
            try:
                power_limit = pynvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000.0  # Convert to watts
            except pynvml.NVMLError:
                power_limit = None
                
            return {
                'index': device_index,
                'name': name,
                'uuid': uuid,
                'memory_total': memory_info.total,
                'memory_total_gb': memory_info.total / (1024**3),  # Convert to GB
                'compute_capability': f"{compute_capability[0]}.{compute_capability[1]}",
                'power_limit': power_limit
            }
            
        except Exception as e:
            logger.error(f"Error getting device info for GPU {device_index}: {str(e)}")
            return {"error": str(e), "index": device_index}
    
    def collect_gpu_metrics(self) -> List[Dict[str, Any]]:
        """Collect current GPU metrics for all available GPUs
        
        Returns:
            List of dictionaries with current metrics for each GPU
        """
        if not PYNVML_AVAILABLE or not self.has_nvidia_gpus:
            return []
            
        metrics = []
        timestamp = datetime.now()
        
        for i in range(self.gpu_count):
            try:
                handle = pynvml.nvmlDeviceGetHandleByIndex(i)
                
                # Get utilization rates
                utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
                
                # Get memory information
                memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                
                # Get temperature
                temperature = pynvml.nvmlDeviceGetTemperature(handle, pynvml.NVML_TEMPERATURE_GPU)
                
                # Get power usage if available
                try:
                    power_usage = pynvml.nvmlDeviceGetPowerUsage(handle) / 1000.0  # Convert to watts
                except pynvml.NVMLError:
                    power_usage = None
                
                # Get process information
                processes = []
                try:
                    for proc in pynvml.nvmlDeviceGetComputeRunningProcesses(handle):
                        try:
                            process_name = pynvml.nvmlSystemGetProcessName(proc.pid)
                            if isinstance(process_name, bytes):
                                process_name = process_name.decode('utf-8')
                        except pynvml.NVMLError:
                            process_name = f"Unknown (PID: {proc.pid})"
                            
                        # usedGpuMemory can be None on some platforms (e.g. WDDM
                        # mode or insufficient permissions), so guard the conversion
                        mem_used = proc.usedGpuMemory
                        processes.append({
                            'pid': proc.pid,
                            'name': process_name,
                            'memory_used': mem_used,
                            'memory_used_mb': mem_used / (1024**2) if mem_used is not None else None  # Convert to MB
                        })
                except pynvml.NVMLError:
                    # Unable to get process information, continue with empty list
                    pass
                
                gpu_metrics = {
                    'index': i,
                    'timestamp': timestamp,
                    'utilization_gpu': utilization.gpu,
                    'utilization_memory': utilization.memory,
                    'memory_total': memory_info.total,
                    'memory_used': memory_info.used,
                    'memory_free': memory_info.free,
                    'memory_percent': (memory_info.used / memory_info.total) * 100,
                    'temperature': temperature,
                    'power_usage': power_usage,
                    'processes': processes
                }
                
                metrics.append(gpu_metrics)
                
            except Exception as e:
                logger.error(f"Error collecting metrics for GPU {i}: {str(e)}")
                metrics.append({
                    'index': i,
                    'error': str(e)
                })
        
        return metrics
    
    def update_history(self):
        """Update GPU metrics history"""
        if not self.has_nvidia_gpus:
            return
            
        current_metrics = self.collect_gpu_metrics()
        timestamp = datetime.now()
        
        for gpu_metrics in current_metrics:
            if 'error' in gpu_metrics:
                continue
                
            idx = gpu_metrics['index']
            
            self.history[idx]['timestamps'].append(timestamp)
            self.history[idx]['utilization'].append(gpu_metrics['utilization_gpu'])
            self.history[idx]['memory_used'].append(gpu_metrics['memory_used'])
            self.history[idx]['memory_total'].append(gpu_metrics['memory_total'])
            self.history[idx]['memory_percent'].append(gpu_metrics['memory_percent'])
            self.history[idx]['temperature'].append(gpu_metrics['temperature'])
            
            if gpu_metrics['power_usage'] is not None:
                self.history[idx]['power_usage'].append(gpu_metrics['power_usage'])
            else:
                self.history[idx]['power_usage'].append(0)
                
            # Store power limit in history (static but kept for consistency)
            info = self.device_info[idx]
            if 'power_limit' in info and info['power_limit'] is not None:
                self.history[idx]['power_limit'].append(info['power_limit'])
            else:
                self.history[idx]['power_limit'].append(0)
    
    def start_monitoring(self):
        """Start background thread for collecting GPU metrics"""
        if self.is_running:
            logger.warning("GPU monitoring thread already running")
            return
            
        if not self.has_nvidia_gpus:
            logger.info("No NVIDIA GPUs found, not starting monitoring thread")
            return
            
        import threading
        
        self.is_running = True
        
        def _monitor_loop():
            while self.is_running:
                try:
                    self.update_history()
                    time.sleep(self.sample_interval)
                except Exception as e:
                    logger.error(f"Error in GPU monitoring thread: {str(e)}", exc_info=True)
                    time.sleep(self.sample_interval)
        
        self.thread = threading.Thread(target=_monitor_loop, daemon=True)
        self.thread.start()
        logger.info("GPU monitoring thread started")
    
    def stop_monitoring(self):
        """Stop the GPU monitoring thread"""
        if not self.is_running:
            return
            
        self.is_running = False
        if self.thread:
            # Give the loop time to notice the flag; it sleeps sample_interval seconds
            self.thread.join(timeout=self.sample_interval + 1.0)
            if self.thread.is_alive():
                logger.warning("GPU monitoring thread did not stop within the timeout")
            else:
                logger.info("GPU monitoring thread stopped")
    
    def get_gpu_info(self) -> List[Dict[str, Any]]:
        """Get information about all available GPUs
        
        Returns:
            List of dictionaries with GPU information
        """
        return self.device_info
    
    def get_current_metrics(self) -> List[Dict[str, Any]]:
        """Get current metrics for all GPUs
        
        Returns:
            List of dictionaries with current GPU metrics
        """
        return self.collect_gpu_metrics()
    
    def get_utilization_data(self, gpu_index: int) -> pd.DataFrame:
        """Get utilization data as a DataFrame
        
        Args:
            gpu_index: Index of the GPU to get data for
            
        Returns:
            DataFrame with time, utilization, and temperature
        """
        if not self.has_nvidia_gpus or gpu_index not in self.history:
            return pd.DataFrame()
            
        history = self.history[gpu_index]
        if not history['timestamps']:
            return pd.DataFrame()
            
        # Convert to dataframe
        return pd.DataFrame({
            'time': list(history['timestamps']),
            'GPU Utilization (%)': list(history['utilization']),
            'Temperature (°C)': list(history['temperature'])
        })
        
    def get_memory_data(self, gpu_index: int) -> pd.DataFrame:
        """Get memory data as a DataFrame
        
        Args:
            gpu_index: Index of the GPU to get data for
            
        Returns:
            DataFrame with time, memory percent, and memory used
        """
        if not self.has_nvidia_gpus or gpu_index not in self.history:
            return pd.DataFrame()
            
        history = self.history[gpu_index]
        if not history['timestamps']:
            return pd.DataFrame()
            
        # Convert to dataframe
        memory_used_gb = [m / (1024**3) for m in history['memory_used']]
        return pd.DataFrame({
            'time': list(history['timestamps']),
            'Memory Usage (%)': list(history['memory_percent']),
            'Memory Used (GB)': memory_used_gb
        })
    
    def get_power_data(self, gpu_index: int) -> pd.DataFrame:
        """Get power data as a DataFrame
        
        Args:
            gpu_index: Index of the GPU to get data for
            
        Returns:
            DataFrame with time and power usage
        """
        if not self.has_nvidia_gpus or gpu_index not in self.history:
            return pd.DataFrame()
            
        history = self.history[gpu_index]
        if not history['timestamps'] or not any(history['power_usage']):
            return pd.DataFrame()
            
        power_limit = max(history['power_limit']) if any(history['power_limit']) else None
        
        df = pd.DataFrame({
            'time': list(history['timestamps']),
            'Power Usage (W)': list(history['power_usage'])
        })
        
        if power_limit and power_limit > 0:
            df['Power Limit (W)'] = power_limit
        
        return df
            
    def shutdown(self):
        """Clean up resources when shutting down"""
        self.stop_monitoring()
        
        # Shutdown NVML if it was initialized
        if PYNVML_AVAILABLE and self.has_nvidia_gpus:
            try:
                pynvml.nvmlShutdown()
                logger.info("NVML shutdown complete")
            except Exception as e:
                logger.error(f"Error during NVML shutdown: {str(e)}")