import gradio as gr
import psutil
import torch


def get_available_memory():
    """Return the total available memory in GB and whether it is GPU or system RAM."""
    if torch.cuda.is_available():
        # Report the total memory of the first CUDA device, converted from bytes to GB.
        total_memory = torch.cuda.get_device_properties(0).total_memory / (1024 ** 3)
        return total_memory, "GPU RAM"
    else:
        # No GPU available: fall back to total system RAM.
        total_memory = psutil.virtual_memory().total / (1024 ** 3)
        return total_memory, "System RAM"


total_memory, memory_type = get_available_memory()

# Budget roughly 4.5 GB per instance; keep at least 1 so the slider range stays valid
# even on machines with less than 4.5 GB of memory.
max_instances = max(1, int(total_memory // 4.5))


def update_usage(num_instances):
    # Clamp the request to what fits in memory and warn the user if it was exceeded.
    if num_instances > max_instances:
        num_instances = max_instances
        warning = f"⚠️ You tried to exceed the available {memory_type}. Max instances set to {max_instances}."
    else:
        warning = ""

    usage = num_instances * 4.5
    usage_percentage = min((usage / total_memory) * 100, 100)

    # HTML progress bar whose filled width tracks the usage percentage.
    bar_html = f"""
    <div style="width: 100%; background-color: #d6e6f2; border-radius: 12px; overflow: hidden; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);">
        <div style="width: {usage_percentage}%; background: linear-gradient(to right, #1e3c72, #2a5298); height: 30px; transition: width 0.3s;"></div>
    </div>
    """
    memory_text = f"{usage:.2f} GB / {total_memory:.2f} GB ({memory_type})"

    return num_instances, memory_text, bar_html, warning


with gr.Blocks() as demo:
    gr.Markdown(f"# Memory Usage Tracker ({memory_type})")

    slider = gr.Slider(minimum=1, maximum=max_instances, step=1, value=1, label="Number of Instances")
    memory_display = gr.Label(value=f"0.00 GB / {total_memory:.2f} GB ({memory_type})")
    progress_bar = gr.HTML(value="<div style='height: 30px;'></div>")
    warning_message = gr.HTML(value="")

    def handle_slider_change(num_instances):
        # Recompute the usage figures and pass them straight through to the UI components.
        num_instances, memory_text, bar_html, warning = update_usage(num_instances)
        return num_instances, memory_text, bar_html, warning

    slider.change(handle_slider_change, inputs=[slider], outputs=[slider, memory_display, progress_bar, warning_message])

demo.launch()