|
import streamlit as st |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
import torch |
|
|
|
# Hugging Face model id — presumably a 7B causal LM fine-tuned for
# cybersecurity Q&A (inferred from the repo name; confirm on the model card).
MODEL_NAME = "segolilylabs/Lily-Cybersecurity-7B-v0.2"
|
|
|
@st.cache_resource()
def load_model():
    """Download and cache the tokenizer/model pair for MODEL_NAME.

    Cached via st.cache_resource so the (large) model is loaded once per
    Streamlit server process, not on every script rerun.

    Returns:
        tuple: (tokenizer, model) — the AutoTokenizer and the half-precision
        AutoModelForCausalLM placed automatically across available devices.
    """
    tok = AutoTokenizer.from_pretrained(MODEL_NAME)
    lm = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float16,  # halve memory footprint of the 7B weights
        device_map="auto",          # let accelerate pick GPU/CPU placement
    )
    return tok, lm
|
|
|
tokenizer, model = load_model()

# NOTE(review): original titles contained mojibake ("π", "π€") — repaired to
# the emoji they most plausibly were; adjust if a different glyph was intended.
st.title("🔐 Cybersecurity Chatbot")

user_input = st.text_input("Ask a cybersecurity question:")

if user_input:
    # Tokenize via __call__ so we also get an attention_mask, and move the
    # tensors to wherever device_map="auto" actually placed the model.
    # The previous hard-coded .to("cuda") crashed on CPU-only hosts.
    inputs = tokenizer(user_input, return_tensors="pt").to(model.device)

    # max_new_tokens bounds only the generated continuation; the old
    # max_length=200 counted prompt tokens too, so long questions left
    # almost no room for an answer.
    output = model.generate(**inputs, max_new_tokens=200)

    # Decode only the tokens produced after the prompt.
    prompt_len = inputs["input_ids"].shape[-1]
    response = tokenizer.decode(output[0, prompt_len:], skip_special_tokens=True)

    st.write("🤖", response)
|
|