import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "georgesung/llama2_7b_chat_uncensored"

# Load the tokenizer and model. use_fast=False selects the slow SentencePiece
# tokenizer; device_map="auto" spreads the fp16 weights across available devices.
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)

def chat(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )
    # Slice off the prompt tokens so only the newly generated reply is returned;
    # decoding outputs[0] directly would echo the user's prompt back into the UI.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

demo = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=3, placeholder="Type your message here..."),
    outputs="text",
    title="Llama2 7B Uncensored Chatbot",
)

demo.launch()
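
# Optional: the model card for georgesung/llama2_7b_chat_uncensored describes a
# "### HUMAN:" / "### RESPONSE:" fine-tuning template. Below is a minimal sketch
# of wrapping the raw user message in that template before generation. The exact
# template text is an assumption taken from the model card; verify it against
# the card before relying on it.
def format_prompt(user_message):
    return f"### HUMAN:\n{user_message}\n\n### RESPONSE:\n"

# Usage sketch: pass the templated string into chat() instead of the raw message,
# e.g. chat(format_prompt("Hello")), so sampling continues after "### RESPONSE:".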