import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer once at startup rather than on every request.
# "your-model-name" is a placeholder for a text-generation checkpoint on the Hub.
model_name = "your-model-name"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def chatbot(input_text):
    # Tokenize the user's message
    inputs = tokenizer(input_text, return_tensors="pt")
    # Generate a response and decode it back into text
    outputs = model.generate(**inputs, max_new_tokens=100)
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return output_text

demov2 = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="My Chatbot",
    description="A chatbot that uses Hugging Face models to respond to user input",
)

demov2.launch()
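# The original imports also referenced huggingface_hub's inference client, which
# suggests the Hosted Inference API was intended as an option. Below is a minimal
# sketch, assuming the placeholder "your-model-name" points to a hosted
# text-generation model; swap it into gr.Interface in place of the local chatbot
# function if you prefer not to load weights locally.
from huggingface_hub import InferenceClient

client = InferenceClient(model="your-model-name")

def chatbot_via_api(input_text):
    # Send the prompt to the Inference API and return the generated text
    return client.text_generation(input_text, max_new_tokens=100)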