File size: 1,305 Bytes
3f8013c
352c3dc
a2f89f6
3f8013c
a2f89f6
3f8013c
4592799
4df8aca
a2f89f6
3f8013c
 
 
a2f89f6
 
 
 
 
 
 
 
 
3f8013c
a2f89f6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3f8013c
 
a2f89f6
 
 
4592799
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import gradio as gr
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
import os

# Initialize the model using the secret token
llm = HuggingFaceEndpoint(
    repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",  # 1.1B-parameter chat model on the HF Hub
    task="text-generation",
    # NOTE(review): os.environ.get returns None when HF_TOKEN is unset —
    # the endpoint would then fail authentication at request time; confirm
    # the deployment always provides this secret.
    huggingfacehub_api_token=os.environ.get('HF_TOKEN')  # Access token from environment secret
)
# Wrap the raw text-generation endpoint in LangChain's chat adapter so
# `model.invoke(...)` accepts a plain string and returns a message object.
model = ChatHuggingFace(llm=llm)

def process_query(query):
    """
    Process the user query and return the model's response.

    Parameters
    ----------
    query : str
        The user's question as entered in the Gradio textbox.

    Returns
    -------
    str
        The model's answer text, a prompt asking for input when the
        query is empty, or an error description if the model call fails.
    """
    # Guard against empty / whitespace-only input so we don't make a
    # pointless (and potentially billed) call to the hosted model.
    if not query or not query.strip():
        return "Please enter a question."
    try:
        result = model.invoke(query)
        return result.content
    except Exception as e:
        # Surface the failure as text in the UI instead of crashing the app.
        return f"An error occurred: {str(e)}"

# Build the web UI. The input/output widgets are named up front so the
# Interface(...) call below reads as pure wiring.
question_box = gr.Textbox(
    lines=2,
    placeholder="Enter your question here...",
    label="Question",
)
answer_box = gr.Textbox(
    label="Answer",
    lines=5,
)

interface = gr.Interface(
    fn=process_query,
    inputs=question_box,
    outputs=answer_box,
    title="TinyLlama Chat Assistant",
    description="Ask any question and get answers from TinyLlama-1.1B-Chat model",
    examples=[
        ["Who is the Prime Minister of India?"],
        ["What is artificial intelligence?"],
        ["Explain quantum computing in simple terms."],
    ],
    theme=gr.themes.Soft(),
)

# Launch the interface only when executed as a script (not on import),
# so the module can be imported for testing without starting a server.
if __name__ == "__main__":
    interface.launch()