import argparse
import gradio as gr
from ui import chat
import os
from dotenv import load_dotenv
# Pull variables from a local .env file into the process environment.
load_dotenv()
# Basic-auth credentials consumed by launch(auth=...) in main().
# NOTE(review): os.getenv returns None when a key is unset, and on Windows
# the OS predefines USERNAME -- confirm the .env always sets both keys.
USERNAME = os.getenv("USERNAME")
PWD = os.getenv("USER_PWD")
def main(args):
    """Build and launch the password-protected Gradio chat interface.

    Args:
        args: Parsed CLI namespace; only ``args.port`` (int) is used.

    Raises:
        RuntimeError: If the USERNAME / USER_PWD environment variables are
            missing, since ``launch(auth=...)`` needs both credentials.
    """
    # Fail fast with an actionable message instead of handing Gradio an
    # auth tuple containing None.
    if not USERNAME or not PWD:
        raise RuntimeError(
            "Missing credentials: set USERNAME and USER_PWD in the environment or .env file"
        )

    demo = gr.ChatInterface(
        fn=chat,  # chat handler imported from ui
        examples=[
            "Explain the AI adoption challenges for enterprises.",
            # Typo fix: "halucinam" -> "alucinam" (correct Portuguese spelling).
            "Por que os grandes modelos de linguagem de AI alucinam?",
            "How can we identify a fraud transaction?",
        ],
        title="Chat and LLM server in the same application",
        # Grammar fixes: "let you ... idea" -> "lets you ... ideas".
        description="This space is a template that we can duplicate for your own usage. "
        "This space lets you build LLM powered ideas on top of [Gradio](https://www.gradio.app/) "
        "and open LLM served locally by [TGI(Text Generation Inference)](https://huggingface.co/docs/text-generation-inference/en/index). "
        "Below is a placeholder Gradio ChatInterface for you to try out Mistral-7B backed by the power of TGI's efficiency. \n\n"
        "To use this space for your own usecase, follow the simple steps below:\n"
        "1. Duplicate this space. \n"
        "2. Set which LLM you wish to use (i.e. mistralai/Mistral-7B-Instruct-v0.2). \n"
        "3. Inside app/main.py write Gradio application. \n",
        multimodal=False,
        theme='sudeepshouche/minimalist',
    )

    # Allow up to 20 concurrent requests, queueing at most 256 waiters.
    demo.queue(
        default_concurrency_limit=20,
        max_size=256,
    ).launch(
        auth=(USERNAME, PWD),    # HTTP basic auth for the whole app
        server_name="0.0.0.0",   # bind all interfaces (container-friendly)
        server_port=args.port,
    )
if __name__ == "__main__":
    # CLI entry point: the only knob is the port the Gradio server binds to.
    cli = argparse.ArgumentParser(description="A MAGIC example by ConceptaTech")
    cli.add_argument("--port", type=int, default=7860, help="Port to expose Gradio app")
    main(cli.parse_args())