Upload folder using huggingface_hub
- .gitattributes +1 -0
- README.md +7 -7
- files/audio.wav +0 -0
- files/avatar.png +0 -0
- files/sample.txt +1 -0
- files/world.mp4 +3 -0
- requirements.txt +2 -0
- run.ipynb +1 -0
- run.py +179 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+files/world.mp4 filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,12 +1,12 @@
+
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: chatbot_core_components_main
+emoji: 🔥
+colorFrom: indigo
+colorTo: indigo
 sdk: gradio
 sdk_version: 4.36.1
-app_file:
+app_file: run.py
 pinned: false
+hf_oauth: true
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
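The notable configuration change is `hf_oauth: true`, which enables Sign-in-with-Hugging-Face for the Space. A minimal illustrative sketch of how a Gradio 4.x app can use that setting (not part of run.py in this commit; assumes the documented gr.LoginButton / gr.OAuthProfile helpers):

import gradio as gr

# Illustrative sketch only (not part of this commit): with `hf_oauth: true` in the
# Space README, Gradio injects the logged-in user's profile into event handlers
# whose parameters are annotated with gr.OAuthProfile.
def greet(profile: gr.OAuthProfile | None) -> str:
    if profile is None:
        return "Not signed in."
    return f"Signed in as {profile.username}"

with gr.Blocks() as oauth_demo:
    gr.LoginButton()
    status = gr.Markdown()
    oauth_demo.load(greet, inputs=None, outputs=status)

if __name__ == "__main__":
    oauth_demo.launch()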
files/audio.wav
ADDED
Binary file (132 kB)
files/avatar.png
ADDED
Binary file
files/sample.txt
ADDED
@@ -0,0 +1 @@
hello friends
files/world.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71944d7430c461f0cd6e7fd10cee7eb72786352a3678fc7bc0ae3d410f72aece
size 1570024
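The three lines above are the Git LFS pointer stored in place of the actual MP4 (tracked via the .gitattributes rule added in this commit); the real bytes live in LFS storage, addressed by the sha256 oid. A small illustrative parser for this pointer format (not part of the commit):

# Illustrative sketch (not part of the commit): split a Git LFS pointer such as
# files/world.mp4 into its key/value fields (version, oid, size).
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:71944d7430c461f0cd6e7fd10cee7eb72786352a3678fc7bc0ae3d410f72aece\n"
    "size 1570024\n"
)
print(parse_lfs_pointer(pointer))
# -> {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:7194...', 'size': '1570024'}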
requirements.txt
ADDED
@@ -0,0 +1,2 @@
gradio-client @ git+https://github.com/gradio-app/gradio@bb504b494947a287d6386e0e7ead3860c0f15223#subdirectory=client/python
https://gradio-builds.s3.amazonaws.com/bb504b494947a287d6386e0e7ead3860c0f15223/gradio-4.36.1-py3-none-any.whl
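Both requirements pin the same Gradio commit (bb504b49...): the first installs the Python client from the repo's client/python subdirectory, the second installs a CI-built 4.36.1 wheel. A quick illustrative sanity check after installation (not part of the commit; assumes both packages expose __version__):

# Illustrative check (not part of the commit): confirm the pinned builds from
# requirements.txt are the ones actually installed in the environment.
import gradio
import gradio_client

print("gradio:", gradio.__version__)                # expected: 4.36.1
print("gradio_client:", gradio_client.__version__)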
run.ipynb
ADDED
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_core_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/audio.wav https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/audio.wav\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/avatar.png\n", "!wget -q -O files/sample.txt https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/sample.txt\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import plotly.express as px\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def random_plot():\n", " df = px.data.iris()\n", " fig = px.scatter(\n", " df,\n", " x=\"sepal_width\",\n", " y=\"sepal_length\",\n", " color=\"species\",\n", " size=\"petal_length\",\n", " hover_data=[\"petal_width\"],\n", " )\n", " return fig\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "\n", "def random_bokeh_plot():\n", " from bokeh.models import ColumnDataSource, Whisker\n", " from bokeh.plotting import figure\n", " from bokeh.sampledata.autompg2 import autompg2 as df\n", " from bokeh.transform import factor_cmap, jitter, factor_mark\n", "\n", " classes = list(sorted(df[\"class\"].unique()))\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", "\n", "\n", "def random_matplotlib_plot():\n", " import numpy as np\n", " import pandas as pd\n", " import matplotlib.pyplot as plt\n", "\n", " countries = [\"USA\", \"Canada\", \"Mexico\", \"UK\"]\n", " months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n", " m = months.index(\"January\")\n", " r = 3.2\n", " start_day = 30 * m\n", " final_day = 30 * (m + 1)\n", " x = np.arange(start_day, final_day + 1)\n", " pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n", " df = pd.DataFrame({\"day\": x})\n", " for country in 
countries:\n", " df[country] = x ** (r) * (pop_count[country] + 1)\n", "\n", " fig = plt.figure()\n", " plt.plot(df[\"day\"], df[countries].to_numpy())\n", " plt.title(\"Outbreak in \" + \"January\")\n", " plt.ylabel(\"Cases\")\n", " plt.xlabel(\"Days since Day 0\")\n", " plt.legend(countries)\n", " return fig\n", "\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "\n", "def bot(history, response_type):\n", " if response_type == \"plot\":\n", " history[-1][1] = gr.Plot(random_plot())\n", " elif response_type == \"bokeh_plot\":\n", " history[-1][1] = gr.Plot(random_bokeh_plot())\n", " elif response_type == \"matplotlib_plot\":\n", " history[-1][1] = gr.Plot(random_matplotlib_plot())\n", " elif response_type == \"gallery\":\n", " history[-1][1] = gr.Gallery(\n", " [os.path.join(\"files\", \"avatar.png\"), os.path.join(\"files\", \"avatar.png\")]\n", " )\n", " elif response_type == \"image\":\n", " history[-1][1] = gr.Image(os.path.join(\"files\", \"avatar.png\"))\n", " elif response_type == \"video\":\n", " history[-1][1] = gr.Video(os.path.join(\"files\", \"world.mp4\"))\n", " elif response_type == \"audio\":\n", " history[-1][1] = gr.Audio(os.path.join(\"files\", \"audio.wav\"))\n", " elif response_type == \"audio_file\":\n", " history[-1][1] = (os.path.join(\"files\", \"audio.wav\"), \"description\")\n", " elif response_type == \"image_file\":\n", " history[-1][1] = (os.path.join(\"files\", \"avatar.png\"), \"description\")\n", " elif response_type == \"video_file\":\n", " history[-1][1] = (os.path.join(\"files\", \"world.mp4\"), \"description\")\n", " elif response_type == \"txt_file\":\n", " history[-1][1] = (os.path.join(\"files\", \"sample.txt\"), \"description\")\n", " else:\n", " history[-1][1] = \"Cool!\"\n", " return history\n", "\n", "\n", "fig = random_plot()\n", "\n", "with gr.Blocks(fill_height=True) as demo:\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " scale=1,\n", " )\n", " response_type = gr.Radio(\n", " [\n", " \"audio_file\",\n", " \"image_file\",\n", " \"video_file\",\n", " \"txt_file\",\n", " \"plot\",\n", " \"matplotlib_plot\",\n", " \"bokeh_plot\",\n", " \"image\",\n", " \"text\",\n", " \"gallery\",\n", " \"video\",\n", " \"audio\",\n", " ],\n", " value=\"text\",\n", " label=\"Response Type\",\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " )\n", "\n", " chat_msg = chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(\n", " bot, [chatbot, response_type], chatbot, api_name=\"bot_response\"\n", " )\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py
ADDED
@@ -0,0 +1,179 @@
import gradio as gr
import os
import plotly.express as px

# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.


def random_plot():
    df = px.data.iris()
    fig = px.scatter(
        df,
        x="sepal_width",
        y="sepal_length",
        color="species",
        size="petal_length",
        hover_data=["petal_width"],
    )
    return fig


def print_like_dislike(x: gr.LikeData):
    print(x.index, x.value, x.liked)


def random_bokeh_plot():
    from bokeh.models import ColumnDataSource, Whisker
    from bokeh.plotting import figure
    from bokeh.sampledata.autompg2 import autompg2 as df
    from bokeh.transform import factor_cmap, jitter, factor_mark

    classes = list(sorted(df["class"].unique()))

    p = figure(
        height=400,
        x_range=classes,
        background_fill_color="#efefef",
        title="Car class vs HWY mpg with quintile ranges",
    )
    p.xgrid.grid_line_color = None

    g = df.groupby("class")
    upper = g.hwy.quantile(0.80)
    lower = g.hwy.quantile(0.20)
    source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))

    error = Whisker(
        base="base",
        upper="upper",
        lower="lower",
        source=source,
        level="annotation",
        line_width=2,
    )
    error.upper_head.size = 20
    error.lower_head.size = 20
    p.add_layout(error)

    p.circle(
        jitter("class", 0.3, range=p.x_range),
        "hwy",
        source=df,
        alpha=0.5,
        size=13,
        line_color="white",
        color=factor_cmap("class", "Light6", classes),
    )
    return p


def random_matplotlib_plot():
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt

    countries = ["USA", "Canada", "Mexico", "UK"]
    months = ["January", "February", "March", "April", "May"]
    m = months.index("January")
    r = 3.2
    start_day = 30 * m
    final_day = 30 * (m + 1)
    x = np.arange(start_day, final_day + 1)
    pop_count = {"USA": 350, "Canada": 40, "Mexico": 300, "UK": 120}
    df = pd.DataFrame({"day": x})
    for country in countries:
        df[country] = x ** (r) * (pop_count[country] + 1)

    fig = plt.figure()
    plt.plot(df["day"], df[countries].to_numpy())
    plt.title("Outbreak in " + "January")
    plt.ylabel("Cases")
    plt.xlabel("Days since Day 0")
    plt.legend(countries)
    return fig


def add_message(history, message):
    for x in message["files"]:
        history.append(((x,), None))
    if message["text"] is not None:
        history.append((message["text"], None))
    return history, gr.MultimodalTextbox(value=None, interactive=False)


def bot(history, response_type):
    if response_type == "plot":
        history[-1][1] = gr.Plot(random_plot())
    elif response_type == "bokeh_plot":
        history[-1][1] = gr.Plot(random_bokeh_plot())
    elif response_type == "matplotlib_plot":
        history[-1][1] = gr.Plot(random_matplotlib_plot())
    elif response_type == "gallery":
        history[-1][1] = gr.Gallery(
            [os.path.join("files", "avatar.png"), os.path.join("files", "avatar.png")]
        )
    elif response_type == "image":
        history[-1][1] = gr.Image(os.path.join("files", "avatar.png"))
    elif response_type == "video":
        history[-1][1] = gr.Video(os.path.join("files", "world.mp4"))
    elif response_type == "audio":
        history[-1][1] = gr.Audio(os.path.join("files", "audio.wav"))
    elif response_type == "audio_file":
        history[-1][1] = (os.path.join("files", "audio.wav"), "description")
    elif response_type == "image_file":
        history[-1][1] = (os.path.join("files", "avatar.png"), "description")
    elif response_type == "video_file":
        history[-1][1] = (os.path.join("files", "world.mp4"), "description")
    elif response_type == "txt_file":
        history[-1][1] = (os.path.join("files", "sample.txt"), "description")
    else:
        history[-1][1] = "Cool!"
    return history


fig = random_plot()

with gr.Blocks(fill_height=True) as demo:
    chatbot = gr.Chatbot(
        elem_id="chatbot",
        bubble_full_width=False,
        scale=1,
    )
    response_type = gr.Radio(
        [
            "audio_file",
            "image_file",
            "video_file",
            "txt_file",
            "plot",
            "matplotlib_plot",
            "bokeh_plot",
            "image",
            "text",
            "gallery",
            "video",
            "audio",
        ],
        value="text",
        label="Response Type",
    )

    chat_input = gr.MultimodalTextbox(
        interactive=True,
        placeholder="Enter message or upload file...",
        show_label=False,
    )

    chat_msg = chat_input.submit(
        add_message, [chatbot, chat_input], [chatbot, chat_input]
    )
    bot_msg = chat_msg.then(
        bot, [chatbot, response_type], chatbot, api_name="bot_response"
    )
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

    chatbot.like(print_like_dislike, None, None)

demo.queue()
if __name__ == "__main__":
    demo.launch()
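Because bot() is registered with api_name="bot_response", the running Space can also be driven programmatically with the gradio-client pinned in requirements.txt. A hedged sketch (not part of this commit; the Space id is a placeholder and the history payload assumes the tuple-style [user, bot] chatbot format used in run.py):

# Illustrative sketch (not part of run.py): call the /bot_response endpoint with
# the gradio-client library against a deployed copy of this Space.
from gradio_client import Client

client = Client("<owner>/chatbot_core_components_main")  # placeholder Space id
result = client.predict(
    [["hello", None]],       # chatbot history as [user, bot] pairs
    "text",                  # response_type radio value
    api_name="/bot_response",
)
print(result)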