luck210 committed on
Commit 07335a7 · verified · 1 Parent(s): da08cf4

Update app.py

Files changed (1):
  1. app.py +847 -587

app.py CHANGED
@@ -1,606 +1,866 @@
1
- from fastapi import FastAPI, HTTPException
2
- from fastapi.responses import HTMLResponse
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
- app = FastAPI(title="Chatbot")
6
 
7
- # Load the chatbot model (DialoGPT-medium) at startup
8
- model_name = "microsoft/DialoGPT-medium"
9
- tokenizer = AutoTokenizer.from_pretrained(model_name)
10
- model = AutoModelForCausalLM.from_pretrained(model_name)
11
 
12
- # Function to generate chatbot response
13
- def get_chatbot_response(user_input: str, max_length=1000):
14
- if not user_input:
15
- return "Please say something!"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
- input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
18
- chat_history_ids = model.generate(
19
- input_ids,
20
- max_length=max_length,
21
- pad_token_id=tokenizer.eos_token_id,
22
- no_repeat_ngram_size=3,
23
- do_sample=True,
24
- top_k=50,
25
- top_p=0.95,
26
- temperature=0.8
27
- )
28
- response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
29
- return response.strip()
30
-
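Note on the removed implementation: `model.generate` returns the prompt concatenated with the continuation, so the slice `chat_history_ids[:, input_ids.shape[-1]:]` decodes only the newly generated reply. A minimal standalone sketch of that pattern (downloads DialoGPT-medium on first run):

```python
# Prompt-stripping pattern used by the removed get_chatbot_response()
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
mdl = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

ids = tok.encode("Hello!" + tok.eos_token, return_tensors="pt")
out = mdl.generate(ids, max_length=100, pad_token_id=tok.eos_token_id)
# out contains prompt + reply; slice off the prompt tokens before decoding
reply = tok.decode(out[:, ids.shape[-1]:][0], skip_special_tokens=True)
print(reply)
```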
31
- # HTML, CSS, and JS with two modes (dark and soft)
32
- HTML_CONTENT = """
33
- <!DOCTYPE html>
34
- <html lang="en">
35
- <head>
36
- <meta charset="UTF-8">
37
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
38
- <meta name="description" content="Vion IA: Your friendly AI chatbot powered by xAI">
39
- <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/styles/github-dark.min.css">
40
- <link href='https://unpkg.com/boxicons@2.1.4/css/boxicons.min.css' rel='stylesheet'>
41
- <title>Vion IA | Powered by xAI</title>
42
- <style>
43
- @import url('https://fonts.googleapis.com/css2?family=Open+Sans:wght@300;400;600;800&display=swap');
44
- * { margin: 0; padding: 0; outline: none; box-sizing: border-box; font-family: "Open Sans", sans-serif; }
45
- :root {
46
- --primary-color: #0A0A0B; --secondary-color: #1A1B1E; --secondary-hover-color: #2A2B2E;
47
- --focus-color: #242528; --focus-hover-color: #343538; --button-hover-color: #252627;
48
- --text-color: #FFFFFF; --text-secondary-color: #A0A1A3; --heading-secondary-color: #606162;
49
- --placeholder-color: #78797A; --accent-color: #00A3FF; --star-color: #FFFFFF;
50
- --background-color: #000; --error-color: #FF4D4D; --success-color: #4CAF50;
51
- }
52
- .soft_mode {
53
- --primary-color: rgb(220, 230, 240); /* Soft pastel blue */
54
- --secondary-color: rgb(235, 240, 245); /* Lighter pastel */
55
- --secondary-hover-color: rgb(200, 210, 225);
56
- --focus-color: rgb(245, 245, 250); --focus-hover-color: rgb(230, 230, 235);
57
- --button-hover-color: rgb(210, 220, 230);
58
- --text-color: rgb(40, 50, 60); --text-secondary-color: rgb(90, 100, 110);
59
- --heading-secondary-color: rgb(150, 160, 170);
60
- --placeholder-color: rgb(120, 130, 140); --accent-color: rgb(100, 150, 255); /* Softer blue */
61
- --star-color: rgb(180, 190, 200); --background-color: rgb(240, 245, 250); /* Very light pastel */
62
- --error-color: rgb(255, 100, 100); --success-color: rgb(100, 200, 100);
63
- }
64
- body {
65
- min-height: 100vh; display: flex; flex-direction: row; justify-content: space-between;
66
- background: var(--background-color); position: relative; overflow-y: hidden; transition: background 0.3s ease;
67
- }
68
- .stars {
69
- position: fixed; width: 100%; height: 100%;
70
- background: url('https://www.transparenttextures.com/patterns/stardust.png');
71
- animation: starsAnim 100s linear infinite; z-index: -1; transition: opacity 0.3s ease; opacity: 0.8;
72
- }
73
- .soft_mode .stars {
74
- background: url('https://www.transparenttextures.com/patterns/stardust.png') rgba(240, 245, 250, 0.4);
75
- opacity: 0.3;
76
- }
77
- @keyframes starsAnim { from { background-position: 0 0; } to { background-position: 10000px 10000px; } }
78
- @keyframes pulse { 0% { transform: scale(1); } 50% { transform: scale(1.1); } 100% { transform: scale(1); } }
79
- @keyframes fadeIn { from { opacity: 0; transform: translateY(10px); } to { opacity: 1; transform: translateY(0); } }
80
- .sidebar {
81
- width: 70px; height: 100vh; background: rgba(26, 27, 30, 0.95); display: flex; flex-direction: column;
82
- align-items: center; padding: 1rem 0; position: fixed; left: 0; top: 0; transition: width 0.3s ease; z-index: 1001;
83
- }
84
- .sidebar:hover { width: 200px; }
85
- .soft_mode .sidebar { background: rgba(235, 240, 245, 0.95); }
86
- .sidebar__item {
87
- width: 100%; padding: 1rem; color: var(--text-secondary-color); text-decoration: none;
88
- display: flex; align-items: center; gap: 1rem; transition: all 0.3s ease; position: relative;
89
- }
90
- .sidebar__item:hover, .sidebar__item:focus {
91
- background: var(--secondary-hover-color); color: var(--accent-color); padding-left: 1.5rem; transform: scale(1.05);
92
- }
93
- .sidebar__item i { font-size: 1.5rem; }
94
- .sidebar__item span { display: none; font-size: 1rem; }
95
- .sidebar:hover .sidebar__item span { display: inline; }
96
- .tooltip {
97
- visibility: hidden; background: var(--secondary-color); color: var(--text-color); font-size: 0.8rem;
98
- padding: 0.5rem; border-radius: 0.3rem; position: absolute; top: -30px; left: 50%; transform: translateX(-50%);
99
- white-space: nowrap; z-index: 1002; transition: visibility 0.2s, opacity 0.2s; opacity: 0;
100
- }
101
- .sidebar__item:hover .tooltip, .sidebar__item:focus .tooltip { visibility: visible; opacity: 1; }
102
- .main-content {
103
- flex: 1; display: flex; flex-direction: column; padding-bottom: 100px; padding-top: 2rem; margin-left: 70px;
104
- height: 50vh; overflow: hidden;
105
- }
106
- .header { max-width: 900px; text-align: center; padding: 0 2rem; margin: 0 auto; }
107
- .header__title h1 {
108
- color: var(--text-color); font-size: 3.5rem; font-weight: 800; margin-bottom: 1rem;
109
- text-shadow: 0 0 10px rgba(0, 163, 255, 0.5); animation: fadeIn 1s ease-in;
110
- }
111
- .header__title h2 {
112
- color: var(--text-secondary-color); font-size: 1.5rem; font-weight: 400;
113
- max-width: 600px; margin: 0 auto; text-shadow: 0 0 5px rgba(0, 0, 0, 0.3);
114
- transition: opacity 0.3s ease, height 0.3s ease;
115
- }
116
- .suggests {
117
- display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
118
- margin: 2rem auto; max-width: 900px; gap: 1rem; padding: 0 2rem; transition: opacity 0.3s ease, height 0.3s ease;
119
- animation: fadeIn 0.5s ease-in;
120
- }
121
- .suggests.hidden, .header__title h2.hidden {
122
- opacity: 0; height: 0; margin: 0; overflow: hidden;
123
- }
124
- .suggests__item {
125
- background: rgba(26, 27, 30, 0.9); color: var(--text-secondary-color); padding: 1.5rem;
126
- border-radius: 0.5rem; cursor: pointer; transition: all 0.3s ease; border: 1px solid var(--focus-color);
127
- position: relative;
128
- }
129
- .soft_mode .suggests__item { background: rgba(235, 240, 245, 0.9); }
130
- .suggests__item:hover, .suggests__item:focus {
131
- background: var(--secondary-hover-color); border-color: var(--accent-color); color: var(--text-color);
132
- transform: translateY(-3px);
133
- }
134
- .suggests__item-icon { margin-top: 1rem; color: var(--accent-color); transition: transform 0.2s ease; }
135
- .suggests__item:hover .suggests__item-icon, .suggests__item:focus .suggests__item-icon { transform: scale(1.2); }
136
- .suggests__item .tooltip { top: -40px; left: 50%; transform: translateX(-50%); }
137
- .suggests__item:hover .tooltip, .suggests__item:focus .tooltip { visibility: visible; opacity: 1; }
138
- .prompt {
139
- position: fixed; background: rgba(10, 10, 11, 0.9); z-index: 1000; width: calc(100% - 70px);
140
- left: 70px; bottom: 0; padding: 1rem; border-top: 1px solid var(--secondary-color); transition: all 0.3s ease;
141
- }
142
- .soft_mode .prompt { background: rgba(235, 240, 245, 0.9); border-top: 1px solid var(--focus-color); }
143
- .prompt__input-wrapper {
144
- max-width: 900px; margin: 0 auto; position: relative; display: flex; align-items: center;
145
- background: var(--secondary-color); border: 1px solid var(--focus-color); border-radius: 0.5rem;
146
- padding: 0.2rem; transition: all 0.3s ease; animation: fadeIn 0.5s ease-in;
147
- }
148
- .prompt__input-wrapper:focus-within {
149
- border-color: var(--accent-color); background: var(--focus-color); transform: scale(1.02);
150
- }
151
- .prompt__input-wrapper.dragover {
152
- border: 2px dashed var(--accent-color); background: var(--focus-hover-color);
153
- }
154
- .prompt__form-input {
155
- flex-grow: 1; border: none; resize: none; font-size: 1.1rem; color: var(--text-color);
156
- padding: 0.3rem 0.5rem; background: transparent; outline: none; transition: all 0.3s ease;
157
- }
158
- .prompt__form-input::placeholder { color: var(--placeholder-color); transition: opacity 0.3s ease; }
159
- .prompt__form-input:focus::placeholder { opacity: 0.5; }
160
- .prompt__action-buttons {
161
- display: flex; align-items: center; gap: 0.3rem; padding-right: 0.3rem; position: relative;
162
- }
163
- .prompt__action-buttons.advanced { display: none; }
164
- .prompt__action-buttons.advanced.active { display: flex; }
165
- .prompt__form-button {
166
- background: none; border: none; color: var(--text-secondary-color); font-size: 1.3rem;
167
- cursor: pointer; padding: 0.3rem; transition: all 0.3s ease; position: relative;
168
- }
169
- .prompt__form-button:hover, .prompt__form-button:focus { color: var(--accent-color); transform: scale(1.1); }
170
- .prompt__form-button.send { font-size: 1.5rem; }
171
- .prompt__form-button .tooltip { top: -35px; left: 50%; transform: translateX(-50%); }
172
- .prompt__form-button:hover .tooltip, .prompt__form-button:focus .tooltip { visibility: visible; opacity: 1; }
173
- .prompt__disclaim {
174
- text-align: center; color: var(--placeholder-color); font-size: 0.8rem; margin-top: 1rem;
175
- max-width: 900px; margin-left: auto; margin-right: auto; transition: opacity 0.3s ease;
176
- }
177
- .chat-bar {
178
- max-width: 900px; margin: 2rem auto; padding: 0 2rem; display: flex; flex-direction: column;
179
- overflow-y: auto; max-height: calc(100vh - 180px); -ms-overflow-style: none; scrollbar-width: none;
180
- }
181
- .chat-bar::-webkit-scrollbar { display: none; }
182
- .chat-message {
183
- margin-bottom: 1rem; padding: 1rem; border-radius: 0.5rem; background: rgba(26, 27, 30, 0.9);
184
- color: var(--text-color); word-wrap: break-word; animation: fadeIn 0.3s ease-in; position: relative;
185
- }
186
- .soft_mode .chat-message { background: rgba(235, 240, 245, 0.9); }
187
- .chat-message.user {
188
- background: rgba(122, 92, 250, 0.2); border: 1px solid var(--accent-color); border-radius: 0.5rem;
189
- }
190
- .chat-message.bot { background: rgba(36, 37, 40, 0.9); }
191
- .soft_mode .chat-message.bot { background: rgba(245, 245, 250, 0.9); }
192
- .chat-message.user.bubble-rounded { border-radius: 1rem; }
193
- .chat-message.user.bubble-sharp { border-radius: 0; border: 2px solid var(--accent-color); }
194
- .chat-message.user.bubble-starry {
195
- border-radius: 0.5rem; border: 1px dashed var(--accent-color);
196
- background: rgba(122, 92, 250, 0.2) url('https://www.transparenttextures.com/patterns/stardust.png') repeat;
197
- background-size: 100px 100px;
198
- }
199
- .chat-message.feedback::after {
200
- content: 'Was this helpful?'; color: var(--text-secondary-color); font-size: 0.8rem; display: block;
201
- margin-top: 0.5rem; cursor: pointer; text-decoration: underline;
202
- }
203
- .chat-message.feedback .feedback-options {
204
- display: none; position: absolute; bottom: -30px; left: 1rem; gap: 0.5rem;
205
- }
206
- .chat-message.feedback:hover .feedback-options { display: flex; }
207
- .feedback-options button {
208
- background: none; border: none; color: var(--text-secondary-color); font-size: 1rem; cursor: pointer;
209
- transition: color 0.2s ease;
210
- }
211
- .feedback-options button:hover, .feedback-options button:focus { color: var(--accent-color); }
212
- .error-message {
213
- background: rgba(255, 77, 77, 0.2); border: 1px solid var(--error-color); color: var(--text-color);
214
- padding: 1rem; border-radius: 0.5rem; margin-bottom: 1rem; animation: fadeIn 0.3s ease-in;
215
- display: flex; justify-content: space-between; align-items: center;
216
- }
217
- .error-message button {
218
- background: var(--error-color); color: var(--text-color); border: none; padding: 0.3rem 0.6rem;
219
- border-radius: 0.3rem; cursor: pointer; transition: background 0.2s ease;
220
- }
221
- .error-message button:hover, .error-message button:focus { background: var(--button-hover-color); }
222
- .back-to-latest {
223
- display: none; position: fixed; bottom: 100px; right: 2rem; background: var(--secondary-color);
224
- color: var(--text-color); padding: 0.5rem 1rem; border-radius: 0.5rem; cursor: pointer;
225
- border: 1px solid var(--accent-color); transition: all 0.3s ease; z-index: 1000;
226
- }
227
- .back-to-latest.visible { display: block; }
228
- .back-to-latest:hover, .back-to-latest:focus { background: var(--secondary-hover-color); transform: scale(1.05); }
229
- .processing-dots {
230
- display: none; position: absolute; right: 60px; color: var(--accent-color); font-size: 1.2rem;
231
- }
232
- .processing-dots.active { display: inline; animation: pulse 1.5s infinite; }
233
- @keyframes blink {
234
- 0% { opacity: 1; } 50% { opacity: 0.3; } 100% { opacity: 1; }
235
- }
236
- .processing-dots span {
237
- animation: blink 1s infinite; animation-delay: calc(0.2s * var(--i));
238
- }
239
- </style>
240
- </head>
241
- <body>
242
- <div class="stars"></div>
243
- <nav class="sidebar" aria-label="Main navigation">
244
- <a href="#" class="sidebar__item" tabindex="0" aria-label="Home"><i class='bx bx-home'></i><span>Home</span><div class="tooltip">Go to Home</div></a>
245
- <a href="#" class="sidebar__item" tabindex="0" aria-label="Profile"><i class='bx bx-user'></i><span>Profile</span><div class="tooltip">View Profile</div></a>
246
- <a href="#" class="sidebar__item" tabindex="0" aria-label="Settings"><i class='bx bx-cog'></i><span>Settings</span><div class="tooltip">Adjust Settings</div></a>
247
- <a href="#" class="sidebar__item" tabindex="0" aria-label="Help"><i class='bx bx-help-circle'></i><span>Help</span><div class="tooltip">Get Help</div></a>
248
- </nav>
249
- <div class="main-content">
250
- <header class="header">
251
- <div class="header__title">
252
- <h1>Vion IA</h1>
253
- <h2 id="welcome-text">Ask me anything and I'll provide helpful and truthful answers from an outside perspective on humanity.</h2>
254
- </div>
255
- </header>
256
- <div class="suggests">
257
- <div class="suggests__item" tabindex="0"><p class="suggests__item-text">What is the meaning of life?</p><div class="suggests__item-icon"><i class='bx bx-bulb'></i></div><div class="tooltip">Explore life's purpose</div></div>
258
- <div class="suggests__item" tabindex="0"><p class="suggests__item-text">Explain quantum physics simply</p><div class="suggests__item-icon"><i class='bx bx-atom'></i></div><div class="tooltip">Learn about quantum physics</div></div>
259
- <div class="suggests__item" tabindex="0"><p class="suggests__item-text">How does the universe work?</p><div class="suggests__item-icon"><i class='bx bx-planet'></i></div><div class="tooltip">Discover the universe</div></div>
260
- </div>
261
- <div class="chat-bar" id="chatBar" aria-live="polite"></div>
262
- <button class="back-to-latest" id="backToLatest" tabindex="0">Back to Latest</button>
263
- </div>
264
- <section class="prompt">
265
- <form action="#" class="prompt__form" novalidate>
266
- <div class="prompt__input-wrapper">
267
- <input type="text" placeholder="Ask me anything..." class="prompt__form-input" id="chatInput" required aria-label="Chat input">
268
- <div class="prompt__action-buttons basic">
269
- <button type="button" class="prompt__form-button send" id="sendButton" tabindex="0" aria-label="Send message"><i class='bx bx-send'></i><div class="tooltip">Send Message (Ctrl+S)</div></button>
270
- <button type="button" class="prompt__form-button" id="moreOptions" tabindex="0" aria-label="Show more options"><i class='bx bx-dots-horizontal-rounded'></i><div class="tooltip">More Options</div></button>
271
- </div>
272
- <div class="prompt__action-buttons advanced" id="advancedOptions">
273
- <label for="fileInput" class="prompt__form-button" tabindex="0" aria-label="Upload file"><i class='bx bx-upload'></i><div class="tooltip">Upload File</div></label>
274
- <input type="file" id="fileInput" style="display: none;" accept=".txt,.pdf,.jpg,.png">
275
- <button type="button" class="prompt__form-button" id="deepSearchButton" tabindex="0" aria-label="Deep search"><i class='bx bx-search'></i><div class="tooltip">Deep Search</div></button>
276
- <button type="button" class="prompt__form-button" id="thinkButton" tabindex="0" aria-label="Think mode"><i class='bx bx-brain'></i><div class="tooltip">Think Mode</div></button>
277
- <button type="button" class="prompt__form-button" id="bubbleToggle" tabindex="0" aria-label="Toggle bubble style"><i class='bx bx-chat'></i><div class="tooltip">Change Bubble Style</div></button>
278
- <button type="button" class="prompt__form-button" id="soundToggle" tabindex="0" aria-label="Toggle sound"><i class='bx bx-volume-full'></i><div class="tooltip">Toggle Sound</div></button>
279
- <button type="button" class="prompt__form-button" id="themeToggler" tabindex="0" aria-label="Toggle theme"><i class='bx bx-adjust'></i><div class="tooltip">Toggle Dark/Soft (Ctrl+T)</div></button>
280
- <span class="processing-dots" id="processingDots"><span style="--i:1">.</span><span style="--i:2">.</span><span style="--i:3">.</span></span>
281
- </div>
282
- </div>
283
- </form>
284
- <p class="prompt__disclaim">Vion IA provides answers based on its training and design. It may make mistakes.</p>
285
- </section>
286
- <audio id="sendSound" src="https://www.soundjay.com/buttons/beep-01a.mp3"></audio>
287
- <audio id="responseSound" src="https://www.soundjay.com/buttons/beep-02.mp3"></audio>
288
- <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
289
- <script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/highlight.min.js"></script>
290
- <script>
291
- // Initialize state
292
- let soundEnabled = localStorage.getItem('soundEnabled') !== 'false';
293
- let messageHistory = [];
294
- let currentBubbleIndex = 0;
295
- const bubbleStyles = ['bubble-rounded', 'bubble-sharp', 'bubble-starry'];
296
- const chatBar = document.getElementById('chatBar');
297
- const inputField = document.getElementById('chatInput');
298
- const suggests = document.querySelector('.suggests');
299
- const welcomeText = document.getElementById('welcome-text');
300
- const processingDots = document.getElementById('processingDots');
301
- const sendButton = document.getElementById('sendButton');
302
- const backToLatest = document.getElementById('backToLatest');
303
- const themeToggler = document.getElementById('themeToggler');
304
- const soundToggle = document.getElementById('soundToggle');
305
- const advancedOptions = document.getElementById('advancedOptions');
306
- const moreOptions = document.getElementById('moreOptions');
307
-
308
- // Personalized greeting
309
- function setGreeting() {
310
- const hour = new Date().getHours();
311
- const greeting = hour < 12 ? 'Good morning!' : hour < 18 ? 'Good afternoon!' : 'Good evening!';
312
- const returning = localStorage.getItem('visited');
313
- welcomeText.textContent = returning ? `Welcome back! ${greeting}` : `${greeting} Ask me anything!`;
314
- localStorage.setItem('visited', 'true');
315
- }
316
- setGreeting();
317
-
318
- // Theme management (dark or soft)
319
- function setTheme(theme) {
320
- document.body.classList.remove('soft_mode');
321
- if (theme === 'soft') document.body.classList.add('soft_mode');
322
- themeToggler.innerHTML = theme === 'soft' ? "<i class='bx bx-moon'></i>" : "<i class='bx bx-adjust'></i>";
323
- localStorage.setItem('theme', theme);
324
- }
325
- const savedTheme = localStorage.getItem('theme') || 'dark';
326
- setTheme(savedTheme);
327
-
328
- // Toggle theme with button or Ctrl+T
329
- themeToggler.addEventListener('click', () => {
330
- const newTheme = document.body.classList.contains('soft_mode') ? 'dark' : 'soft';
331
- setTheme(newTheme);
332
- });
333
- document.addEventListener('keydown', (e) => {
334
- if (e.ctrlKey && e.key === 't') {
335
- e.preventDefault();
336
- const newTheme = document.body.classList.contains('soft_mode') ? 'dark' : 'soft';
337
- setTheme(newTheme);
338
- }
339
- });
340
-
341
- // Sound toggle
342
- soundToggle.innerHTML = soundEnabled ? "<i class='bx bx-volume-full'></i>" : "<i class='bx bx-volume-mute'></i>";
343
- soundToggle.addEventListener('click', () => {
344
- soundEnabled = !soundEnabled;
345
- localStorage.setItem('soundEnabled', soundEnabled);
346
- soundToggle.innerHTML = soundEnabled ? "<i class='bx bx-volume-full'></i>" : "<i class='bx bx-volume-mute'></i>";
347
- });
348
-
349
- // Progressive disclosure
350
- moreOptions.addEventListener('click', () => {
351
- advancedOptions.classList.toggle('active');
352
- moreOptions.style.transform = advancedOptions.classList.contains('active') ? 'rotate(90deg)' : 'rotate(0)';
353
- });
354
-
355
- // Bubble customization
356
- document.getElementById('bubbleToggle').addEventListener('click', () => {
357
- currentBubbleIndex = (currentBubbleIndex + 1) % bubbleStyles.length;
358
- applyBubbleStyle();
359
- });
360
-
361
- function applyBubbleStyle() {
362
- const userMessages = document.querySelectorAll('.chat-message.user');
363
- userMessages.forEach(msg => {
364
- bubbleStyles.forEach(style => msg.classList.remove(style));
365
- msg.classList.add(bubbleStyles[currentBubbleIndex]);
366
- });
367
- }
368
-
369
- // Add message with feedback option
370
- function addMessage(content, isUser = false, isError = false) {
371
- if (isError) {
372
- const errorDiv = document.createElement('div');
373
- errorDiv.classList.add('error-message');
374
- errorDiv.innerHTML = `${content} <button onclick="retryLastMessage()" tabindex="0">Retry</button>`;
375
- chatBar.appendChild(errorDiv);
376
- } else {
377
- const messageDiv = document.createElement('div');
378
- messageDiv.classList.add('chat-message', isUser ? 'user' : 'bot');
379
- if (isUser) messageDiv.classList.add(bubbleStyles[currentBubbleIndex]);
380
- if (!isUser) messageDiv.classList.add('feedback');
381
- messageDiv.textContent = content;
382
- if (!isUser) {
383
- const feedbackDiv = document.createElement('div');
384
- feedbackDiv.classList.add('feedback-options');
385
- feedbackDiv.innerHTML = `
386
- <button onclick="handleFeedback('up')" tabindex="0" aria-label="Thumbs up"><i class='bx bx-thumbs-up'></i></button>
387
- <button onclick="handleFeedback('down')" tabindex="0" aria-label="Thumbs down"><i class='bx bx-thumbs-down'></i></button>
388
- `;
389
- messageDiv.appendChild(feedbackDiv);
390
  }
391
- chatBar.appendChild(messageDiv);
392
- messageHistory.push({ content, isUser });
393
  }
394
- if (chatBar.scrollTop + chatBar.clientHeight >= chatBar.scrollHeight - 100) {
395
- chatBar.scrollTop = chatBar.scrollHeight;
396
- }
397
- }
398
-
399
- // Handle feedback (placeholder)
400
- function handleFeedback(type) {
401
- console.log(`Feedback: ${type}`);
402
- alert(`Thanks for your feedback! (${type === 'up' ? 'Thumbs up' : 'Thumbs down'})`);
403
- }
404
-
405
- // Retry last message
406
- function retryLastMessage() {
407
- const lastUserMessage = messageHistory.filter(m => m.isUser).slice(-1)[0];
408
- if (lastUserMessage) sendMessage(lastUserMessage.content);
409
- }
410
-
411
- // Input events
412
- inputField.addEventListener('input', () => {
413
- const input = inputField.value.trim();
414
- suggests.classList.toggle('hidden', input.length > 0);
415
- welcomeText.classList.toggle('hidden', input.length > 0);
416
- inputField.setCustomValidity(input.length < 3 && input.length > 0 ? 'Please enter at least 3 characters.' : '');
417
- });
418
-
419
- inputField.addEventListener('focus', () => {
420
- inputField.parentElement.style.boxShadow = `0 0 5px var(--accent-color)`;
421
- });
422
-
423
- inputField.addEventListener('blur', () => {
424
- inputField.parentElement.style.boxShadow = 'none';
425
- });
426
-
427
- inputField.addEventListener('keydown', (e) => {
428
- if (e.key === 'Enter' && !e.shiftKey) {
429
- e.preventDefault();
430
- sendMessage();
431
- } else if (e.ctrlKey && e.key === 's') {
432
- e.preventDefault();
433
- sendMessage();
434
- } else if (e.key === 'Escape') {
435
- e.preventDefault();
436
- inputField.value = '';
437
- suggests.classList.remove('hidden');
438
- welcomeText.classList.remove('hidden');
439
- }
440
- });
441
-
442
- // Drag and drop
443
- const inputWrapper = document.querySelector('.prompt__input-wrapper');
444
- inputWrapper.addEventListener('dragover', (e) => {
445
- e.preventDefault();
446
- inputWrapper.classList.add('dragover');
447
- });
448
- inputWrapper.addEventListener('dragleave', () => {
449
- inputWrapper.classList.remove('dragover');
450
- });
451
- inputWrapper.addEventListener('drop', (e) => {
452
- e.preventDefault();
453
- inputWrapper.classList.remove('dragover');
454
- const file = e.dataTransfer.files[0];
455
- if (file) alert(`File dropped: ${file.name} (Processing to be implemented)`);
456
- });
457
-
458
- // Suggestion interactions
459
- document.querySelectorAll('.suggests__item').forEach(item => {
460
- item.addEventListener('click', () => {
461
- inputField.value = item.querySelector('.suggests__item-text').textContent;
462
- suggests.classList.add('hidden');
463
- welcomeText.classList.add('hidden');
464
- inputField.focus();
465
- });
466
- item.addEventListener('dblclick', () => {
467
- alert(`Pinned suggestion: ${item.querySelector('.suggests__item-text').textContent} (Placeholder)`);
468
- });
469
- item.addEventListener('keydown', (e) => {
470
- if (e.key === 'Enter' || e.key === ' ') {
471
- e.preventDefault();
472
- item.click();
473
  }
474
- });
475
- });
476
-
477
- // Scroll handling
478
- chatBar.addEventListener('scroll', () => {
479
- const isScrolledUp = chatBar.scrollTop < chatBar.scrollHeight - chatBar.clientHeight - 100;
480
- backToLatest.classList.toggle('visible', isScrolledUp);
481
- if (chatBar.scrollTop < 100 && messageHistory.length > 10) {
482
- loadMoreMessages();
483
- }
484
- });
485
-
486
- backToLatest.addEventListener('click', () => {
487
- chatBar.scrollTop = chatBar.scrollHeight;
488
- });
489
- backToLatest.addEventListener('keydown', (e) => {
490
- if (e.key === 'Enter' || e.key === ' ') {
491
- e.preventDefault();
492
- chatBar.scrollTop = chatBar.scrollHeight;
493
- }
494
- });
495
-
496
- // Placeholder for loading more messages
497
- function loadMoreMessages() {
498
- console.log('Loading more messages (placeholder)');
499
- }
500
-
501
- // Send message
502
- function sendMessage(input = inputField.value.trim()) {
503
- if (!input) {
504
- addMessage('Oops, please type something to ask!', false, true);
505
- return;
506
- }
507
- if (input.length < 3) {
508
- addMessage('Your query is too short—try adding more details!', false, true);
509
- return;
510
  }
511
 
512
- sendButton.disabled = true;
513
- processingDots.classList.add('active');
514
- addMessage(input, true);
515
- inputField.value = '';
516
- if (soundEnabled) document.getElementById('sendSound').play();
517
-
518
- fetch('/chat', {
519
- method: 'POST',
520
- headers: { 'Content-Type': 'application/json' },
521
- body: JSON.stringify({ message: input })
522
- })
523
- .then(res => res.json())
524
- .then(data => {
525
- if (data.response) {
526
- addMessage(data.response);
527
- suggests.classList.add('hidden');
528
- if (soundEnabled) document.getElementById('responseSound').play();
529
- } else {
530
- addMessage(data.detail || 'Something went wrong! Please try again.', false, true);
531
  }
532
- })
533
- .catch(error => {
534
- addMessage('Failed to process the query! Please check your connection.', false, true);
535
- })
536
- .finally(() => {
537
- sendButton.disabled = false;
538
- processingDots.classList.remove('active');
539
- });
540
- }
541
-
542
- sendButton.addEventListener('click', () => sendMessage());
543
- sendButton.addEventListener('keydown', (e) => {
544
- if (e.key === 'Enter' || e.key === ' ') {
545
- e.preventDefault();
546
- sendMessage();
547
  }
548
- });
549
-
550
- // Placeholder button actions
551
- document.getElementById('deepSearchButton').addEventListener('click', () => {
552
- alert('Initiating DeepSearch... (Functionality to be implemented)');
553
- });
554
- document.getElementById('thinkButton').addEventListener('click', () => {
555
- alert('Activating Think mode... (Functionality to be implemented)');
556
- });
557
- document.getElementById('fileInput').addEventListener('change', (e) => {
558
- const file = e.target.files[0];
559
- if (file) alert(`File selected: ${file.name} (Processing to be implemented)`);
560
- });
561
-
562
- // Responsive adjustments
563
- window.addEventListener('resize', () => {
564
- const width = window.innerWidth;
565
- chatBar.style.maxHeight = width < 768 ? 'calc(100vh - 200px)' : 'calc(100vh - 180px)';
566
- });
567
-
568
- // Starry background effects
569
- document.addEventListener('mousemove', (e) => {
570
- let x = e.clientX / window.innerWidth;
571
- let y = e.clientY / window.innerHeight;
572
- document.querySelector('.stars').style.transform = `translate(${x * 50}px, ${y * 50}px)`;
573
- });
574
-
575
- // Enhanced keyboard navigation
576
- document.querySelectorAll('.sidebar__item, .suggests__item, .prompt__form-button, .back-to-latest, .feedback-options button').forEach(el => {
577
- el.addEventListener('keydown', (e) => {
578
- if (e.key === 'Enter' || e.key === ' ') {
579
- e.preventDefault();
580
- el.click();
581
  }
582
- });
583
- });
584
- </script>
585
- </body>
586
- </html>
587
- """
 
 
 
 
 
 
588
 
589
- @app.get("/", response_class=HTMLResponse)
590
- async def read_root():
591
- return HTML_CONTENT
592
 
593
- @app.post("/chat")
594
- async def chat_endpoint(data: dict):
595
- message = data.get("message", "")
596
- if not message:
597
- raise HTTPException(status_code=400, detail="No message provided")
598
  try:
599
- response = get_chatbot_response(message)
600
- return {"response": response}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
601
  except Exception as e:
602
- raise HTTPException(status_code=500, detail=f"Chat error: {str(e)}")
603
 
604
  if __name__ == "__main__":
605
  import uvicorn
606
- uvicorn.run(app, host="0.0.0.0", port=7860)

1
+ from fastapi import FastAPI, UploadFile, File, Form, HTTPException, Request
2
+ from fastapi.staticfiles import StaticFiles
3
+ from fastapi.responses import RedirectResponse, JSONResponse, HTMLResponse
4
+ from transformers import pipeline, ViltProcessor, ViltForQuestionAnswering, M2M100ForConditionalGeneration, M2M100Tokenizer
5
+ from typing import Optional, Dict, Any, List
6
+ import logging
7
+ import time
8
+ import os
9
+ import io
10
+ import json
11
+ import re
12
+ from PIL import Image
13
+ from docx import Document
14
+ import fitz # PyMuPDF
15
+ import pandas as pd
16
+ from functools import lru_cache
17
+ import torch
18
+ import numpy as np
19
+ from pydantic import BaseModel
20
+ import asyncio
21
+ import google.generativeai as genai
22
+ from spellchecker import SpellChecker
23
+ import nltk
24
+ from nltk.tokenize import sent_tokenize
25
+
26
+ # Configure logging
27
+ logging.basicConfig(
28
+ level=logging.INFO,
29
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
30
+ )
31
+ logger = logging.getLogger("cosmic_ai")
32
+
33
+ # Set a custom NLTK data directory
34
+ nltk_data_dir = os.getenv('NLTK_DATA_DIR', '/tmp/nltk_data')
35
+ os.makedirs(nltk_data_dir, exist_ok=True)
36
+ nltk.data.path.append(nltk_data_dir)
37
+
38
+ # Download punkt_tab data if not already present
39
+ try:
40
+ nltk.download('punkt_tab', download_dir=nltk_data_dir, quiet=True, raise_on_error=True)
41
+ logger.info(f"NLTK punkt_tab verified in {nltk_data_dir}")
42
+ except Exception as e:
43
+ logger.error(f"Error verifying NLTK punkt_tab: {str(e)}")
44
+ raise Exception(f"Failed to verify NLTK punkt_tab: {str(e)}")
45
+
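Note: `punkt_tab` is the sentence-tokenizer resource newer NLTK releases require for `sent_tokenize`. A minimal sanity-check sketch, assuming the resource has already been downloaded into the custom directory as above:

```python
# Confirm the punkt_tab resource resolves from the custom NLTK directory
import os
import nltk
from nltk.tokenize import sent_tokenize

nltk_data_dir = os.getenv('NLTK_DATA_DIR', '/tmp/nltk_data')
nltk.data.path.append(nltk_data_dir)

print(sent_tokenize("Hello there. This should split into two sentences."))
# -> ['Hello there.', 'This should split into two sentences.']
```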
46
+ # Create the upload directory if it doesn't exist
47
+ upload_dir = os.getenv('UPLOAD_DIR', '/tmp/uploads')
48
+ os.makedirs(upload_dir, exist_ok=True)
49
+
50
+ app = FastAPI(
51
+ title="Cosmic AI Assistant",
52
+ description="An advanced AI assistant with space-themed interface, translation, and file question-answering features",
53
+ version="2.0.0"
54
+ )
55
+
56
+ # Mount static files
57
+ app.mount("/static", StaticFiles(directory="static"), name="static")
58
+
59
+ # Mount images directory
60
+ app.mount("/images", StaticFiles(directory="images"), name="images")
61
+
62
+ # Gemini API Configuration
63
+ API_KEY = "AIzaSyDtLhhmXpy8ubSGb84ImaxM_ywlL0l_8bo" # Replace with your actual API key
64
+ genai.configure(api_key=API_KEY)
65
+
66
+ # Model configurations
67
+ MODELS = {
68
+ "summarization": "sshleifer/distilbart-cnn-12-6",
69
+ "image-to-text": "Salesforce/blip-image-captioning-large",
70
+ "visual-qa": "dandelin/vilt-b32-finetuned-vqa",
71
+ "chatbot": "gemini-1.5-pro",
72
+ "translation": "facebook/m2m100_418M",
73
+ "file-qa": "distilbert-base-cased-distilled-squad"
74
+ }
75
+
76
+ # Supported languages for translation
77
+ SUPPORTED_LANGUAGES = {
78
+ "english": "en",
79
+ "french": "fr",
80
+ "german": "de",
81
+ "spanish": "es",
82
+ "italian": "it",
83
+ "russian": "ru",
84
+ "chinese": "zh",
85
+ "japanese": "ja",
86
+ "arabic": "ar",
87
+ "hindi": "hi",
88
+ "portuguese": "pt",
89
+ "korean": "ko"
90
+ }
91
+
92
+ # Global variables for pre-loaded translation model
93
+ translation_model = None
94
+ translation_tokenizer = None
95
+
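`translate_text` below raises "Translation model not initialized" while these globals are still `None`, and the code that populates them is outside this hunk. A hedged sketch of a startup hook that would satisfy that contract (the actual file may initialize the model differently):

```python
# Sketch only -- the real initialization is not shown in this diff.
@app.on_event("startup")
async def preload_translation_model():
    global translation_model, translation_tokenizer
    translation_tokenizer = M2M100Tokenizer.from_pretrained(MODELS["translation"])
    translation_model = M2M100ForConditionalGeneration.from_pretrained(MODELS["translation"])
    translation_model.to("cuda" if torch.cuda.is_available() else "cpu")
    logger.info("Translation model pre-loaded at startup")
```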
96
+ # Initialize spell checker
97
+ spell = SpellChecker()
98
+
99
+ # Cache for model loading (excluding translation)
100
+ @lru_cache(maxsize=8)
101
+ def load_model(task: str, model_name: str = None):
102
+ """Cached model loader with proper task names and error handling"""
103
+ try:
104
+ logger.info(f"Loading model for task: {task}, model: {model_name or MODELS.get(task)}")
105
+ start_time = time.time()
106
+
107
+ model_to_load = model_name or MODELS.get(task)
108
+
109
+ if task == "chatbot":
110
+ return genai.GenerativeModel(model_to_load)
111
+
112
+ if task == "visual-qa":
113
+ processor = ViltProcessor.from_pretrained(model_to_load)
114
+ model = ViltForQuestionAnswering.from_pretrained(model_to_load)
115
+ device = "cuda" if torch.cuda.is_available() else "cpu"
116
+ model.to(device)
117
+
118
+ def vqa_function(image, question, **generate_kwargs):
119
+ if image.mode != "RGB":
120
+ image = image.convert("RGB")
121
+ inputs = processor(image, question, return_tensors="pt").to(device)
122
+ logger.info(f"VQA inputs - question: {question}, image size: {image.size}")
123
+ with torch.no_grad():
124
+ outputs = model(**inputs)
125
+ logits = outputs.logits
126
+ idx = logits.argmax(-1).item()
127
+ answer = model.config.id2label[idx]
128
+ logger.info(f"VQA raw output: {answer}")
129
+ return answer
130
+
131
+ return vqa_function
132
+
133
+ # Use pipeline for summarization, image-to-text, and file-qa
134
+ return pipeline(task if task != "file-qa" else "question-answering", model=model_to_load)
135
+
136
+ except Exception as e:
137
+ logger.error(f"Model load failed: {str(e)}")
138
+ raise HTTPException(status_code=500, detail=f"Model loading failed: {task} - {str(e)}")
139
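Because `load_model` is wrapped in `lru_cache(maxsize=8)`, repeated calls with the same arguments return the same already-constructed object instead of reloading model weights:

```python
# lru_cache keys on the (task, model_name) arguments, so the second call
# is a cache hit and returns the identical pipeline object.
summarizer_a = load_model("summarization")
summarizer_b = load_model("summarization")
assert summarizer_a is summarizer_b
```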
 
140
+ def get_gemini_response(user_input: str, is_generation: bool = False):
141
+ """Function to generate response with Gemini for both chat and text generation"""
142
+ if not user_input:
143
+ return "Please provide some input."
144
+ try:
145
+ chatbot = load_model("chatbot")
146
+ if is_generation:
147
+ prompt = f"Generate creative text based on this prompt: {user_input}"
148
+ else:
149
+ prompt = user_input
150
+ response = chatbot.generate_content(prompt)
151
+ return response.text.strip()
152
+ except Exception as e:
153
+ return f"Error: {str(e)}"
154
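Usage sketch, assuming `GEMINI_API_KEY` is configured; the `is_generation` flag only changes how the prompt is wrapped:

```python
print(get_gemini_response("What is a neutron star?"))                  # chat-style answer
print(get_gemini_response("a haiku about Mars", is_generation=True))   # wrapped as a creative-text prompt
```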
 
155
+ def translate_text(text: str, target_language: str):
156
+ """Translate text to any target language using pre-loaded M2M100 model"""
157
+ if not text:
158
+ return "Please provide text to translate."
159
+
160
+ try:
161
+ global translation_model, translation_tokenizer
162
+
163
+ target_lang = target_language.lower()
164
+ if target_lang not in SUPPORTED_LANGUAGES:
165
+ similar = [lang for lang in SUPPORTED_LANGUAGES if target_lang in lang or lang in target_lang]
166
+ if similar:
167
+ target_lang = similar[0]
168
+ else:
169
+ return f"Language '{target_language}' not supported. Available languages: {', '.join(SUPPORTED_LANGUAGES.keys())}"
170
+
171
+ lang_code = SUPPORTED_LANGUAGES[target_lang]
172
+
173
+ if translation_model is None or translation_tokenizer is None:
174
+ raise Exception("Translation model not initialized")
175
+
176
+ match = re.search(r'how to say\s+(.+?)\s+in\s+(\w+)', text.lower())
177
+ if match:
178
+ text_to_translate = match.group(1)
179
+ else:
180
+ content_match = re.search(r'(?:translate|convert).*to\s+[a-zA-Z]+\s*[:\s]*(.+)', text, re.IGNORECASE)
181
+ text_to_translate = content_match.group(1) if content_match else text
182
+
183
+ translation_tokenizer.src_lang = "en"
184
+ encoded = translation_tokenizer(text_to_translate, return_tensors="pt", padding=True, truncation=True).to(translation_model.device)
185
+
186
+ start_time = time.time()
187
+ generated_tokens = translation_model.generate(
188
+ **encoded,
189
+ forced_bos_token_id=translation_tokenizer.get_lang_id(lang_code),
190
+ max_length=512,
191
+ num_beams=1,
192
+ early_stopping=True
193
+ )
194
+ translated_text = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
195
+ logger.info(f"Translation took {time.time() - start_time:.2f} seconds")
196
+
197
+ return translated_text
198
+
199
+ except Exception as e:
200
+ logger.error(f"Translation error: {str(e)}", exc_info=True)
201
+ return f"Translation error: {str(e)}"
202
+
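The key M2M100 detail above is that the source language is set on the tokenizer (`src_lang`) while the target language is forced via `forced_bos_token_id`. A minimal standalone sketch of the same call path (downloads the 418M checkpoint on first run):

```python
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
mdl = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

tok.src_lang = "en"  # source language is a tokenizer attribute
enc = tok("I want to explore the stars", return_tensors="pt")
# target language is forced through the first generated token
out = mdl.generate(**enc, forced_bos_token_id=tok.get_lang_id("fr"))
print(tok.batch_decode(out, skip_special_tokens=True)[0])
```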
203
+ def detect_intent(text: str = None, file: UploadFile = None, intent: str = None) -> tuple[str, str]:
204
+ """Enhanced intent detection with explicit intent parameter support"""
205
+ target_language = "English" # Default
206
+ valid_intents = [
207
+ "chatbot", "translate", "file-translate", "summarize", "image-to-text",
208
+ "visual-qa", "visualize", "text-generation", "file-qa"
209
+ ]
210
+
211
+ # Check if an explicit intent is provided and valid
212
+ if intent and intent in valid_intents:
213
+ logger.info(f"Using explicit intent: {intent}")
214
+ # For translation intents, check if target language is specified in text
215
+ if intent in ["translate", "file-translate"] and text:
216
+ translate_patterns = [
217
+ r'translate.*to\s+\[?([a-zA-Z]+)\]?:?\s*(.*)',
218
+ r'convert.*to\s+\[?([a-zA-Z]+)\]?:?\s*(.*)',
219
+ r'how to say.*in\s+\[?([a-zA-Z]+)\]?:?\s*(.*)'
220
+ ]
221
+ for pattern in translate_patterns:
222
+ translate_match = re.search(pattern, text.lower())
223
+ if translate_match:
224
+ potential_lang = translate_match.group(1).lower()
225
+ if potential_lang in SUPPORTED_LANGUAGES:
226
+ target_language = potential_lang.capitalize()
227
+ break
228
+ return intent, target_language
229
+
230
+ # Existing intent detection logic for cases where intent is not provided
231
+ if file and text:
232
+ text_lower = text.lower()
233
+ filename = file.filename.lower() if file.filename else ""
234
+
235
+ # Check for file translation intent
236
+ translate_patterns = [
237
+ r'translate.*to\s+\[?([a-zA-Z]+)\]?:?\s*(.*)',
238
+ r'convert.*to\s+\[?([a-zA-Z]+)\]?:?\s*(.*)',
239
+ r'how to say.*in\s+\[?([a-zA-Z]+)\]?:?\s*(.*)'
240
+ ]
241
+ for pattern in translate_patterns:
242
+ translate_match = re.search(pattern, text_lower)
243
+ if translate_match and filename.endswith(('.pdf', '.docx', '.txt', '.rtf')):
244
+ potential_lang = translate_match.group(1).lower()
245
+ if potential_lang in SUPPORTED_LANGUAGES:
246
+ target_language = potential_lang.capitalize()
247
+ return "file-translate", target_language
248
+
249
+ # Image-related intents
250
+ content_type = file.content_type.lower() if file.content_type else ""
251
+ if content_type.startswith('image/') and text:
252
+ if "what’s this" in text_lower or "does this fly" in text_lower or ("fly" in text_lower and any(q in text_lower for q in ['does', 'can', 'will'])):
253
+ return "visual-qa", target_language
254
+ if any(q in text_lower for q in ['what is', 'what\'s', 'describe', 'tell me about', 'explain', 'how many', 'what color', 'is there', 'are they', 'does the']):
255
+ return "visual-qa", target_language
256
+ if "generate a caption" in text_lower or "caption" in text_lower:
257
+ return "image-to-text", target_language
258
+
259
+ # File-related intents
260
+ if filename.endswith(('.xlsx', '.xls', '.csv')):
261
+ return "visualize", target_language
262
+ elif filename.endswith(('.pdf', '.docx', '.doc', '.txt', '.rtf')):
263
+ if any(q in text_lower for q in ['what is', 'who is', 'where', 'when', 'why', 'how', 'what are', 'who are']):
264
+ return "file-qa", target_language
265
+ return "summarize", target_language
266
+
267
+ if not text:
268
+ # If only a file is provided, infer intent based on file type
269
+ if file:
270
+ filename = file.filename.lower() if file.filename else ""
271
+ content_type = file.content_type.lower() if file.content_type else ""
272
+ if content_type.startswith('image/'):
273
+ return "image-to-text", target_language # Default to image-to-text for images
274
+ elif filename.endswith(('.pdf', '.docx', '.doc', '.txt', '.rtf')):
275
+ return "summarize", target_language # Default to summarize for text files
276
+ elif filename.endswith(('.xlsx', '.xls', '.csv')):
277
+ return "visualize", target_language
278
+ return "chatbot", target_language
279
+
280
+ text_lower = text.lower()
281
+
282
+ if any(keyword in text_lower for keyword in ['chat', 'talk', 'converse', 'ask gemini']):
283
+ return "chatbot", target_language
284
+
285
+ # Text translation intent
286
+ translate_patterns = [
287
+ r'translate.*to\s+\[?([a-zA-Z]+)\]?:?\s*(.*)',
288
+ r'convert.*to\s+\[?([a-zA-Z]+)\]?:?\s*(.*)',
289
+ r'how to say.*in\s+\[?([a-zA-Z]+)\]?:?\s*(.*)'
290
+ ]
291
+
292
+ for pattern in translate_patterns:
293
+ translate_match = re.search(pattern, text_lower)
294
+ if translate_match:
295
+ potential_lang = translate_match.group(1).lower()
296
+ if potential_lang in SUPPORTED_LANGUAGES:
297
+ target_language = potential_lang.capitalize()
298
+ return "translate", target_language
299
+ else:
300
+ logger.warning(f"Invalid language detected: {potential_lang}")
301
+ return "chatbot", target_language
302
+
303
+ vqa_patterns = [
304
+ r'how (many|much)',
305
+ r'what (color|size|position|shape)',
306
+ r'is (there|that|this) (a|an)',
307
+ r'are (they|there) (any|some)',
308
+ r'does (the|this) (image|picture) (show|contain)'
309
+ ]
310
+
311
+ if any(re.search(pattern, text_lower) for pattern in vqa_patterns):
312
+ return "visual-qa", target_language
313
+
314
+ summarization_patterns = [
315
+ r'\b(summar(y|ize|ise)|brief( overview)?)\b',
316
+ r'\b(long article|text|document)\b',
317
+ r'\bcan you (summar|brief|condense)\b',
318
+ r'\b(short summary|brief explanation)\b',
319
+ r'\b(overview|main points|key ideas)\b',
320
+ r'\b(tl;?dr|too long didn\'?t read)\b'
321
+ ]
322
+
323
+ if any(re.search(pattern, text_lower) for pattern in summarization_patterns):
324
+ return "summarize", target_language
325
+
326
+ generation_patterns = [
327
+ r'\b(write|generate|create|compose)\b',
328
+ r'\b(story|poem|essay|text|content)\b'
329
+ ]
330
+
331
+ if any(re.search(pattern, text_lower) for pattern in generation_patterns):
332
+ return "text-generation", target_language
333
+
334
+ if len(text) > 100:
335
+ return "summarize", target_language
336
+
337
+ return "chatbot", target_language
338
+
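Behavior sketch, derived from the rules above (text-only inputs, no file attached):

```python
detect_intent("translate this to french: good morning")  # -> ("translate", "French")
detect_intent("tl;dr of this article, please")           # -> ("summarize", "English")
detect_intent("write a poem about comets")               # -> ("text-generation", "English")
detect_intent("hello there")                             # -> ("chatbot", "English")
```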
339
+ def preprocess_text(text: str) -> str:
340
+ """Correct spelling errors and improve text readability."""
341
+ words = text.split()
342
+ corrected_words = [spell.correction(word) or word for word in words]  # one lookup per word; fall back to the original word
343
+ corrected_text = " ".join(corrected_words)
344
+ sentences = sent_tokenize(corrected_text)
345
+ return ". ".join(sentence.capitalize() for sentence in sentences) + (". " if sentences else "")
346
+
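Illustrative example; the exact corrections depend on pyspellchecker's dictionary, and words it cannot resolve are left unchanged:

```python
print(preprocess_text("teh quick brwn fox jumps. it was fast."))
# -> something like: "The quick brown fox jumps. It was fast."
```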
347
+ class ProcessResponse(BaseModel):
348
+ response: str
349
+ type: str
350
+ additional_data: Optional[Dict[str, Any]] = None
351
+
352
+ @app.get("/chatbot")
353
+ async def chatbot_interface():
354
+ """Redirect to the static index.html file for the chatbot interface"""
355
+ return RedirectResponse(url="/static/index.html")
356
 
357
+ @app.post("/chat")
358
+ async def chat_endpoint(data: dict):
359
+ """Endpoint for chatbot interactions"""
360
+ message = data.get("message", "")
361
+ if not message:
362
+ raise HTTPException(status_code=400, detail="No message provided")
363
+ try:
364
+ response = get_gemini_response(message)
365
+ return {"response": response}
366
+ except Exception as e:
367
+ raise HTTPException(status_code=500, detail=f"Chat error: {str(e)}")
368
+
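Client-side usage sketch, assuming the app is served locally on port 7860 as in the previous version of this file:

```python
import requests

r = requests.post("http://localhost:7860/chat", json={"message": "Hello, who are you?"})
print(r.json()["response"])
```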
369
+ @app.post("/process", response_model=ProcessResponse)
370
+ async def process_input(
371
+ request: Request,
372
+ text: str = Form(None),
373
+ file: UploadFile = File(None),
374
+ intent: str = Form(None)
375
+ ):
376
+ """Enhanced unified endpoint with dynamic translation and file translation"""
377
+ start_time = time.time()
378
+ client_ip = request.client.host
379
+ logger.info(f"Request from {client_ip}: text={text[:50] + '...' if text and len(text) > 50 else text}, file={file.filename if file else None}, intent={intent}")
380
 
381
+ detected_intent, target_language = detect_intent(text, file, intent)
382
+ logger.info(f"Detected intent: {detected_intent}, target_language: {target_language}")
383
+
384
+ try:
385
+ if detected_intent == "chatbot":
386
+ response = get_gemini_response(text)
387
+ return {"response": response, "type": "chat"}
388
+ elif detected_intent == "translate":
389
+ content = await extract_text_from_file(file) if file else text
390
+ if "all languages" in text.lower():
391
+ translations = {}
392
+ phrase_to_translate = "I want to explore the stars" if "I want to explore the stars" in text else content
393
+ for lang, code in SUPPORTED_LANGUAGES.items():
394
+ translation_tokenizer.src_lang = "en"
395
+ encoded = translation_tokenizer(phrase_to_translate, return_tensors="pt").to(translation_model.device)
396
+ generated_tokens = translation_model.generate(
397
+ **encoded,
398
+ forced_bos_token_id=translation_tokenizer.get_lang_id(code),
399
+ max_length=512,
400
+ num_beams=1
401
+ )
402
+ translations[lang] = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
403
+ response = "\n".join(f"{lang.capitalize()}: {translations[lang]}" for lang in translations)
404
+ logger.info(f"Translated to all supported languages: {', '.join(translations.keys())}")
405
+ return {"response": response, "type": "translation"}
406
+ else:
407
+ translated_text = translate_text(content, target_language)
408
+ return {"response": translated_text, "type": "translation"}
409
+
410
+ elif detected_intent == "file-translate":
411
+ if not file or not file.filename.lower().endswith(('.pdf', '.docx', '.txt', '.rtf')):
412
+ raise HTTPException(status_code=400, detail="A text-based file (PDF, DOCX, TXT, RTF) is required")
413
+ if not text:
414
+ raise HTTPException(status_code=400, detail="Please specify a target language for translation")
415
+
416
+ content = await extract_text_from_file(file)
417
+ if not content.strip():
418
+ raise HTTPException(status_code=400, detail="No text could be extracted from the file")
419
+
420
+ # Split content into chunks to handle large files
421
+ max_chunk_size = 512
422
+ chunks = [content[i:i+max_chunk_size] for i in range(0, len(content), max_chunk_size)]
423
+ translated_chunks = []
424
+
425
+ for chunk in chunks:
426
+ translated_chunk = translate_text(chunk, target_language)
427
+ translated_chunks.append(translated_chunk)
428
+
429
+ translated_text = " ".join(translated_chunks)
430
+ translated_text = translated_text.strip().capitalize()
431
+ if not translated_text.endswith(('.', '!', '?')):
432
+ translated_text += '.'
433
+
434
+ logger.info(f"File translated to {target_language}: {translated_text[:100]}...")
435
+
436
+ return {
437
+ "response": translated_text,
438
+ "type": "file_translation",
439
+ "additional_data": {
440
+ "file_name": file.filename,
441
+ "target_language": target_language
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
442
  }
443
  }
444
+
445
+ elif detected_intent == "summarize":
446
+ content = await extract_text_from_file(file) if file else text
447
+ if not content.strip():
448
+ raise HTTPException(status_code=400, detail="No content to summarize")
449
+
450
+ content = preprocess_text(content)
451
+ logger.info(f"Preprocessed content: {content[:100]}...")
452
+
453
+ summarizer = load_model("summarization")
454
+
455
+ content_length = len(content.split())
456
+ max_len = max(50, min(200, content_length))
457
+ min_len = max(20, min(50, content_length // 3))
458
+
459
+ try:
460
+ if len(content) > 1024:
461
+ chunks = [content[i:i+1024] for i in range(0, len(content), 1024)]
462
+ summaries = []
463
+
464
+ for chunk in chunks[:3]:
465
+ summary = summarizer(
466
+ chunk,
467
+ max_length=max_len,
468
+ min_length=min_len,
469
+ do_sample=False,
470
+ truncation=True
471
+ )
472
+ summaries.append(summary[0]['summary_text'])
473
+
474
+ final_summary = " ".join(summaries)
475
+ else:
476
+ summary = summarizer(
477
+ content,
478
+ max_length=max_len,
479
+ min_length=min_len,
480
+ do_sample=False,
481
+ truncation=True
482
+ )
483
+ final_summary = summary[0]['summary_text']
484
+
485
+ final_summary = re.sub(r'\s+', ' ', final_summary).strip()
486
+ if not final_summary or final_summary.lower().startswith(content.lower()[:30]):
487
+ logger.warning("Summarizer produced inadequate output, falling back to Gemini")
488
+ final_summary = get_gemini_response(
489
+ f"Summarize this text in a concise and meaningful way: {content}"
490
+ )
491
+
492
+ if not final_summary.endswith(('.', '!', '?')):
493
+ final_summary += '.'
494
+
495
+ logger.info(f"Generated summary: {final_summary}")
496
+ return {"response": final_summary, "type": "summary", "message": "Text was preprocessed to correct spelling errors"}
497
+
498
+ except Exception as e:
499
+ logger.error(f"Summarization error: {str(e)}")
500
+ final_summary = get_gemini_response(
501
+ f"Summarize this text in a concise and meaningful way: {content}"
502
+ )
503
+ return {"response": final_summary, "type": "summary", "message": "Text was preprocessed to correct spelling errors"}
504
+
505
+ elif detected_intent == "image-to-text":
506
+ if not file or not file.content_type.startswith('image/'):
507
+ raise HTTPException(status_code=400, detail="An image file is required")
508
+
509
+ image = Image.open(io.BytesIO(await file.read()))
510
+ captioner = load_model("image-to-text")
511
+
512
+ caption = captioner(image, max_new_tokens=50)
513
+
514
+ return {
515
+ "response": caption[0]['generated_text'],
516
+ "type": "caption",
517
+ "additional_data": {
518
+ "image_size": f"{image.width}x{image.height}"
 
 
 
 
519
  }
520
  }
521
 
522
+ elif detected_intent == "visual-qa":
523
+ if not file or not file.content_type.startswith('image/'):
524
+ raise HTTPException(status_code=400, detail="An image file is required")
525
+ if not text:
526
+ raise HTTPException(status_code=400, detail="A question is required for VQA")
527
+
528
+ image = Image.open(io.BytesIO(await file.read())).convert("RGB")
529
+ vqa_pipeline = load_model("visual-qa")
530
+
531
+ question = text.strip()
532
+ if not question.endswith('?'):
533
+ question += '?'
534
+
535
+ answer = vqa_pipeline(
536
+ image=image,
537
+ question=question
538
+ )
539
+
540
+ answer = answer.strip()
541
+ if not answer or answer.lower() == question.lower():
542
+ logger.warning(f"VQA failed to generate a meaningful answer: {answer}")
543
+ answer = "I couldn't determine the answer from the image."
544
+ else:
545
+ answer = answer.capitalize()
546
+ if not answer.endswith(('.', '!', '?')):
547
+ answer += '.'
548
+
549
+ # Check if the question asks for a specific, factual detail like color
550
+ factual_questions = ['color', 'size', 'number', 'how many', 'what is the']
551
+ is_factual = any(keyword in question.lower() for keyword in factual_questions)
552
+
553
+ if is_factual:
554
+ # Return the raw VQA answer for factual questions
555
+ final_answer = answer
556
+ else:
557
+ # Apply cosmic tone for non-factual, open-ended questions
558
+ chatbot = load_model("chatbot")
559
+ if "fly" in question.lower():
560
+ final_answer = chatbot.generate_content(f"Make this fun and spacey: {answer}").text.strip()
561
+ else:
562
+ final_answer = chatbot.generate_content(f"Make this cosmic and poetic: {answer}").text.strip()
563
+
564
+ logger.info(f"Final VQA answer: {final_answer}")
565
+
566
+ return {
567
+ "response": final_answer,
568
+ "type": "visual_qa",
569
+ "additional_data": {
570
+ "question": text,
571
+ "image_size": f"{image.width}x{image.height}"
572
  }
573
  }
574
+
575
+ elif detected_intent == "visualize":
576
+ if not file:
577
+ raise HTTPException(status_code=400, detail="An Excel file is required")
578
+
579
+ file_content = await file.read()
580
+
581
+ if file.filename.endswith('.csv'):
582
+ df = pd.read_csv(io.BytesIO(file_content))
583
+ else:
584
+ df = pd.read_excel(io.BytesIO(file_content))
585
+
586
+ code = generate_visualization_code(df, text)
587
+ stats = df.describe().to_string()
588
+ response = f"Stats:\n{stats}\n\nChart Code:\n{code}"
589
+
590
+ return {"response": response, "type": "visualization_code"}
591
+
592
+ elif detected_intent == "text-generation":
593
+ response = get_gemini_response(text, is_generation=True)
594
+ lines = response.split(". ")
595
+ formatted_poem = "\n".join(line.strip() + ("." if not line.endswith(".") else "") for line in lines if line)
596
+ return {"response": formatted_poem, "type": "generated_text"}
597
+
+        elif detected_intent == "file-qa":
+            if not file or not file.filename.lower().endswith(('.pdf', '.docx', '.doc', '.txt', '.rtf')):
+                raise HTTPException(status_code=400, detail="A text-based file (PDF, DOCX, TXT, RTF) is required")
+            if not text:
+                raise HTTPException(status_code=400, detail="A question about the file is required")
+
+            content = await extract_text_from_file(file)
+            if not content.strip():
+                raise HTTPException(status_code=400, detail="No text could be extracted from the file")
+
+            qa_pipeline = load_model("file-qa")
+
+            question = text.strip()
+            if not question.endswith('?'):
+                question += '?'
+
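+            # The extractive QA model can only attend to a limited context window, so
+            # long documents are scanned in fixed-size chunks; 512 characters is a rough
+            # proxy for that limit, and only the first three chunks are searched to keep
+            # latency bounded. Answers scoring below 0.1 are treated as "no answer found".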
+            if len(content) > 512:
+                chunks = [content[i:i+512] for i in range(0, len(content), 512)]
+                answers = []
+                for chunk in chunks[:3]:
+                    result = qa_pipeline(question=question, context=chunk)
+                    if result['score'] > 0.1:
+                        answers.append((result['answer'], result['score']))
+                if answers:
+                    best_answer = max(answers, key=lambda x: x[1])[0]
+                else:
+                    best_answer = "I couldn't find a clear answer in the document."
+            else:
+                result = qa_pipeline(question=question, context=content)
+                best_answer = result['answer'] if result['score'] > 0.1 else "I couldn't find a clear answer in the document."
+
+            best_answer = best_answer.strip().capitalize()
+            if not best_answer.endswith(('.', '!', '?')):
+                best_answer += '.'
+
+            try:
+                chatbot = load_model("chatbot")
+                final_answer = chatbot.generate_content(f"Make this cosmic and poetic: {best_answer}").text.strip()
+            except Exception as e:
+                logger.warning(f"Failed to add cosmic tone: {str(e)}. Using raw answer.")
+                final_answer = best_answer
+
+            logger.info(f"File QA answer: {final_answer}")
+
+            return {
+                "response": final_answer,
+                "type": "file_qa",
+                "additional_data": {
+                    "question": text,
+                    "file_name": file.filename
                 }
+            }
+
+        else:
+            response = get_gemini_response(text or "Hello! How can I assist you?")
+            return {"response": response, "type": "chat"}
+
+    except Exception as e:
+        logger.error(f"Processing error: {str(e)}", exc_info=True)
+        raise HTTPException(status_code=500, detail=str(e))
+    finally:
+        process_time = time.time() - start_time
+        logger.info(f"Request processed in {process_time:.2f} seconds")
+
+async def extract_text_from_file(file: UploadFile) -> str:
+    """Enhanced text extraction with multiple fallbacks"""
+    if not file:
+        return ""
+
+    content = await file.read()
+    filename = file.filename.lower()
+
     try:
+        if filename.endswith('.pdf'):
+            try:
+                doc = fitz.open(stream=content, filetype="pdf")
+                if doc.is_encrypted:
+                    return "PDF is encrypted and cannot be read"
+                text = ""
+                for page in doc:
+                    text += page.get_text()
+                return text
+            except Exception as pdf_error:
+                logger.warning(f"PyMuPDF failed: {str(pdf_error)}. Trying pdfminer.six...")
+                from pdfminer.high_level import extract_text
+                from io import BytesIO
+                return extract_text(BytesIO(content))
+
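+        # The pdfminer.six fallback assumes that package is installed (e.g. listed in
+        # requirements.txt); if it isn't, the ImportError propagates to the outer
+        # except handler below and surfaces as a 500.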
+        elif filename.endswith(('.docx', '.doc')):
+            doc = Document(io.BytesIO(content))
+            return "\n".join(para.text for para in doc.paragraphs)
+
+        elif filename.endswith('.txt'):
+            return content.decode('utf-8', errors='replace')
+
+        elif filename.endswith('.rtf'):
+            # Crude RTF-to-text: strip control words (e.g. \rtf, \ansi), then group
+            # braces and stray backslashes
+            text = content.decode('utf-8', errors='replace')
+            text = re.sub(r'\\[a-z]+', ' ', text)
+            text = re.sub(r'\{|\}|\\', '', text)
+            return text
+
+        else:
+            raise HTTPException(status_code=400, detail=f"Unsupported file format: {filename}")
+
     except Exception as e:
+        logger.error(f"File extraction error: {str(e)}", exc_info=True)
+        raise HTTPException(
+            status_code=500,
+            detail=f"Error extracting text: {str(e)}. Supported formats: PDF, DOCX, TXT, RTF"
+        )
+
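+# Rough sanity check of the RTF path above (hypothetical, not part of the app's
+# runtime):
+#
+#     raw = rb"{\rtf1\ansi Hello World}".decode('utf-8')
+#     re.sub(r'\{|\}|\\', '', re.sub(r'\\[a-z]+', ' ', raw))  # -> ' 1  Hello World'
+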
+def generate_visualization_code(df: pd.DataFrame, request: str = None) -> str:
+    """Generate visualization code based on data analysis"""
+    num_rows, num_cols = df.shape
+    numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
+    categorical_cols = df.select_dtypes(include=['object']).columns.tolist()
+    # Heuristic date detection (not used by the templates below): native datetime
+    # dtype, or every value parseable as a date
+    date_cols = [col for col in df.columns if df[col].dtype == 'datetime64[ns]' or
+                 (isinstance(df[col].dtype, np.dtype) and pd.to_datetime(df[col], errors='coerce').notna().all())]
+
+    request_lower = request.lower() if request else ""
+
+    if len(numeric_cols) >= 2 and ("scatter" in request_lower or "correlation" in request_lower):
+        x_col = numeric_cols[0]
+        y_col = numeric_cols[1]
+        return f"""import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
+df = pd.read_excel('data.xlsx')
+plt.figure(figsize=(10, 6))
+sns.regplot(x='{x_col}', y='{y_col}', data=df, scatter_kws={{'alpha': 0.6}})
+plt.title('Correlation between {x_col} and {y_col}')
+plt.grid(True, alpha=0.3)
+plt.tight_layout()
+plt.savefig('correlation_plot.png')
+plt.show()
+correlation = df['{x_col}'].corr(df['{y_col}'])
+print(f"Correlation coefficient: {{correlation:.4f}}")"""
+
+    elif len(numeric_cols) >= 1 and len(categorical_cols) >= 1 and ("bar" in request_lower or "comparison" in request_lower):
+        cat_col = categorical_cols[0]
+        num_col = numeric_cols[0]
+        return f"""import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
+df = pd.read_excel('data.xlsx')
+plt.figure(figsize=(12, 7))
+ax = sns.barplot(x='{cat_col}', y='{num_col}', data=df, palette='viridis')
+for p in ax.patches:
+    ax.annotate(f'{{p.get_height():.1f}}',
+                (p.get_x() + p.get_width() / 2., p.get_height()),
+                ha='center', va='bottom', fontsize=10, color='black', xytext=(0, 5),
+                textcoords='offset points')
+plt.title('Comparison of {num_col} by {cat_col}', fontsize=15)
+plt.xlabel('{cat_col}', fontsize=12)
+plt.ylabel('{num_col}', fontsize=12)
+plt.xticks(rotation=45, ha='right')
+plt.grid(axis='y', alpha=0.3)
+plt.tight_layout()
+plt.savefig('comparison_chart.png')
+plt.show()"""
+
+ elif len(numeric_cols) >= 1 and ("distribution" in request_lower or "histogram" in request_lower):
763
+ num_col = numeric_cols[0]
764
+ return f"""import pandas as pd
765
+ import matplotlib.pyplot as plt
766
+ import seaborn as sns
767
+ df = pd.read_excel('data.xlsx')
768
+ plt.figure(figsize=(10, 6))
769
+ sns.histplot(df['{num_col}'], kde=True, bins=20, color='purple')
770
+ plt.title('Distribution of {num_col}', fontsize=15)
771
+ plt.xlabel('{num_col}', fontsize=12)
772
+ plt.ylabel('Frequency', fontsize=12)
773
+ plt.grid(True, alpha=0.3)
774
+ plt.tight_layout()
775
+ plt.savefig('distribution_plot.png')
776
+ plt.show()
777
+ print(df['{num_col}'].describe())"""
778
+
779
+    else:
+        # Fallback: a 2x2 dashboard (correlation heatmap, histogram, bar chart, box plot).
+        # This template must NOT be an f-string: {col}, {cat_col} and {num_col} below are
+        # meant to appear literally in the generated script, where they are defined;
+        # interpolating them here would raise NameError since they are undefined in
+        # this scope.
+        return """import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
+import numpy as np
+df = pd.read_excel('data.xlsx')
+print("Descriptive statistics:")
+print(df.describe())
+fig, axes = plt.subplots(2, 2, figsize=(15, 12))
+numeric_df = df.select_dtypes(include=[np.number])
+if not numeric_df.empty and numeric_df.shape[1] > 1:
+    sns.heatmap(numeric_df.corr(), annot=True, cmap='coolwarm', fmt='.2f', ax=axes[0, 0])
+    axes[0, 0].set_title('Correlation Matrix')
+if not numeric_df.empty:
+    for i, col in enumerate(numeric_df.columns[:1]):
+        sns.histplot(df[col], kde=True, ax=axes[0, 1], color='purple')
+        axes[0, 1].set_title(f'Distribution of {col}')
+        axes[0, 1].set_xlabel(col)
+        axes[0, 1].set_ylabel('Frequency')
+categorical_cols = df.select_dtypes(include=['object']).columns
+if len(categorical_cols) > 0 and not numeric_df.empty:
+    cat_col = categorical_cols[0]
+    num_col = numeric_df.columns[0]
+    sns.barplot(x=cat_col, y=num_col, data=df, ax=axes[1, 0], palette='viridis')
+    axes[1, 0].set_title(f'{num_col} by {cat_col}')
+    axes[1, 0].set_xticklabels(axes[1, 0].get_xticklabels(), rotation=45, ha='right')
+if not numeric_df.empty and len(categorical_cols) > 0:
+    cat_col = categorical_cols[0]
+    num_col = numeric_df.columns[0]
+    sns.boxplot(x=cat_col, y=num_col, data=df, ax=axes[1, 1], palette='Set3')
+    axes[1, 1].set_title(f'Distribution of {num_col} by {cat_col}')
+    axes[1, 1].set_xticklabels(axes[1, 1].get_xticklabels(), rotation=45, ha='right')
+plt.tight_layout()
+plt.savefig('dashboard.png')
+plt.show()"""
+
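+# Quick illustration of template selection (hypothetical, not executed by the app):
+# a DataFrame with one categorical and one numeric column plus a request containing
+# "bar" selects the annotated bar-chart template above.
+#
+#     df = pd.DataFrame({"city": ["Paris", "Lyon"], "sales": [120, 80]})
+#     print(generate_visualization_code(df, "bar chart of sales by city"))
+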
+@app.get("/", include_in_schema=False)
+async def home():
+    """Redirect to the static index.html file"""
+    return RedirectResponse(url="/static/index.html")
+
+@app.get("/health", include_in_schema=True)
+async def health_check():
+    """Health check endpoint"""
+    return {"status": "healthy", "version": "2.0.0"}
+
+@app.get("/models", include_in_schema=True)
+async def list_models():
+    """List available models"""
+    return {"models": MODELS}
+
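+# Note: recent FastAPI releases deprecate @app.on_event in favor of lifespan
+# handlers; on_event still works and is kept here for simplicity.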
+@app.on_event("startup")
+async def startup_event():
+    """Pre-load models at startup with timeout"""
+    global translation_model, translation_tokenizer
+    logger.info("Starting model pre-loading...")
+
+    async def load_model_with_timeout(task):
+        try:
+            await asyncio.wait_for(asyncio.to_thread(load_model, task), timeout=60.0)
+            logger.info(f"Successfully loaded {task} model")
+        except asyncio.TimeoutError:
+            logger.warning(f"Timeout loading {task} model - will load on demand")
+        except Exception as e:
+            logger.error(f"Error pre-loading {task}: {str(e)}")
+
+    # The translation model lives in module-level globals (hence the `global`
+    # statement above) and is loaded directly; note these from_pretrained calls
+    # run synchronously and block startup until they finish
+    try:
+        model_name = MODELS["translation"]
+        translation_model = M2M100ForConditionalGeneration.from_pretrained(model_name)
+        translation_tokenizer = M2M100Tokenizer.from_pretrained(model_name)
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        translation_model.to(device)
+        logger.info("Translation model pre-loaded successfully")
+    except Exception as e:
+        logger.error(f"Error pre-loading translation model: {str(e)}")
+
+    # Pre-load the remaining pipelines concurrently; each gets its own 60-second
+    # budget via load_model_with_timeout, so a slow download degrades to on-demand
+    # loading instead of blocking startup indefinitely
+    await asyncio.gather(
+        load_model_with_timeout("summarization"),
+        load_model_with_timeout("image-to-text"),
+        load_model_with_timeout("visual-qa"),
+        load_model_with_timeout("chatbot"),
+        load_model_with_timeout("file-qa")
+    )
+
 if __name__ == "__main__":
     import uvicorn
+    # reload=True is convenient for local development; consider disabling it in production
+    uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=True)