<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>On-Device AI Chat</title>
<style>
:root {
--background-color: #f0f2f5;
--container-bg-color: #ffffff;
--text-color: #050505;
--secondary-text-color: #65676b;
--border-color: #ced0d4;
--accent-color: #007bff;
--accent-text-color: #ffffff;
--user-msg-bg: #007bff;
--ai-msg-bg: #e4e6eb;
}
html,
body {
margin: 0;
padding: 0;
height: 100%;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
background-color: var(--background-color);
color: var(--text-color);
}
#app-container {
display: flex;
flex-direction: column;
height: 100vh;
/* Full viewport height */
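/* Note: 100dvh may behave better than 100vh on mobile browsers with dynamic toolbars */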
max-width: 800px;
margin: 0 auto;
background-color: var(--container-bg-color);
box-shadow: 0 0 15px rgba(0, 0, 0, 0.1);
}
header {
padding: 10px 15px;
border-bottom: 1px solid var(--border-color);
background-color: #f7f7f7;
display: flex;
flex-wrap: wrap;
align-items: center;
gap: 15px;
}
header h1 {
font-size: 1.2em;
margin: 0;
flex-grow: 1;
}
#model-selector {
padding: 8px 12px;
border-radius: 8px;
border: 1px solid var(--border-color);
font-size: 0.9em;
background-color: white;
cursor: pointer;
}
#chat-history {
flex-grow: 1;
overflow-y: auto;
padding: 15px;
display: flex;
flex-direction: column;
}
.message {
max-width: 85%;
padding: 10px 14px;
border-radius: 18px;
margin-bottom: 10px;
line-height: 1.4;
word-wrap: break-word;
}
.user-message {
background-color: var(--user-msg-bg);
color: var(--accent-text-color);
align-self: flex-end;
border-bottom-right-radius: 4px;
}
.ai-message {
background-color: var(--ai-msg-bg);
color: var(--text-color);
align-self: flex-start;
border-bottom-left-radius: 4px;
}
#status {
padding: 5px 15px;
font-size: 0.85em;
color: var(--secondary-text-color);
text-align: center;
min-height: 22px;
transition: all 0.3s ease;
}
#input-area {
display: flex;
padding: 10px;
border-top: 1px solid var(--border-color);
gap: 10px;
}
#user-input {
flex-grow: 1;
border: 1px solid var(--border-color);
border-radius: 20px;
padding: 10px 15px;
font-size: 1em;
outline: none;
}
#user-input:focus {
border-color: var(--accent-color);
box-shadow: 0 0 0 2px rgba(0, 123, 255, 0.25);
}
#send-button {
padding: 10px 20px;
border: none;
background-color: var(--accent-color);
color: var(--accent-text-color);
border-radius: 20px;
font-size: 1em;
cursor: pointer;
transition: background-color 0.2s ease;
}
#send-button:hover:not(:disabled) {
background-color: #0056b3;
}
#send-button:disabled {
background-color: #a0a0a0;
cursor: not-allowed;
}
</style>
</head>
<body>
<div id="app-container">
<header>
<h1>Local Chat AI 🤖</h1>
<select id="model-selector" title="Select a model"></select>
</header>
<main id="chat-history"></main>
<div id="status">Select a model and start chatting!</div>
<footer id="input-area">
<input type="text" id="user-input" placeholder="Type your message..." autocomplete="off">
<button id="send-button">Send</button>
</footer>
</div>
<script type="module">
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.1';
// Configuration
// Prevent Transformers.js from looking for local models
env.allowLocalModels = false;
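// Downloaded weights are cached by the browser (Transformers.js uses the
// Cache Storage API by default), so each model only downloads once per origin.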
// --- DOM Elements ---
const statusDiv = document.getElementById('status');
const modelSelector = document.getElementById('model-selector');
const chatHistoryDiv = document.getElementById('chat-history');
const userInput = document.getElementById('user-input');
const sendButton = document.getElementById('send-button');
// --- State Management ---
let generator = null; // This will hold the loaded model pipeline
let isBusy = false;
let currentModel = '';
let chatHistory = []; // Array to store conversation [{role: 'user'|'assistant', content: '...'}, ...]
// --- Models ---
const MODELS = [
"Xenova/phi-3-mini-4k-instruct", // Good default: small instruction-tuned model
"Xenova/distilgpt2",
"Xenova/gemma-2b-it",
"Xenova/t5-small",
"Xenova/Mistral-7B-Instruct-v0.2", // Very large; may exceed browser memory on many devices
"Xenova/llama-3-8b-instruct", // Very large; may be slow/unstable on most devices
];
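// Note: distilgpt2 and t5-small ship without a chat template (and t5-small is an
// encoder-decoder model), so they are unlikely to work with the text-generation
// pipeline + apply_chat_template flow used below.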
// --- Core Functions ---
/**
* Populates the model selector dropdown with the available models.
*/
function initializeModelSelector() {
MODELS.forEach(modelName => {
const option = document.createElement('option');
option.value = modelName;
option.textContent = modelName.split('/')[1]; // Display only the model name
modelSelector.appendChild(option);
});
currentModel = modelSelector.value;
}
/**
* Updates the UI to reflect the application's busy state.
* @param {boolean} busy - Whether the application is busy.
* @param {string} message - The message to display in the status div.
*/
function setBusyState(busy, message = '') {
isBusy = busy;
userInput.disabled = busy;
sendButton.disabled = busy;
modelSelector.disabled = busy;
statusDiv.textContent = message;
if (busy) {
sendButton.textContent = '...';
} else {
sendButton.textContent = 'Send';
userInput.focus();
}
}
/**
* Appends a message to the chat history div and scrolls to the bottom.
* @param {string} sender - 'user' or 'ai'.
* @param {string} text - The message content.
*/
function appendMessage(sender, text) {
const messageDiv = document.createElement('div');
messageDiv.classList.add('message', sender === 'user' ? 'user-message' : 'ai-message');
messageDiv.textContent = text;
chatHistoryDiv.appendChild(messageDiv);
chatHistoryDiv.scrollTop = chatHistoryDiv.scrollHeight; // Auto-scroll to the latest message
}
/**
* Main function to handle sending a message and generating a response.
*/
async function sendMessage() {
const userText = userInput.value.trim();
if (!userText || isBusy) return;
setBusyState(true, 'Preparing...');
const selectedModel = modelSelector.value;
userInput.value = '';
// 1. Add user message to UI and history
appendMessage('user', userText);
chatHistory.push({ role: 'user', content: userText });
try {
// 2. Load model if it has changed or hasn't been loaded yet
if (!generator || currentModel !== selectedModel) {
currentModel = selectedModel;
// Dispose of the old generator if it exists
if (generator) {
await generator.dispose();
generator = null; // Ensure a failed load below doesn't leave a disposed pipeline in use
}
setBusyState(true, `Loading ${currentModel.split('/')[1]}...`);
generator = await pipeline('text-generation', currentModel, {
progress_callback: (data) => {
if (data.status === 'progress') {
const loaded = (data.loaded / 1024 / 1024).toFixed(2);
const total = (data.total / 1024 / 1024).toFixed(2);
setBusyState(true, `Downloading: ${data.file} (${loaded} / ${total} MB)`);
} else {
setBusyState(true, `Status: ${data.status.replace(/_/g, ' ')}`);
}
}
});
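// Transformers.js v2 fetches quantized ONNX weights by default; passing
// { quantized: false } above would load full-precision weights (much larger download).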
}
// 3. Generate AI response
setBusyState(true, 'Generating response...');
// Format the conversation history for the model.
// `apply_chat_template` is the recommended way to format prompts for chat models.
const prompt = generator.tokenizer.apply_chat_template(chatHistory, {
tokenize: false,
add_generation_prompt: true,
});
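// For a Phi-3-style template this yields text along the lines of:
//   <|user|>\nHello<|end|>\n<|assistant|>\n
// (the exact markers depend on the selected model's chat template).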
const result = await generator(prompt, {
max_new_tokens: 512,
temperature: 0.7,
top_k: 50,
do_sample: true,
});
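// do_sample with temperature/top_k trades deterministic (greedy) decoding for
// more varied replies; lower the temperature for more focused answers.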
// 4. Process and display AI response.
// By default the pipeline returns the prompt plus the newly generated text, so
// we slice off the prompt. This assumes the decoded output reproduces the prompt
// text exactly; tokenization round-trips can occasionally break that assumption.
const rawResponse = result[0].generated_text;
const aiResponse = rawResponse.substring(prompt.length).trim();
appendMessage('ai', aiResponse);
chatHistory.push({ role: 'assistant', content: aiResponse });
} catch (error) {
console.error('An error occurred:', error);
appendMessage('ai', `Sorry, an error occurred: ${error.message}`);
} finally {
// 5. Reset UI state
setBusyState(false, 'Select a model and start chatting!');
}
}
// --- Event Listeners ---
sendButton.addEventListener('click', sendMessage);
userInput.addEventListener('keydown', (event) => {
if (event.key === 'Enter') {
event.preventDefault(); // No surrounding form; just suppress any default Enter behavior
sendMessage();
}
});
// Initialize the app
initializeModelSelector();
</script>
</body>
</html>