"""A tiny from-scratch neural network language model.

Trains a feed-forward network (pure Python, no dependencies) to predict the
next word from a bag-of-words context window, then exports the trained model
as standalone Python and as browser-runnable JavaScript/HTML.
"""

import math
import os
import random

# Hyperparameters for the demo run at the bottom of the file.
CONTEXT_WINDOW = 4  # number of preceding words used to predict the next word
EPOCHS = 100
LR = 0.01


def relu(x):
    return max(0.0, x)


def stable_softmax(x_list):
    # Subtracting the max before exponentiating prevents math.exp from
    # overflowing on large inputs; the result is mathematically unchanged.
    if not x_list:
        return []
    m = max(x_list)
    exps = [math.exp(i - m) for i in x_list]
    s = sum(exps)
    if s == 0:
        return [1.0 / len(x_list)] * len(x_list)
    return [e / s for e in exps]
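
# A quick sanity check of the stability trick (illustration only, not part of
# the model): a naive softmax of [1000.0, 1000.0] would overflow math.exp,
# while the max-subtracted version above returns an even split:
#
#     >>> stable_softmax([1000.0, 1000.0])
#     [0.5, 0.5]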


class NeuralNetwork:
    def __init__(self, layer_sizes=None, activation='relu', output_activation='softmax',
                 init_range=0.1, grad_clip=1.0, seed=None, context_window=5):
        if seed is not None:
            random.seed(seed)
        self.layer_sizes = layer_sizes[:] if layer_sizes is not None else None
        self.activation = relu if activation == 'relu' else (lambda x: x)
        self.output_activation = stable_softmax if output_activation == 'softmax' else (lambda x: x)
        self.init_range = float(init_range)
        self.grad_clip = grad_clip
        self.context_window = context_window
        self.weights = []
        self.biases = []
        self.vocab = []
        self.word_to_idx = {}
        self.idx_to_word = {}

    def prepare_data_with_context(self, text):
        # Whitespace tokenization; the vocabulary is the sorted set of tokens.
        words = [w.strip() for w in text.replace('\n', ' ').split(' ') if w.strip()]
        self.vocab = sorted(set(words))
        self.word_to_idx = {w: i for i, w in enumerate(self.vocab)}
        self.idx_to_word = {i: w for w, i in self.word_to_idx.items()}

        vocab_size = len(self.vocab)
        X = []
        Y = []

        # Slide a window over the tokens: each example encodes context_window
        # consecutive words as a multi-hot vector (word order is discarded)
        # and the word that follows them as a one-hot target.
        for i in range(len(words) - self.context_window):
            context_words = words[i : i + self.context_window]
            target_word = words[i + self.context_window]

            x = [0.0] * vocab_size
            for word in context_words:
                if word in self.word_to_idx:
                    x[self.word_to_idx[word]] = 1.0

            y = [0.0] * vocab_size
            if target_word in self.word_to_idx:
                y[self.word_to_idx[target_word]] = 1.0

            X.append(x)
            Y.append(y)

        return X, Y
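
    # Windowing example (illustration only): with context_window=2 and tokens
    # ['user:', 'hi', 'ai:', 'Hello!'], two training pairs are produced:
    #     ['user:', 'hi'] -> 'ai:'
    #     ['hi', 'ai:']   -> 'Hello!'
    # each encoded as a multi-hot input vector and a one-hot target.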

    def initialize_weights(self):
        if self.layer_sizes is None:
            raise ValueError("layer_sizes must be set before initializing weights.")
        if self.weights:
            return  # already initialized
        for i in range(len(self.layer_sizes) - 1):
            in_dim = self.layer_sizes[i]
            out_dim = self.layer_sizes[i + 1]
            W = [[random.uniform(-self.init_range, self.init_range) for _ in range(out_dim)]
                 for _ in range(in_dim)]
            b = [0.0 for _ in range(out_dim)]
            self.weights.append(W)
            self.biases.append(b)
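
    # Weights use a fixed uniform range. A common alternative (not used here)
    # is Xavier/Glorot initialization, which scales the range by layer width:
    #
    #     limit = math.sqrt(6.0 / (in_dim + out_dim))
    #     W = [[random.uniform(-limit, limit) for _ in range(out_dim)]
    #          for _ in range(in_dim)]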

    def forward(self, x):
        # Hidden layers: affine transform followed by the activation.
        a = x[:]
        for i in range(len(self.weights) - 1):
            next_a = []
            W = self.weights[i]
            b = self.biases[i]
            out_dim = len(W[0])
            for j in range(out_dim):
                s = sum(a[k] * W[k][j] for k in range(len(a))) + b[j]
                next_a.append(self.activation(s))
            a = next_a

        # Output layer: affine transform, then softmax over the vocabulary.
        W = self.weights[-1]
        b = self.biases[-1]
        out = []
        out_dim = len(W[0])
        for j in range(out_dim):
            s = sum(a[k] * W[k][j] for k in range(len(a))) + b[j]
            out.append(s)
        return self.output_activation(out)
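
    # forward() returns a probability distribution over the vocabulary: a list
    # of len(self.vocab) floats summing to ~1.0, where index j is the model's
    # probability that self.vocab[j] is the next word.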

    def train(self, training_data, lr=0.01, epochs=500, verbose_every=50):
        X, Y = self.prepare_data_with_context(training_data)
        if not X:
            raise ValueError("Not enough tokens in training data to create context windows.")

        # Input and output layer widths must match the vocabulary size.
        vocab_size = len(self.vocab)
        if self.layer_sizes is None:
            self.layer_sizes = [vocab_size, 64, vocab_size]
        else:
            self.layer_sizes[0] = vocab_size
            self.layer_sizes[-1] = vocab_size

        self.initialize_weights()
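
        # Plain per-example stochastic gradient descent: one forward/backward
        # pass and an immediate parameter update for each training pair, in a
        # freshly shuffled order every epoch.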

        for epoch in range(epochs):
            indices = list(range(len(X)))
            random.shuffle(indices)

            for idx in indices:
                x = X[idx]
                y = Y[idx]

                # Forward pass, caching pre-activations and activations for
                # backpropagation.
                activations = [x[:]]
                pre_acts = []
                a = x[:]

                for i in range(len(self.weights) - 1):
                    W, b = self.weights[i], self.biases[i]
                    z = []
                    out_dim = len(W[0])
                    for j in range(out_dim):
                        s = sum(a[k] * W[k][j] for k in range(len(a))) + b[j]
                        z.append(s)
                    pre_acts.append(z)
                    a = [self.activation(val) for val in z]
                    activations.append(a)

                W, b = self.weights[-1], self.biases[-1]
                z_final = []
                out_dim = len(W[0])
                for j in range(out_dim):
                    s = sum(a[k] * W[k][j] for k in range(len(a))) + b[j]
                    z_final.append(s)
                pre_acts.append(z_final)
                out = self.output_activation(z_final)
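
                # Gradient note: with softmax probabilities p = softmax(z) and
                # a one-hot target y, cross-entropy L = -sum_j y_j * log(p_j)
                # has the well-known gradient dL/dz_j = p_j - y_j, which is
                # exactly the `delta` computed below.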

                delta = [out[j] - y[j] for j in range(len(y))]

                for i in reversed(range(len(self.weights))):
                    in_act = activations[i]
                    in_dim = len(in_act)
                    out_dim = len(delta)

                    # Propagate the error to the previous layer before this
                    # layer's parameters are updated, so the backward pass
                    # uses the same weights the forward pass used.
                    prev_delta = None
                    if i != 0:
                        prev_delta = [0.0] * in_dim
                        for p in range(in_dim):
                            s = sum(self.weights[i][p][j] * delta[j] for j in range(out_dim))
                            if pre_acts[i - 1][p] > 0:  # ReLU derivative
                                prev_delta[p] = s

                    db = delta[:]
                    if self.grad_clip is not None:
                        db = [max(-self.grad_clip, min(self.grad_clip, g)) for g in db]
                    for j in range(len(self.biases[i])):
                        self.biases[i][j] -= lr * db[j]

                    for k in range(in_dim):
                        for j in range(out_dim):
                            grad_w = in_act[k] * delta[j]
                            if self.grad_clip is not None:
                                grad_w = max(-self.grad_clip, min(self.grad_clip, grad_w))
                            self.weights[i][k][j] -= lr * grad_w

                    if i != 0:
                        delta = prev_delta

            # Periodically report the average cross-entropy over the data set;
            # the 1e-12 guards against log(0).
            if epoch % verbose_every == 0 or epoch == epochs - 1:
                loss = 0.0
                for x_val, y_val in zip(X, Y):
                    p = self.forward(x_val)
                    for j in range(len(y_val)):
                        if y_val[j] > 0:
                            loss -= math.log(p[j] + 1e-12)
                print(f"Epoch {epoch}, Loss: {loss / len(X):.6f}")

    def export_to_python(self, filename):
        # Emit a standalone Python file: the activation functions, one
        # function per neuron with its weights baked in as literals, one
        # function per layer, a predict() wrapper, and an interactive loop.
        lines = []
        lines.append("import math\n")
        lines.append("import time\n\n")
        lines.append("def relu(x):\n    return max(0.0, x)\n\n")
        lines.append("def softmax(x_list):\n")
        lines.append("    if not x_list:\n")
        lines.append("        return []\n")
        lines.append("    m = max(x_list)\n")
        lines.append("    exps = [math.exp(i - m) for i in x_list]\n")
        lines.append("    s = sum(exps)\n")
        lines.append("    if s == 0:\n")
        lines.append("        return [1.0 / len(x_list)] * len(x_list)\n")
        lines.append("    return [e / s for e in exps]\n\n")

        neuron_id = 0
        for layer_idx, (W, b) in enumerate(zip(self.weights, self.biases)):
            in_dim, out_dim = len(W), len(W[0])
            for j in range(out_dim):
                terms = " + ".join([f"{W[i][j]:.8f}*inputs[{i}]" for i in range(in_dim)]) or "0.0"
                b_term = f"{b[j]:.8f}"
                if layer_idx != len(self.weights) - 1:
                    lines.append(f"def neuron_{neuron_id}(inputs):\n    return relu({terms} + {b_term})\n\n")
                else:
                    lines.append(f"def neuron_{neuron_id}(inputs):\n    return {terms} + {b_term}\n\n")
                neuron_id += 1

        neuron_counter = 0
        for layer_idx, (W, b) in enumerate(zip(self.weights, self.biases)):
            out_dim = len(W[0])
            lines.append(f"def layer_{layer_idx}(inputs):\n")
            inner = ", ".join([f"neuron_{neuron_counter + j}(inputs)" for j in range(out_dim)])
            lines.append(f"    return [{inner}]\n\n")
            neuron_counter += out_dim

        lines.append("def predict(inputs):\n")
        lines.append("    a = inputs\n")
        for i in range(len(self.weights)):
            lines.append(f"    a = layer_{i}(a)\n")
        lines.append("    return softmax(a)\n\n")

        lines.append(f"vocab = {self.vocab}\n")
        lines.append("word_to_idx = {w: i for i, w in enumerate(vocab)}\n")
        lines.append(f"context_window = {self.context_window}\n\n")

        lines.append("if __name__ == '__main__':\n")
        lines.append("    print('Interactive multi-word text completion.')\n")
        lines.append("    print(f'Model context window: {context_window} words. Type text or empty to exit.')\n")
        lines.append("    while True:\n")
        lines.append("        inp = input('> ').strip()\n")
        lines.append("        if not inp:\n")
        lines.append("            break\n")
        lines.append("        words = [w.strip() for w in inp.split(' ') if w.strip()]\n")
        lines.append("        generated_words = words[:]\n")
        lines.append("        print('Input:', ' '.join(generated_words), end='', flush=True)\n")
        lines.append("        for _ in range(20):\n")
        lines.append("            context = generated_words[-context_window:]\n")
        lines.append("            x = [0.0] * len(vocab)\n")
        lines.append("            for word in context:\n")
        lines.append("                if word in word_to_idx:\n")
        lines.append("                    x[word_to_idx[word]] = 1.0\n")
        lines.append("            out = predict(x)\n")
        lines.append("            idx = out.index(max(out))\n")
        lines.append("            next_word = vocab[idx]\n")
        lines.append("            if next_word == '<|endoftext|>':\n")
        lines.append("                break\n")
        lines.append("            generated_words.append(next_word)\n")
        lines.append("            print(' ' + next_word, end='', flush=True)\n")
        lines.append("            time.sleep(0.1)\n")
        lines.append("        print('\\n')\n")

        with open(filename, "w") as f:
            f.writelines(lines)
        print(f"Exported network to {filename}")

    def export_to_js(self, base_filename):
        js_filename = base_filename + ".js"
        html_filename = base_filename + ".html"

        js_lines = []
        js_lines.append("'use strict';\n\n")
        js_lines.append("function relu(x) {\n  return Math.max(0.0, x);\n}\n\n")
        js_lines.append("function softmax(x_list) {\n")
        js_lines.append("  if (!x_list || x_list.length === 0) return [];\n")
        js_lines.append("  const m = Math.max(...x_list);\n")
        js_lines.append("  const exps = x_list.map(x => Math.exp(x - m));\n")
        js_lines.append("  const s = exps.reduce((a, b) => a + b, 0);\n")
        js_lines.append("  if (s === 0) return Array(x_list.length).fill(1.0 / x_list.length);\n")
        js_lines.append("  return exps.map(e => e / s);\n}\n\n")

        neuron_id = 0
        for layer_idx, (W, b) in enumerate(zip(self.weights, self.biases)):
            in_dim, out_dim = len(W), len(W[0])
            for j in range(out_dim):
                terms = " + ".join([f"{W[i][j]:.8f} * inputs[{i}]" for i in range(in_dim)]) or "0.0"
                b_term = f"{b[j]:.8f}"
                js_lines.append(f"function neuron_{neuron_id}(inputs) {{\n")
                if layer_idx != len(self.weights) - 1:
                    js_lines.append(f"  return relu({terms} + {b_term});\n}}\n\n")
                else:
                    js_lines.append(f"  return {terms} + {b_term};\n}}\n\n")
                neuron_id += 1

        neuron_counter = 0
        for layer_idx, (W, b) in enumerate(zip(self.weights, self.biases)):
            out_dim = len(W[0])
            js_lines.append(f"function layer_{layer_idx}(inputs) {{\n")
            inner = ", ".join([f"neuron_{neuron_counter + j}(inputs)" for j in range(out_dim)])
            js_lines.append(f"  return [{inner}];\n}}\n\n")
            neuron_counter += out_dim

        js_lines.append("function predict(inputs) {\n")
        js_lines.append("  let a = inputs;\n")
        for i in range(len(self.weights)):
            js_lines.append(f"  a = layer_{i}(a);\n")
        js_lines.append("  return softmax(a);\n}\n\n")

        js_lines.append(f"const vocab = {self.vocab};\n")
        js_lines.append("const word_to_idx = {};\n")
        js_lines.append("vocab.forEach((w, i) => { word_to_idx[w] = i; });\n")
        js_lines.append(f"const context_window = {self.context_window};\n\n")

        js_lines.append("""
function completeText(inputText) {
  const words = inputText.trim().split(/\\s+/).filter(w => w.length > 0);
  let generatedWords = [...words];

  for (let i = 0; i < 20; i++) {
    const context = generatedWords.slice(-context_window);
    const x = Array(vocab.length).fill(0.0);

    context.forEach(word => {
      if (word in word_to_idx) {
        x[word_to_idx[word]] = 1.0;
      }
    });

    const out = predict(x);
    const maxProb = Math.max(...out);
    const idx = out.indexOf(maxProb);
    const nextWord = vocab[idx];

    if (nextWord === '<|endoftext|>') {
      break;
    }
    generatedWords.push(nextWord);
  }
  return generatedWords.join(' ');
}

document.addEventListener('DOMContentLoaded', () => {
  const generateButton = document.getElementById('generateButton');
  const userInput = document.getElementById('userInput');
  const outputText = document.getElementById('outputText');

  generateButton.addEventListener('click', () => {
    const text = userInput.value;
    if (text) {
      const result = completeText(text);
      outputText.textContent = result;
    }
  });
});
""")

        with open(js_filename, "w") as f:
            f.writelines(js_lines)

        html_lines = [
            '<!DOCTYPE html>\n',
            '<html lang="en">\n',
            '<head>\n',
            '    <meta charset="UTF-8">\n',
            '    <meta name="viewport" content="width=device-width, initial-scale=1.0">\n',
            '    <title>Neural Network Text Completion</title>\n',
            f'    <script src="{os.path.basename(js_filename)}"></script>\n',
            '    <style>\n',
            '        body { font-family: sans-serif; margin: 2em; background: #f0f0f0; }\n',
            '        .container { max-width: 600px; margin: auto; background: white; padding: 2em; border-radius: 8px; box-shadow: 0 2px 10px rgba(0,0,0,0.1); }\n',
            '        h1 { text-align: center; color: #333; }\n',
            '        textarea { width: 95%; height: 80px; padding: 10px; margin-bottom: 1em; border-radius: 4px; border: 1px solid #ccc; font-size: 1em; }\n',
            '        button { display: block; width: 100%; padding: 10px; background: #007bff; color: white; border: none; border-radius: 4px; font-size: 1.1em; cursor: pointer; }\n',
            '        button:hover { background: #0056b3; }\n',
            '        #outputText { margin-top: 1.5em; padding: 1em; background: #e9ecef; border-radius: 4px; min-height: 40px; white-space: pre-wrap; word-wrap: break-word; }\n',
            '    </style>\n',
            '</head>\n',
            '<body>\n',
            '    <div class="container">\n',
            '        <h1>Text Completion Model</h1>\n',
            '        <textarea id="userInput" placeholder="Enter starting text... (e.g., user: hi)"></textarea>\n',
            '        <button id="generateButton">Generate</button>\n',
            '        <h3>Output:</h3>\n',
            '        <div id="outputText"></div>\n',
            '    </div>\n',
            '</body>\n',
            '</html>\n',
        ]
        with open(html_filename, "w") as f:
            f.writelines(html_lines)

        print(f"Exported browser-runnable model to {js_filename} and {html_filename}")

    @staticmethod
    def load_network(filename):
        # Execute the exported file in a fresh namespace and wrap its
        # predict()/vocab globals in a small convenience object. Overriding
        # __name__ keeps the exported file's interactive __main__ block from
        # running. Note: exec() runs arbitrary code, so only load files you
        # generated yourself or otherwise trust.
        ns = {"__name__": "__loaded_model__"}
        with open(filename, "r") as f:
            code = f.read()
        exec(code, ns)

        class ModelWrapper:
            def __init__(self, ns):
                self.ns = ns
                self.vocab = ns.get("vocab", [])
                self.word_to_idx = ns.get("word_to_idx", {})
                self.context_window = ns.get("context_window", 5)

            def complete(self, input_text, max_new_words=20):
                words = [w.strip() for w in input_text.strip().split(' ') if w.strip()]
                generated = words[:]
                for _ in range(max_new_words):
                    context = generated[-self.context_window:]
                    x = [0.0] * len(self.vocab)
                    for word in context:
                        if word in self.word_to_idx:
                            x[self.word_to_idx[word]] = 1.0

                    # Greedy decoding: always take the most probable word.
                    out = self.ns["predict"](x)
                    idx = out.index(max(out))
                    next_word = self.vocab[idx]

                    if next_word == '<|endoftext|>':
                        break
                    generated.append(next_word)
                return ' '.join(generated)

        return ModelWrapper(ns)
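
    # Usage sketch (mirrors the demo below):
    #     model = NeuralNetwork.load_network("exported_model.py")
    #     print(model.complete("user: hi", max_new_words=10))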


if __name__ == "__main__":
    sample_text = """
    user: hi
    ai: Hello! How can I help you today?
    <|endoftext|>
    user: hi
    ai: Hi! What can I do for you today?
    <|endoftext|>
    user: hello
    ai: Hello! How can I help you today?
    <|endoftext|>
    user: hey
    ai: Hi! What can I do for you today?
    <|endoftext|>
    user: How's your day going?
    ai: It's been great! Thanks for asking! How about yours?
    <|endoftext|>
    user: What's new with you?
    ai: Not much, just here and ready to help! What's new with you?
    <|endoftext|>
    user: What can you do?
    ai: I can help you with a variety of tasks. What's on your mind?
    <|endoftext|>
    user: Tell me a joke.
    ai: Why did the scarecrow win an award? Because he was outstanding in his field!
    <|endoftext|>
    """

    nn = NeuralNetwork(context_window=CONTEXT_WINDOW, seed=42)
    nn.train(training_data=sample_text, lr=LR, epochs=EPOCHS, verbose_every=50)

    nn.export_to_python("exported_model.py")
    nn.export_to_js("web_model")

    model = NeuralNetwork.load_network("exported_model.py")
    print("\n--- Testing loaded Python model ---")
    print(f"Vocabulary size: {len(model.vocab)}")

    test_inputs = ["user: hi", "user: What's new", "ai: It's been"]
    for test_input in test_inputs:
        completion = model.complete(test_input, max_new_words=10)
        print(f"Input: '{test_input}'\nOutput: '{completion}'\n")

    print("\nTo test the JavaScript model, open 'web_model.html' in your browser.")