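"""NeuralCode7: a tiny from-scratch next-word predictor.

Trains a bag-of-words MLP on whitespace-split tokens using plain Python
lists (no external dependencies), exports the trained network as a
standalone Python file, and can reload that file for inference.
"""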
import math
import random
import time
# CUSTOMIZATION
CONTEXT_WINDOW = 5
EPOCHS = 500
LR = 0.01
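# CONTEXT_WINDOW sets how many preceding words are used to predict the next
# word; EPOCHS and LR are the training budget and SGD step size.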

def relu(x):
    return max(0.0, x)

def stable_softmax(x_list):
    if not x_list:
        return []
    m = max(x_list)
    exps = [math.exp(i - m) for i in x_list]
    s = sum(exps)
    if s == 0:
        return [1.0 / len(x_list)] * len(x_list)
    return [e / s for e in exps]
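
# Illustrative example (added): subtracting the max logit before
# exponentiating keeps math.exp from overflowing on large inputs. For
# instance, stable_softmax([1.0, 2.0, 3.0]) ~= [0.090, 0.245, 0.665], and
# the outputs always sum to 1.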

class NeuralNetwork:
    def __init__(self, layer_sizes=None, activation='relu', output_activation='softmax',
                 init_range=0.1, grad_clip=1.0, seed=None, context_window=5):
        if seed is not None:
            random.seed(seed)
        self.layer_sizes = layer_sizes[:] if layer_sizes is not None else None
        self.activation = relu if activation == 'relu' else (lambda x: x)
        self.output_activation = stable_softmax if output_activation == 'softmax' else (lambda x: x)
        self.init_range = float(init_range)
        self.grad_clip = grad_clip
        self.context_window = context_window
        self.weights = []
        self.biases = []
        self.vocab = []
        self.word_to_idx = {}
        self.idx_to_word = {}

    def prepare_data_with_context(self, text):
        words = [w.strip() for w in text.replace('\n', ' ').split(' ') if w.strip()]
        self.vocab = sorted(set(words))
        self.word_to_idx = {w: i for i, w in enumerate(self.vocab)}
        self.idx_to_word = {i: w for w, i in self.word_to_idx.items()}
        vocab_size = len(self.vocab)
        X = []
        Y = []
        for i in range(len(words) - self.context_window):
            context_words = words[i : i + self.context_window]
            target_word = words[i + self.context_window]
            # Multi-hot bag-of-words encoding of the context words.
            x = [0.0] * vocab_size
            for word in context_words:
                if word in self.word_to_idx:
                    x[self.word_to_idx[word]] = 1.0
            # One-hot encoding of the target word.
            y = [0.0] * vocab_size
            if target_word in self.word_to_idx:
                y[self.word_to_idx[target_word]] = 1.0
            X.append(x)
            Y.append(y)
        return X, Y
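
    # Illustrative sketch (values made up): with context_window=2 and text
    # "a b c d", the pairs are (["a", "b"] -> "c") and (["b", "c"] -> "d").
    # Over the vocab ["a", "b", "c", "d"], the first pair is encoded as
    #   x = [1.0, 1.0, 0.0, 0.0], y = [0.0, 0.0, 1.0, 0.0].
    # Word order and repeats within the context window are discarded.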

    def initialize_weights(self):
        if self.layer_sizes is None:
            raise ValueError("layer_sizes must be set before initializing weights.")
        if self.weights:
            return
        for i in range(len(self.layer_sizes) - 1):
            in_dim = self.layer_sizes[i]
            out_dim = self.layer_sizes[i + 1]
            W = [[random.uniform(-self.init_range, self.init_range) for _ in range(out_dim)] for _ in range(in_dim)]
            b = [0.0 for _ in range(out_dim)]
            self.weights.append(W)
            self.biases.append(b)
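
    # Added note: each W is stored as an in_dim x out_dim list of lists, so
    # the forward pass computes sum(a[k] * W[k][j]) + b[j], i.e. a @ W + b.
    # Weights start uniform in [-init_range, init_range]; biases start at 0.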

    def forward(self, x):
        a = x[:]
        for i in range(len(self.weights) - 1):
            next_a = []
            W = self.weights[i]
            b = self.biases[i]
            out_dim = len(W[0])
            for j in range(out_dim):
                s = sum(a[k] * W[k][j] for k in range(len(a))) + b[j]
                next_a.append(self.activation(s))
            a = next_a
        # The final layer is linear; the output activation (softmax) is
        # applied to its raw scores.
        W = self.weights[-1]
        b = self.biases[-1]
        out = []
        out_dim = len(W[0])
        for j in range(out_dim):
            s = sum(a[k] * W[k][j] for k in range(len(a))) + b[j]
            out.append(s)
        return self.output_activation(out)
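
    # Usage sketch (illustrative): after training, forward() maps a multi-hot
    # context vector to a probability distribution over the vocabulary:
    #   probs = nn.forward(x)                              # len == len(nn.vocab)
    #   best = nn.idx_to_word[probs.index(max(probs))]     # greedy next word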

    def train(self, training_data, lr=0.01, epochs=500, verbose_every=50):
        X, Y = self.prepare_data_with_context(training_data)
        if not X:
            raise ValueError("Not enough tokens in training data to create context windows.")
        vocab_size = len(self.vocab)
        if self.layer_sizes is None:
            self.layer_sizes = [vocab_size, 64, vocab_size]
        else:
            self.layer_sizes[0] = vocab_size
            self.layer_sizes[-1] = vocab_size
        self.initialize_weights()
        for epoch in range(epochs):
            indices = list(range(len(X)))
            random.shuffle(indices)
            for idx in indices:
                x = X[idx]
                y = Y[idx]
                # Forward pass, caching activations and pre-activations for
                # the backward pass.
                activations = [x[:]]
                pre_acts = []
                a = x[:]
                for i in range(len(self.weights) - 1):
                    W, b = self.weights[i], self.biases[i]
                    z = []
                    out_dim = len(W[0])
                    for j in range(out_dim):
                        s = sum(a[k] * W[k][j] for k in range(len(a))) + b[j]
                        z.append(s)
                    pre_acts.append(z)
                    a = [self.activation(val) for val in z]
                    activations.append(a)
                W, b = self.weights[-1], self.biases[-1]
                z_final = []
                out_dim = len(W[0])
                for j in range(out_dim):
                    s = sum(a[k] * W[k][j] for k in range(len(a))) + b[j]
                    z_final.append(s)
                pre_acts.append(z_final)
                out = self.output_activation(z_final)
                # Gradient of softmax + cross-entropy w.r.t. the final
                # pre-activations.
                delta = [out[j] - y[j] for j in range(len(y))]
                # Backward pass. NOTE: this hardcodes the ReLU derivative for
                # the hidden layers.
                for i in reversed(range(len(self.weights))):
                    in_act = activations[i]
                    in_dim = len(in_act)
                    out_dim = len(delta)
                    # Propagate the error through this layer's *pre-update*
                    # weights before applying the gradient step.
                    prev_delta = None
                    if i != 0:
                        prev_delta = [0.0] * in_dim
                        for p in range(in_dim):
                            s = sum(self.weights[i][p][j] * delta[j] for j in range(out_dim))
                            if pre_acts[i - 1][p] > 0:
                                prev_delta[p] = s
                    db = delta[:]
                    if self.grad_clip is not None:
                        db = [max(-self.grad_clip, min(self.grad_clip, g)) for g in db]
                    for j in range(len(self.biases[i])):
                        self.biases[i][j] -= lr * db[j]
                    for k in range(in_dim):
                        for j in range(out_dim):
                            grad_w = in_act[k] * delta[j]
                            if self.grad_clip is not None:
                                grad_w = max(-self.grad_clip, min(self.grad_clip, grad_w))
                            self.weights[i][k][j] -= lr * grad_w
                    if i != 0:
                        delta = prev_delta
            if epoch % verbose_every == 0 or epoch == epochs - 1:
                # Report mean cross-entropy over the full training set.
                loss = 0.0
                for x_val, y_val in zip(X, Y):
                    p = self.forward(x_val)
                    for j in range(len(y_val)):
                        if y_val[j] > 0:
                            loss -= math.log(p[j] + 1e-12)
                print(f"Epoch {epoch}, Loss: {loss / len(X):.6f}")

    def export_to_python(self, filename):
        """Write the trained network out as a standalone Python file."""
        lines = []
        lines.append("import math\n")
        lines.append("import time\n\n")
        lines.append("def relu(x):\n    return max(0.0, x)\n\n")
        lines.append("def softmax(x_list):\n")
        lines.append("    if not x_list:\n")
        lines.append("        return []\n")
        lines.append("    m = max(x_list)\n")
        lines.append("    exps = [math.exp(i - m) for i in x_list]\n")
        lines.append("    s = sum(exps)\n")
        lines.append("    if s == 0:\n")
        lines.append("        return [1.0 / len(x_list)] * len(x_list)\n")
        lines.append("    return [e / s for e in exps]\n\n")
        # One function per neuron: hidden neurons apply relu, output neurons
        # stay linear so the exported predict() can apply softmax at the end.
        neuron_id = 0
        for layer_idx, (W, b) in enumerate(zip(self.weights, self.biases)):
            in_dim, out_dim = len(W), len(W[0])
            for j in range(out_dim):
                terms = " + ".join([f"{W[i][j]:.8f}*inputs[{i}]" for i in range(in_dim)]) or "0.0"
                b_term = f"{b[j]:.8f}"
                if layer_idx != len(self.weights) - 1:
                    lines.append(f"def neuron_{neuron_id}(inputs):\n    return relu({terms} + {b_term})\n\n")
                else:
                    lines.append(f"def neuron_{neuron_id}(inputs):\n    return {terms} + {b_term}\n\n")
                neuron_id += 1
        neuron_counter = 0
        for layer_idx, (W, b) in enumerate(zip(self.weights, self.biases)):
            out_dim = len(W[0])
            lines.append(f"def layer_{layer_idx}(inputs):\n")
            inner = ", ".join([f"neuron_{neuron_counter + j}(inputs)" for j in range(out_dim)])
            lines.append(f"    return [{inner}]\n\n")
            neuron_counter += out_dim
        lines.append("def predict(inputs):\n")
        lines.append("    a = inputs\n")
        for i in range(len(self.weights)):
            lines.append(f"    a = layer_{i}(a)\n")
        lines.append("    return softmax(a)\n\n")
        lines.append(f"vocab = {self.vocab}\n")
        lines.append("word_to_idx = {w: i for i, w in enumerate(vocab)}\n")
        lines.append(f"context_window = {self.context_window}\n\n")
        lines.append("if __name__ == '__main__':\n")
        lines.append("    print('Interactive multi-word text completion.')\n")
        lines.append("    print(f'Model context window: {context_window} words. Type text or empty to exit.')\n")
        lines.append("    while True:\n")
        lines.append("        inp = input('> ').strip()\n")
        lines.append("        if not inp:\n")
        lines.append("            break\n")
        lines.append("        words = [w.strip() for w in inp.split(' ') if w.strip()]\n")
        lines.append("        generated_words = words[:]\n")
        lines.append("        print('Input:', ' '.join(generated_words), end='', flush=True)\n")
        lines.append("        for _ in range(20):\n")
        lines.append("            context = generated_words[-context_window:]\n")
        lines.append("            x = [0.0] * len(vocab)\n")
        lines.append("            for word in context:\n")
        lines.append("                if word in word_to_idx:\n")
        lines.append("                    x[word_to_idx[word]] = 1.0\n")
        lines.append("            out = predict(x)\n")
        lines.append("            idx = out.index(max(out))\n")
        lines.append("            next_word = vocab[idx]\n")
        lines.append("            if next_word == '<|endoftext|>': break\n")
        lines.append("            generated_words.append(next_word)\n")
        lines.append("            print(' ' + next_word, end='', flush=True)\n")
        lines.append("            time.sleep(0.1)\n")
        lines.append("        print('\\n')\n")
        with open(filename, "w") as f:
            f.writelines(lines)
        print(f"Exported network to {filename}")

    @staticmethod
    def load_network(filename):
        # Execute the exported file in a fresh namespace; setting __name__
        # keeps its interactive __main__ block from running on load.
        ns = {"__name__": "__loaded_model__"}
        with open(filename, "r") as f:
            code = f.read()
        exec(code, ns)

        class ModelWrapper:
            def __init__(self, ns):
                self.ns = ns
                self.vocab = ns.get("vocab", [])
                self.word_to_idx = ns.get("word_to_idx", {})
                self.context_window = ns.get("context_window", 5)

            def complete(self, input_text, max_new_words=20):
                words = [w.strip() for w in input_text.strip().split(' ') if w.strip()]
                generated = words[:]
                for _ in range(max_new_words):
                    context = generated[-self.context_window:]
                    x = [0.0] * len(self.vocab)
                    for word in context:
                        if word in self.word_to_idx:
                            x[self.word_to_idx[word]] = 1.0
                    out = self.ns["predict"](x)
                    idx = out.index(max(out))
                    next_word = self.vocab[idx]
                    if next_word == '<|endoftext|>':
                        break
                    generated.append(next_word)
                return ' '.join(generated)

        return ModelWrapper(ns)
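
# Usage sketch (illustrative):
#   model = NeuralNetwork.load_network("exported_model.py")
#   print(model.complete("user: hi", max_new_words=10))
# The wrapper needs only the exported predict(), vocab, word_to_idx, and
# context_window, so it works without this module's class definitions.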
if __name__ == "__main__":
sample_text = """
user: hi
ai: Hello! How can I help you today?
<|endoftext|>
user: hi
ai: Hi! What can I do for you today?
<|endoftext|>
user: hello
ai: Hello! How can I help you today?
<|endoftext|>
user: hey
ai: Hi! What can I do for you today?
<|endoftext|>
user: How's your day going?
ai: It's been great! Thanks for asking! How about yours?
<|endoftext|>
user: What's new with you?
ai: Not much, just here and ready to help! What's new with you?
<|endoftext|>
user: What can you do?
ai: I can help you with a variety of tasks. What's on your mind?
<|endoftext|>
user: Tell me a joke.
ai: Why did the scarecrow win an award? Because he was outstanding in his field!
<|endoftext|>
"""
nn = NeuralNetwork(context_window=CONTEXT_WINDOW, seed=42)
nn.train(training_data=sample_text, lr=LR, epochs=EPOCHS, verbose_every=100)
nn.export_to_python("exported_model.py")
model = NeuralNetwork.load_network("exported_model.py")
print("\n--- Testing loaded model ---")
print(f"Vocabulary size: {len(model.vocab)}")
test_inputs = ["user: hi", "user: What's new", "ai: It's been"]
for test_input in test_inputs:
completion = model.complete(test_input, max_new_words=10)
print(f"Input: '{test_input}'\nOutput: '{completion}'\n")