{ "cells": [ { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [], "source": [ "words = open('names.txt', 'r').read().splitlines()" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [], "source": [ "import torch\n", "\n", "N = torch.zeros((27, 27), dtype = torch.int32)\n", "\n", "chars = sorted(list(set(''.join(words))))\n", "\n", "stoi = {s:i+1 for i,s in enumerate(chars)}\n", "stoi['.'] = 0\n", "\n", "itos = {i:s for s,i in stoi.items()}" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [], "source": [ "P = N.float()\n", "P /= P.sum(1, keepdim=True)" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ ". e\n", "e m\n", "m m\n", "m a\n", "a .\n" ] } ], "source": [ "#Creating the training set of bigrams (x,y)\n", "xs, ys = [], []\n", "\n", "for word in words[:1]:\n", " chs = ['.'] + list(word) + ['.']\n", " for ch1, ch2 in zip(chs, chs[1:]):\n", " ix1 = stoi[ch1]\n", " ix2 = stoi[ch2]\n", " print(ch1, ch2)\n", " xs.append(ix1)\n", " ys.append(ix2)\n", "\n", "xs = torch.tensor(xs)\n", "ys = torch.tensor(ys)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([ 0, 5, 13, 13, 1])" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "xs" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([ 5, 13, 13, 1, 0])" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ys" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([[1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", " 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", " [0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", " 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.,\n", " 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.,\n", " 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", " [0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", " 0., 0., 0., 0., 0., 0., 0., 0., 0.]])" ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#Feeding these examples into a neural network\n", "import torch.nn.functional as F\n", "xenc = F.one_hot(xs, num_classes=27).float() #IMP: manual type casting\n", "xenc" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "torch.Size([5, 27])" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "xenc.shape" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAhYAAACHCAYAAABK4hAcAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAN2klEQVR4nO3df2hV9ePH8dfd2q4/urs6137cNufUUmpukrolkgkbTgvJ9A8r/1hDjOoqzlHJAl1CsDAIqSQjKP/xV0ImyQdDlpsE8wcTMaH21SFfr8xtKR/vdOZcu+/PH3263+9Nnd7tvXt2r88HHLj33Df3vHjzlr0899x7XMYYIwAAAAuSnA4AAAASB8UCAABYQ7EAAADWUCwAAIA1FAsAAGANxQIAAFhDsQAAANY8EsuDhUIhtbe3y+PxyOVyxfLQAABgkIwxun79unw+n5KSBj4nEdNi0d7erry8vFgeEgAAWBIIBJSbmzvgmJgWC4/HI0n631OTlPbo0D6FefnJGTYiAQCA+/hTffpZ/wr/HR9ITIvF3x9/pD2apDTP0IrFI64UG5EAAMD9/PfmHw9yGQMXbwIAAGsoFgAAwBqKBQAAsGZQxWLbtm2aNGmSRo0apdLSUp04ccJ2LgAAEIeiLhZ79+5VTU2N6urqdOrUKRUXF6uiokJdXV3DkQ8AAMSRqIvFJ598otWrV6uqqkpPPfWUtm/frjFjxujrr78ejnwAACCORFUsbt++rZaWFpWXl//fGyQlqby8XM3NzXeM7+3tVXd3d8QGAAASV1TF4sqVK+rv71dWVlbE/qysLHV0dNwxvr6+Xl6vN7zxq5sAACS2Yf1WSG1trYLBYHgLBALDeTgAAOCwqH55MyMjQ8nJyers7IzY39nZqezs7DvGu91uud3uoSUEAABxI6ozFqmpqZo1a5YaGhrC+0KhkBoaGjR37lzr4QAAQHyJ+l4hNTU1qqys1OzZs1VSUqKtW7eqp6dHVVVVw5EPAADEkaiLxYoVK/T7779r06ZN6ujo0MyZM3Xo0KE7LugEAAAPH5cxxsTqYN3d3fJ6vfr3/0we8t1NK3wz7YQCAAAD+tP0qVEHFAwGlZaWNuBY7hUCAACsifqjEBtefnKGHnGlOHHoh86P7aetvA9niAAAD4IzFgAAwBqKBQAAsIZiAQAArKFYAAAAaygWAADAGooFAACwhmIBAACsoVgAAABrKBYAAMAaigUAALCGYgEAAKyhWAAAAGsoFgAAwBqKBQAAsIZiAQAArKFYAAAAaygWAADAGooFAACw5hGnA2B4VfhmOh0BCeLH9tNW3oc1CSQ2zlgAAABrKBYAAMAaigUAALCGYgEAAKyJqljU19drzpw58ng8yszM1NKlS9Xa2jpc2QAAQJyJqlg0NTXJ7/fr2LFjOnz4sPr6+rRw4UL19PQMVz4AABBHovq66aFDhyKe79ixQ5mZmWppadH8+fOtBgMAAPFnSL9jEQwGJUnp6el3fb23t1e9vb3h593d3UM5HAAAGOEGffFmKBRSdXW15s2bp8LCwruOqa+vl9frDW95eXmDDgoAAEa+QRcLv9+vs2fPas+ePfccU1tbq2AwGN4CgcBgDwcAAOLAoD4KWbNmjQ4ePKijR48qNzf3nuPcbrfcbvegwwEAgPgSVbEwxmjt2rXav3+/GhsbVVBQMFy5AABAHIqqWPj9fu3atUsHDhyQx+NRR0eHJMnr9Wr06NHDEhAAAMSPqK6x+OKLLxQMBrVgwQLl5OSEt7179w5XPgAAEEei/igEAADgXrhXCAAAsIZiAQAArKFYAAAAaygWAADAGooFAACwhmIBAACsoVgAAABrKBYAAMAaigUAALCGYgEAAKyhWAAAAGsoFgAAwBqKBQAAsIZiAQAArKFYAAAAaygWAADAGooFAACwhmIBAACsoVgAAABrKBYAAMAaigUAALDmEacDDNaP7aetvVeFb6a19wISFf9OADwIzlgAAABrKBYAAMAaigUAALCGYgEAAKwZUrH46KOP5HK5VF1dbSkOAACIZ4MuFidPntSXX36poqIim3kAAEAcG1SxuHHjhlauXKmvvvpK48ePt50JAADEqUEVC7/frxdffFHl5eUDjuvt7VV3d3fEBgAAElfUP5C1Z88enTp1SidPnrzv2Pr6em3evHlQwQAAQPyJ6oxFIBDQunXrtHPnTo0aNeq+42traxUMBsNbIBAYdFAAADDyRXXGoqWlRV1dXXrmmWfC+/r7+3X06FF9/vnn6u3tVXJycvg1t9stt9ttLy0AABjRoioWZWVl+uWXXyL2VVVVafr06dqwYUNEqQAAAA+fqIqFx+NRYWFhxL6xY8dqwoQJd+wHAAAPH355EwAAWDPk26Y3NjZaiAEAABIBZywAAIA1Qz5jEQ1jjCTpT/VJZmjv1X09ZCHRX/40fdbeCwCARPOn/vo7+fff8YG4zIOMsuTSpUvKy8uL1eEAAIBFgUBAubm5A46JabEIhUJqb2+Xx+ORy+W657ju7m7l5eUpEAgoLS0tVvEeWsx37DDXscV8xxbzHVuxnG9jjK5fvy6fz6ekpIGvoojpRyFJSUn3bTr/X1paGoszhpjv2GGuY4v5ji3mO7ZiNd9er/eBxnHxJgAAsIZiAQAArBmRxcLtdquuro77jMQI8x07zHVsMd+xxXzH1kid75hevAkAABLbiDxjAQAA4hPFAgAAWEOxAAAA1lAsAACANRQLAABgzYgrFtu2bdOkSZM0atQolZaW6sSJE05HSkgffPCBXC5XxDZ9+nSnYyWMo0ePasmSJfL5fHK5XPr+++8jXjfGaNOmTcrJydHo0aNVXl6uc+fOORM2Adxvvl9//fU71vuiRYucCRvn6uvrNWfOHHk8HmVmZmrp0qVqbW2NGHPr1i35/X5NmDBBjz76qJYvX67Ozk6HEse3B5nvBQsW3LG+33zzTYcSj7BisXfvXtXU1Kiurk6nTp1ScXGxKioq1NXV5XS0hPT000/r8uXL4e3nn392OlLC6OnpUXFxsbZt23bX17ds2aJPP/1U27dv1/HjxzV27FhVVFTo1q1bMU6aGO4335K0aNGiiPW+e/fuGCZMHE1NTfL7/Tp27JgOHz6svr4+LVy4UD09PeEx69ev1w8//KB9+/apqalJ7e3tWrZsmYOp49eDzLckrV69OmJ9b9myxaHEkswIUlJSYvx+f/h5f3+/8fl8pr6+3sFUiamurs4UFxc7HeOhIMns378//DwUCpns7Gzz8ccfh/ddu3bNuN1us3v3bgcSJpZ/zrcxxlRWVpqXXnrJkTyJrqury0gyTU1Nxpi/1nJKSorZt29feMyvv/5qJJnm5manYiaMf863McY8//zzZt26dc6F+ocRc8bi9u3bamlpUXl5eXhfUlKSysvL1dzc7GCyxHXu3Dn5fD5NnjxZK1eu1MWLF52O9FC4cOGCOjo6Ita61+tVaWkpa30YNTY2KjMzU9OmTdNbb72lq1evOh0pIQSDQUlSenq6JKmlpUV9fX0R63v69OmaOHEi69uCf87333bu3KmMjAwVFh
aqtrZWN2/edCKepBjf3XQgV65cUX9/v7KysiL2Z2Vl6bfffnMoVeIqLS3Vjh07NG3aNF2+fFmbN2/Wc889p7Nnz8rj8TgdL6F1dHRI0l3X+t+vwa5FixZp2bJlKigoUFtbm95//30tXrxYzc3NSk5Odjpe3AqFQqqurta8efNUWFgo6a/1nZqaqnHjxkWMZX0P3d3mW5Jee+015efny+fz6cyZM9qwYYNaW1v13XffOZJzxBQLxNbixYvDj4uKilRaWqr8/Hx9++23WrVqlYPJAPteeeWV8OMZM2aoqKhIU6ZMUWNjo8rKyhxMFt/8fr/Onj3L9Vkxcq/5fuONN8KPZ8yYoZycHJWVlamtrU1TpkyJdcyRc/FmRkaGkpOT77hyuLOzU9nZ2Q6leniMGzdOTz75pM6fP+90lIT393pmrTtn8uTJysjIYL0PwZo1a3Tw4EEdOXJEubm54f3Z2dm6ffu2rl27FjGe9T0095rvuyktLZUkx9b3iCkWqampmjVrlhoaGsL7QqGQGhoaNHfuXAeTPRxu3LihtrY25eTkOB0l4RUUFCg7OztirXd3d+v48eOs9Ri5dOmSrl69ynofBGOM1qxZo/379+unn35SQUFBxOuzZs1SSkpKxPpubW3VxYsXWd+DcL/5vpvTp09LkmPre0R9FFJTU6PKykrNnj1bJSUl2rp1q3p6elRVVeV0tITzzjvvaMmSJcrPz1d7e7vq6uqUnJysV1991eloCeHGjRsR/1u4cOGCTp8+rfT0dE2cOFHV1dX68MMP9cQTT6igoEAbN26Uz+fT0qVLnQsdxwaa7/T0dG3evFnLly9Xdna22tra9N5772nq1KmqqKhwMHV88vv92rVrlw4cOCCPxxO+bsLr9Wr06NHyer1atWqVampqlJ6errS0NK1du1Zz587Vs88+63D6+HO/+W5ra9OuXbv0wgsvaMKECTpz5ozWr1+v+fPnq6ioyJnQTn8t5Z8+++wzM3HiRJOammpKSkrMsWPHnI6UkFasWGFycnJMamqqefzxx82KFSvM+fPnnY6VMI4cOWIk3bFVVlYaY/76yunGjRtNVlaWcbvdpqyszLS2tjobOo4NNN83b940CxcuNI899phJSUkx+fn5ZvXq1aajo8Pp2HHpbvMsyXzzzTfhMX/88Yd5++23zfjx482YMWPMyy+/bC5fvuxc6Dh2v/m+ePGimT9/vklPTzdut9tMnTrVvPvuuyYYDDqW2fXf4AAAAEM2Yq6xAAAA8Y9iAQAArKFYAAAAaygWAADAGooFAACwhmIBAACsoVgAAABrKBYAAMAaigUAALCGYgEAAKyhWAAAAGv+A6sEjbDe9GoiAAAAAElFTkSuQmCC", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "plt.imshow(xenc)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([[ 0.5838, -0.8614, 0.1874, -0.5662, 0.2449, 1.4738, 1.8403, 0.3233,\n", " 1.0014, 0.0263, -0.5269, -0.8413, 0.0329, -0.0670, -0.7272, -0.2977,\n", " -0.5083, 0.1050, -0.5482, 1.0237, 1.2359, 1.6366, -1.6188, 0.3283,\n", " 0.7180, -0.9729, -1.5425],\n", " [ 1.4868, -0.0457, 0.2224, 1.5423, -0.0151, -0.2254, 0.7613, -0.4738,\n", " -0.2175, -0.9024, 0.0148, 0.6673, -0.1291, -1.4357, 0.2100, -0.5559,\n", " -0.0711, -0.1631, 0.1704, 0.5689, -1.2534, -0.0207, 0.2485, 0.9525,\n", " 0.1465, 0.1339, 0.1875],\n", " [-0.3253, 0.6007, 1.3449, 0.0990, -0.6273, 0.4972, -0.2262, 0.4910,\n", " -1.6546, 0.5298, -0.3165, -0.7659, 0.9075, -0.4458, 0.9129, -2.7461,\n", " 0.0098, 0.9013, 0.7363, -0.7745, -0.8155, 1.5463, 0.0723, -0.5926,\n", " -0.2548, 0.4572, -0.9398],\n", " [-0.3253, 0.6007, 1.3449, 0.0990, -0.6273, 0.4972, -0.2262, 0.4910,\n", " -1.6546, 0.5298, -0.3165, -0.7659, 0.9075, -0.4458, 0.9129, -2.7461,\n", " 0.0098, 0.9013, 0.7363, -0.7745, -0.8155, 1.5463, 0.0723, -0.5926,\n", " -0.2548, 0.4572, -0.9398],\n", " [-0.6620, 0.3081, 0.4002, 1.4361, -0.9089, -0.3304, 0.1364, -1.0887,\n", " 0.6219, 0.6222, -0.6723, 0.9616, -0.4970, 0.2513, -0.2499, 1.1944,\n", " 0.7755, 1.2483, 0.8315, -0.1463, 0.2847, -0.4837, -0.7275, -2.0723,\n", " -2.0994, -0.3072, -1.8622]])" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "W = torch.randn((27, 27)) #Generating the weights\n", "xenc @ W #Doing matrix multiplication" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor(-0.4458)" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#Checking for one element\n", "(xenc @ W)[3, 13]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor(-0.4458)" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#Doing manual multiplication for verifying\n", "(xenc[3] * W[:,13]).sum()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([[0.0415, 0.0098, 0.0279, 0.0132, 0.0296, 0.1012, 0.1459, 0.0320, 0.0631,\n", " 0.0238, 0.0137, 0.0100, 0.0239, 0.0217, 0.0112, 0.0172, 0.0139, 0.0257,\n", " 0.0134, 0.0645, 0.0797, 0.1190, 0.0046, 0.0322, 0.0475, 0.0088, 0.0050],\n", " [0.1218, 0.0263, 0.0344, 0.1287, 0.0271, 0.0220, 0.0589, 0.0171, 0.0221,\n", " 0.0112, 0.0279, 0.0537, 0.0242, 0.0066, 0.0340, 0.0158, 0.0256, 0.0234,\n", " 0.0326, 0.0486, 0.0079, 0.0270, 0.0353, 0.0714, 0.0319, 0.0315, 0.0332],\n", " [0.0199, 0.0501, 0.1055, 0.0303, 0.0147, 0.0452, 0.0219, 0.0449, 0.0053,\n", " 0.0467, 0.0200, 0.0128, 0.0681, 0.0176, 0.0685, 0.0018, 0.0278, 0.0677,\n", " 0.0574, 0.0127, 0.0122, 0.1290, 0.0295, 0.0152, 0.0213, 0.0434, 0.0107],\n", " [0.0199, 0.0501, 0.1055, 0.0303, 0.0147, 0.0452, 0.0219, 0.0449, 0.0053,\n", " 0.0467, 0.0200, 0.0128, 0.0681, 0.0176, 0.0685, 0.0018, 0.0278, 0.0677,\n", " 0.0574, 0.0127, 0.0122, 0.1290, 0.0295, 0.0152, 0.0213, 0.0434, 0.0107],\n", " [0.0146, 0.0385, 0.0422, 0.1188, 0.0114, 0.0203, 0.0324, 0.0095, 0.0526,\n", " 0.0526, 0.0144, 0.0739, 0.0172, 0.0363, 0.0220, 0.0933, 0.0614, 0.0985,\n", " 0.0649, 0.0244, 0.0376, 0.0174, 0.0137, 0.0036, 0.0035, 0.0208, 0.0044]])" ] }, 
"execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "logits = xenc @ W #log-counts\n", "counts = logits.exp() #equivalent to N, as done in A-Main-Notebook\n", "probs = counts / counts.sum(1, keepdims=True) #Normalising the rows (as we had done in A-Main as well. To calculate the probability)\n", "probs" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "-------------" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "-----------" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# SUMMARY ------------------------------>>>>\n", "#Run the first 4 cells of this notebook and then continue" ] }, { "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([ 0, 5, 13, 13, 1])" ] }, "execution_count": 27, "metadata": {}, "output_type": "execute_result" } ], "source": [ "xs" ] }, { "cell_type": "code", "execution_count": 28, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([ 5, 13, 13, 1, 0])" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ys" ] }, { "cell_type": "code", "execution_count": 29, "metadata": {}, "outputs": [], "source": [ "# randomly initialize 27 neurons' weights. each neuron receives 27 inputs\n", "g = torch.Generator().manual_seed(2147483647)\n", "W = torch.randn((27, 27), generator=g)" ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [], "source": [ "\n", "xenc = F.one_hot(xs, num_classes=27).float() # input to the network: one-hot encoding\n", "logits = xenc @ W # predict log-counts\n", "counts = logits.exp() # counts, equivalent to N\n", "probs = counts / counts.sum(1, keepdims=True) # probabilities for next character\n", "# btw: the last 2 lines here are together called a 'softmax'" ] }, { "cell_type": "code", "execution_count": 31, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "torch.Size([5, 27])" ] }, "execution_count": 31, "metadata": {}, "output_type": "execute_result" } ], "source": [ "probs.shape" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "--------\n", "bigram example 1: .e (indexes 0,5)\n", "input to the neural net: 0\n", "output probabilities from the neural net: tensor([0.0607, 0.0100, 0.0123, 0.0042, 0.0168, 0.0123, 0.0027, 0.0232, 0.0137,\n", " 0.0313, 0.0079, 0.0278, 0.0091, 0.0082, 0.0500, 0.2378, 0.0603, 0.0025,\n", " 0.0249, 0.0055, 0.0339, 0.0109, 0.0029, 0.0198, 0.0118, 0.1537, 0.1459])\n", "label (actual next character): 5\n", "probability assigned by the net to the the correct character: 0.01228625513613224\n", "log likelihood: -4.399273872375488\n", "negative log likelihood: 4.399273872375488\n", "--------\n", "bigram example 2: em (indexes 5,13)\n", "input to the neural net: 5\n", "output probabilities from the neural net: tensor([0.0290, 0.0796, 0.0248, 0.0521, 0.1989, 0.0289, 0.0094, 0.0335, 0.0097,\n", " 0.0301, 0.0702, 0.0228, 0.0115, 0.0181, 0.0108, 0.0315, 0.0291, 0.0045,\n", " 0.0916, 0.0215, 0.0486, 0.0300, 0.0501, 0.0027, 0.0118, 0.0022, 0.0472])\n", "label (actual next character): 13\n", "probability assigned by the net to the the correct character: 0.018050700426101685\n", "log likelihood: -4.014570713043213\n", "negative log likelihood: 4.014570713043213\n", "--------\n", "bigram example 3: mm (indexes 13,13)\n", "input to the neural net: 13\n", "output probabilities from the neural net: 
tensor([0.0312, 0.0737, 0.0484, 0.0333, 0.0674, 0.0200, 0.0263, 0.0249, 0.1226,\n", " 0.0164, 0.0075, 0.0789, 0.0131, 0.0267, 0.0147, 0.0112, 0.0585, 0.0121,\n", " 0.0650, 0.0058, 0.0208, 0.0078, 0.0133, 0.0203, 0.1204, 0.0469, 0.0126])\n", "label (actual next character): 13\n", "probability assigned by the net to the the correct character: 0.026691533625125885\n", "log likelihood: -3.623408794403076\n", "negative log likelihood: 3.623408794403076\n", "--------\n", "bigram example 4: ma (indexes 13,1)\n", "input to the neural net: 13\n", "output probabilities from the neural net: tensor([0.0312, 0.0737, 0.0484, 0.0333, 0.0674, 0.0200, 0.0263, 0.0249, 0.1226,\n", " 0.0164, 0.0075, 0.0789, 0.0131, 0.0267, 0.0147, 0.0112, 0.0585, 0.0121,\n", " 0.0650, 0.0058, 0.0208, 0.0078, 0.0133, 0.0203, 0.1204, 0.0469, 0.0126])\n", "label (actual next character): 1\n", "probability assigned by the net to the the correct character: 0.07367686182260513\n", "log likelihood: -2.6080665588378906\n", "negative log likelihood: 2.6080665588378906\n", "--------\n", "bigram example 5: a. (indexes 1,0)\n", "input to the neural net: 1\n", "output probabilities from the neural net: tensor([0.0150, 0.0086, 0.0396, 0.0100, 0.0606, 0.0308, 0.1084, 0.0131, 0.0125,\n", " 0.0048, 0.1024, 0.0086, 0.0988, 0.0112, 0.0232, 0.0207, 0.0408, 0.0078,\n", " 0.0899, 0.0531, 0.0463, 0.0309, 0.0051, 0.0329, 0.0654, 0.0503, 0.0091])\n", "label (actual next character): 0\n", "probability assigned by the net to the the correct character: 0.014977526850998402\n", "log likelihood: -4.201204299926758\n", "negative log likelihood: 4.201204299926758\n", "=========\n", "average negative log likelihood, i.e. loss = 3.7693049907684326\n" ] } ], "source": [ "nlls = torch.zeros(5)\n", "for i in range(5):\n", " # i-th bigram:\n", " x = xs[i].item() # input character index\n", " y = ys[i].item() # label character index\n", " print('--------')\n", " print(f'bigram example {i+1}: {itos[x]}{itos[y]} (indexes {x},{y})')\n", " print('input to the neural net:', x)\n", " print('output probabilities from the neural net:', probs[i])\n", " print('label (actual next character):', y)\n", " p = probs[i, y]\n", " print('probability assigned by the net to the the correct character:', p.item())\n", " logp = torch.log(p)\n", " print('log likelihood:', logp.item())\n", " nll = -logp\n", " print('negative log likelihood:', nll.item())\n", " nlls[i] = nll\n", "\n", "print('=========')\n", "print('average negative log likelihood, i.e. loss =', nlls.mean().item())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "--------------------" ] } ], "metadata": { "kernelspec": { "display_name": "venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.0" } }, "nbformat": 4, "nbformat_minor": 2 }