#!/usr/bin/env python3
"""
Script to run the Gradio app for nanoGPT
Author: Shilpaj Bhalerao
Date: Nov 01, 2023
"""
# Third-Party Imports
import gradio as gr
import torch
# Local Imports
from gpt import GPTLanguageModel
device = 'cpu'


def inference(number_of_characters):
    """
    Generate text from the model and return it
    :param number_of_characters: Number of characters to be generated by the model
    :return: Generated text as a string
    """
    context = torch.zeros((1, 1), dtype=torch.long, device=device)
    # Cast to int because the Gradio slider may pass the value as a float
    output = decode(model.generate(context, max_new_tokens=int(number_of_characters))[0].tolist())
    return output

if __name__ == '__main__':
    # Extracted characters from the training set
    chars = ['\n', ' ', '!', '$', '&', "'", ',', '-', '.', '3', ':', ';', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
             'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c',
             'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
             'y', 'z']
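    # 65 unique characters, so the model works with a vocabulary size of 65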
    vocab_size = len(chars)

    # Create a mapping from characters to integers and back
    stoi = {ch: i for i, ch in enumerate(chars)}
    itos = {i: ch for i, ch in enumerate(chars)}
    encode = lambda s: [stoi[c] for c in s]           # encoder: take a string, output a list of integers
    decode = lambda l: ''.join([itos[i] for i in l])  # decoder: take a list of integers, output a string
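    # e.g. encode("hi") -> [46, 47] and decode([46, 47]) -> "hi"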

    # Load Trained Model
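    # GPTLanguageModel is defined in the local gpt.py (a Karpathy-style decoder-only transformer);
    # map_location makes sure the checkpoint loads on a machine without a GPU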
    model = GPTLanguageModel(vocab_size)
    model.load_state_dict(torch.load('nanoGPT.pth', map_location=torch.device('cpu')))
    model.eval()

    with gr.Blocks() as demo:
        gr.Markdown(
            """
            # NanoGPT
            - This application is based on the nanoGPT model trained in this [video](https://www.youtube.com/watch?v=kCc8FmEb1nY&ab_channel=AndrejKarpathy) by Andrej Karpathy
            - The model is trained to generate Shakespeare-style text
            - This is the first step in training a ChatGPT-like model, where the aim is simply to generate plausible text
            - It is a decoder-only model
            - The model predicts the next character from the previous characters
            - Select the total number of characters to generate, then click **Generate** to produce the text
            """
        )

        # Input
        number_slider = [gr.Slider(1, 3000, value=100, step=1,
                                   label="Number of characters to generate in the Shakespeare style")]

        # Output
        generated_text = [gr.Text(label="Generated text")]

        # Button
        generate_button = gr.Button("Generate")

        # Inference on Button Click
        generate_button.click(inference, inputs=number_slider, outputs=generated_text)

    gr.close_all()
    demo.launch()
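    # When run locally (python app.py), Gradio serves the UI at http://127.0.0.1:7860 by default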