chbsaikiran committed
Commit 9b3af55 · 1 Parent(s): 1ba7aba

bug in app.py fixed
Files changed (1):
  app.py +4 -11
app.py CHANGED
@@ -1,18 +1,8 @@
 import torch
-from torch import nn
 from transformers import AutoTokenizer
 import gradio as gr
 from model import LlamaForCausalLM  # Import your custom model class
 
-# Load tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo2-tokenizer")
-if tokenizer.pad_token is None:
-    tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else "[PAD]"
-
-# Load the model from a local .pth file
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-checkpoint_path = "model_bin.pth"
-
 # Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo2-tokenizer")
 if tokenizer.pad_token is None:
@@ -24,8 +14,11 @@ model = LlamaForCausalLM(
     dim=576,
     num_layers=30,
     hidden_dim=1536,
-    num_heads=8
+    num_heads=9
 )
+device = "cpu"
+
+checkpoint_path = "model_bin.pth"
 checkpoint = torch.load(checkpoint_path, map_location=device)
 model.load_state_dict(checkpoint['model_state_dict'])
 model.to(device)
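
For reference, a minimal sketch of the load path app.py ends up with after this commit, assembled from the diff above. The constructor arguments of the custom LlamaForCausalLM and the 'model_state_dict' checkpoint key come straight from the diff; the pre-load shape check is a hypothetical addition (not in the repo) illustrating why num_heads=8 presumably failed: a head count that disagrees with the one the checkpoint was saved with leaves attention parameter shapes out of sync, and load_state_dict raises a size-mismatch error.

# sketch: app.py after commit 9b3af55 (names taken from the diff;
# the shape-check loop is an added, hypothetical sanity check)
import torch
from transformers import AutoTokenizer
from model import LlamaForCausalLM  # the repo's custom class, not transformers'

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo2-tokenizer")
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else "[PAD]"

model = LlamaForCausalLM(
    dim=576,
    num_layers=30,
    hidden_dim=1536,
    num_heads=9,  # must match the checkpoint; 8 caused the load failure
)

device = "cpu"  # hardcoded by the commit; the old CUDA probe was dropped
checkpoint_path = "model_bin.pth"
checkpoint = torch.load(checkpoint_path, map_location=device)

# Hypothetical sanity check: compare checkpoint tensor shapes against the
# freshly built model so a config mismatch fails with a readable error.
state = checkpoint['model_state_dict']
for name, param in model.state_dict().items():
    if name in state and state[name].shape != param.shape:
        raise ValueError(f"shape mismatch for {name}: "
                         f"checkpoint {tuple(state[name].shape)} vs model {tuple(param.shape)}")

model.load_state_dict(state)
model.to(device)

Hardcoding device = "cpu" is a reasonable simplification for a CPU-only Space, where the removed torch.cuda.is_available() probe would presumably always have returned False anyway.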