Spaces:
Sleeping
Sleeping
Commit
·
ff99ead
1
Parent(s):
f497773
Fixed a bug in app.py
Browse files
app.py
CHANGED
@@ -4,8 +4,10 @@ from tokenizers import Tokenizer
|
|
4 |
import gradio as gr
|
5 |
from model import LlamaForCausalLM # Import your custom model class
|
6 |
|
7 |
-
# Load
|
8 |
-
tokenizer =
|
|
|
|
|
9 |
|
10 |
# Load the model from a local .pth file
|
11 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
|
4 |
import gradio as gr
|
5 |
from model import LlamaForCausalLM # Import your custom model class
|
6 |
|
7 |
+
# Load tokenizer and model
|
8 |
+
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo2-tokenizer")
|
9 |
+
if tokenizer.pad_token is None:
|
10 |
+
tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else "[PAD]"
|
11 |
|
12 |
# Load the model from a local .pth file
|
13 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|