fix: disable fast tokenizer to avoid SentencePiece parse error
app.py
CHANGED
@@ -8,7 +8,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 # ✅ Public, compatible model
 model_id = "NousResearch/Nous-Hermes-2-Mistral-7B-DPO"
-tokenizer = AutoTokenizer.from_pretrained(model_id)
+tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
 model = AutoModelForCausalLM.from_pretrained(model_id)
 
 llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)
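For reference, a minimal sketch (not part of this commit) of a more defensive loading path: try the fast tokenizer first and fall back to the slow SentencePiece-based one only if parsing fails. It assumes the `sentencepiece` package is installed, since the slow tokenizer requires it.

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_id = "NousResearch/Nous-Hermes-2-Mistral-7B-DPO"

# Prefer the fast (Rust) tokenizer; fall back to the slow SentencePiece
# tokenizer if the fast one fails to load/parse. The slow path needs the
# `sentencepiece` package installed.
try:
    tokenizer = AutoTokenizer.from_pretrained(model_id)
except Exception:
    tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)

model = AutoModelForCausalLM.from_pretrained(model_id)
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)

This keeps the fast tokenizer when it works (it is noticeably quicker for long prompts) while still avoiding the parse error that motivated pinning use_fast=False here.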