Update for Chat Template
README.md CHANGED
@@ -109,12 +109,13 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 model = AutoModelForCausalLM.from_pretrained("DRXD1000/Phoenix", torch_dtype=torch.bfloat16, device_map="auto")
 tokenizer = AutoTokenizer.from_pretrained("DRXD1000/Phoenix")
-prompt =
-
-
-
-
-"""
+prompt = messages = [
+    {
+        "role": "system",
+        "content": "",  # Not recommended: Phoenix does not respond well to system prompts.
+    },
+    {"role": "user", "content": "Erkläre mir was KI ist"},
+]
 inputs = tokenizer.apply_chat_template(prompt, return_tensors="pt").to("cuda")
 outputs = model.generate(inputs, num_return_sequences=1, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
 response = tokenizer.decode(outputs[0], skip_special_tokens=True)
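For context, below is a minimal, self-contained sketch of the updated snippet as it would run end to end. It is an illustration, not part of the commit: it assumes a transformers version recent enough for apply_chat_template to accept add_generation_prompt, drops the empty system turn (the commit's own comment advises against system prompts for Phoenix), and slices off the prompt tokens before decoding so only the model's reply is returned.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "DRXD1000/Phoenix", torch_dtype=torch.bfloat16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("DRXD1000/Phoenix")

# A single user turn; the commit's comment advises against system prompts for Phoenix.
messages = [{"role": "user", "content": "Erkläre mir was KI ist"}]

# apply_chat_template renders the turns with the model's chat template and returns
# input ids as a tensor; add_generation_prompt appends the assistant header so the
# model continues as the assistant rather than extending the user turn.
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(
    inputs,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_k=50,
    top_p=0.95,
)

# Decode only the newly generated tokens so the prompt is not echoed back.
response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
print(response)

Note that the committed line prompt = messages = [ ... ] is a chained assignment binding both names to the same list; either name can be passed to apply_chat_template, and the sketch above uses messages alone.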