from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Odia prompt: "What kinds of forests are found in Odisha?"
text = "ଓଡ଼ିଶାରେ କେଉଁ ପ୍ରକାରର ଜଙ୍ଗଲ ରହିଛି?"
# Use the first GPU if available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load the tokenizer and the fine-tuned model in half precision, then move the model to the device.
tokenizer = AutoTokenizer.from_pretrained("Ranjit/llama_v2_or")
model = AutoModelForCausalLM.from_pretrained(
    "Ranjit/llama_v2_or",
    trust_remote_code=True,
    torch_dtype=torch.float16,
).to(device)

# Tokenize the prompt and move the input tensors to the same device as the model.
inputs = tokenizer(text, return_tensors="pt").to(device)

# Generate a response; gradients are not needed for inference.
with torch.no_grad():
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=1024,
        pad_token_id=tokenizer.eos_token_id,
    )

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
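# Note: with causal LM generation, `outputs[0]` contains the prompt tokens followed by
# the newly generated tokens, so the print above repeats the Odia question before the
# answer. A minimal sketch (reusing `inputs` and `outputs` from above) that decodes
# only the generated answer:
prompt_length = inputs["input_ids"].shape[1]
answer = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)
print(answer)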