File size: 504 Bytes
45c96b0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
from transformers import AutoModel, AutoTokenizer
import sentencepiece as spm
from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model

def main():
    """Load a conversational model from the current directory and smoke-test it.

    Loads the tokenizer and model from ``./`` with ``trust_remote_code=True``
    (required for models that ship custom Python code, e.g. ChatGLM-style
    checkpoints), puts the model in eval mode, sends one greeting prompt, and
    prints the generated reply.

    NOTE(security): trust_remote_code executes arbitrary Python bundled with
    the checkpoint — only run this against a checkpoint you trust.
    """
    tokenizer = AutoTokenizer.from_pretrained('./', trust_remote_code=True)
    model = AutoModel.from_pretrained('./', trust_remote_code=True)
    # eval() disables dropout etc. for deterministic inference.
    model = model.eval()
    # "你好" is the Chinese greeting prompt; a small max_new_tokens keeps
    # this smoke test fast. The returned conversation history is unused.
    response, _history = model.chat(tokenizer, "你好", history=[], max_new_tokens=16)
    print(response)



# Run the smoke test only when executed as a script, not on import.
if __name__ == "__main__":
    main()