File size: 850 Bytes
5881fae
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21

# Assuming 'client' is an instance of InferenceClient and 'model_id' is the model name
# You might need to import AutoTokenizer from transformers
# from transformers import AutoTokenizer

# Before the line causing the error:
# try:
#     tokenizer = client.tokenizer # This line caused the error
# except AttributeError:
#     # If client.tokenizer is not available, load the tokenizer separately
#     # Ensure you have the 'transformers' library installed
#     # You might need to pass authentication_token to AutoTokenizer if your model is private
#     # Note: 'use_auth_token' is deprecated in recent 'transformers' versions; use 'token' instead.
#     # Also requires 'import os' for os.environ.
#     tokenizer = AutoTokenizer.from_pretrained(model_id, token=os.environ.get("HF_TOKEN"))

# Then, use the 'tokenizer' object to apply the chat template:
# prompt_for_generation = tokenizer.apply_chat_template(
#     messages,
#     tokenize=False,
#     add_generation_prompt=True
# )