# Mr.Events / fixed_app.py
# Assuming `client` is an instance of huggingface_hub.InferenceClient and
# `model_id` is the name of the model on the Hub.
import os

from transformers import AutoTokenizer

try:
    # InferenceClient does not expose a tokenizer attribute; this is the
    # line that raised the original AttributeError.
    tokenizer = client.tokenizer
except AttributeError:
    # If client.tokenizer is not available, load the tokenizer separately
    # with the `transformers` library. Pass a token (read here from the
    # HF_TOKEN environment variable) in case the model repo is private.
    tokenizer = AutoTokenizer.from_pretrained(
        model_id,
        token=os.environ.get("HF_TOKEN"),
    )

# Use the tokenizer to turn the chat messages into a generation prompt.
prompt_for_generation = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
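
# A minimal usage sketch: send the rendered prompt to the endpoint for
# generation. This assumes `client` targets a text-generation model;
# max_new_tokens=512 is an illustrative value, not from the original app.
response = client.text_generation(
    prompt_for_generation,
    max_new_tokens=512,
)
print(response)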