File size: 231 Bytes
9156969
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, pipeline
import torch

# 4-bit NF4 quantization config for loading the causal LM with bitsandbytes.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # quantize model weights to 4-bit at load time
    bnb_4bit_quant_type="nf4",              # NormalFloat4: recommended over plain fp4 for pretrained weights
    bnb_4bit_use_double_quant=True,         # also quantize the quantization constants (saves ~0.4 bit/param)
    bnb_4bit_compute_dtype=torch.bfloat16,  # FIX: default is float32, which makes 4-bit matmuls slow;
                                            # bf16 is the compute dtype recommended by the HF quantization docs
)