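# Minimal Streamlit demo: show a slider and echo the selected value squared.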
import streamlit as st

x = st.slider('Select a value')
st.write(x, 'squared is', x * x)


# Reference code (currently disabled by the string literal below): loads the
# fine-tuned Mistral-7B adapter and runs inference.
'''

# In a notebook, install the dependencies first:
#   !pip install git+https://github.com/huggingface/transformers
#   !pip install -q peft accelerate bitsandbytes safetensors
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

adapters_name = "atharvapawar/flaskCodemistral-7b-mj-finetuned"
# Sharded mirror of "mistralai/Mistral-7B-Instruct-v0.1"
model_name = "bn22/Mistral-7B-Instruct-v0.1-sharded"

device = "cuda"  # device to run generation on

# 4-bit NF4 quantization with double quantization; compute in bfloat16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
# Attach the fine-tuned LoRA adapter on top of the quantized base model.
model = PeftModel.from_pretrained(model, adapters_name)
# model = model.merge_and_unload()  # optionally merge adapter weights for faster inference

tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.bos_token_id = 1  # ensure the BOS id matches Mistral's vocabulary
print(f"Successfully loaded the model {model_name} into memory")

def MistralModel(prompt, tokenLimit):
    # Wrap the prompt in Mistral's instruction template.
    text = "[INST] " + prompt + " [/INST]"
    # Earlier experiments prepended a task description, e.g.
    # "Identify the changes made to the given code, the associated CWE, and its severity."

    encoded = tokenizer(text, return_tensors="pt", add_special_tokens=False)
    # Move the input ids to the GPU; the model itself is already placed by device_map.
    model_input = encoded.to(device)
    generated_ids = model.generate(**model_input, max_new_tokens=tokenLimit, do_sample=True)
    decoded = tokenizer.batch_decode(generated_ids)
    return decoded[0]

# Example call; `instruction` is a placeholder prompt.
instruction = "Explain what a Common Weakness Enumeration (CWE) is."
responses = MistralModel(instruction, 250)
print(responses)

'''
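
# A sketch of wiring the disabled helper into the Streamlit UI above, assuming
# the model-loading block is re-enabled so that MistralModel() is defined.
# st.text_area, st.button, and st.spinner are standard Streamlit widgets.
#
# prompt = st.text_area('Enter a prompt')
# if st.button('Generate') and prompt:
#     with st.spinner('Generating...'):
#         st.write(MistralModel(prompt, 250))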