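# CodeT5-based code generation helper.
# The commented-out blocks below are earlier iterations kept for reference;
# the active implementation begins at the uncommented imports near the bottom.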
# --- Earlier iteration: pipeline-based generation ---
# from transformers import pipeline

# # Load the CodeT5 model. CodeT5 is an encoder-decoder model, so the
# # "text2text-generation" pipeline task is the appropriate one.
# code_pipeline = pipeline("text2text-generation", model="Salesforce/codet5-base")

# def generate_code(instruction: str) -> str:
#     result = code_pipeline(instruction, max_length=256, clean_up_tokenization_spaces=True)
#     return result[0]["generated_text"]


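# --- Earlier iteration: loading a local checkpoint from ./models with trust_remote_code ---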
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# tokenizer = AutoTokenizer.from_pretrained("./models", trust_remote_code=True)
# model = AutoModelForSeq2SeqLM.from_pretrained("./models", trust_remote_code=True)


# def generate_code(instruction: str) -> str:
#     prompt = f"Generate Python code for the following instruction: {instruction}"

#     inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
#     output = model.generate(**inputs, max_length=256)
#     generated_code = tokenizer.decode(output[0], skip_special_tokens=True)

#     return generated_code

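# --- Earlier iteration: same local checkpoint without trust_remote_code, shorter prompt ---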
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# tokenizer = AutoTokenizer.from_pretrained("./models")
# model = AutoModelForSeq2SeqLM.from_pretrained("./models")

# def generate_code(instruction: str) -> str:
#     prompt = f"Generate Python code: {instruction}"
#     inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
#     output = model.generate(**inputs, max_length=256)
#     return tokenizer.decode(output[0], skip_special_tokens=True)

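# --- Earlier iteration: snapshot_download at import time. Note that the
# os.path.exists(MODEL_DIR) check below could never fail, since snapshot_download
# had already created MODEL_DIR, so the download branch was dead code. ---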
# import os
# from huggingface_hub import snapshot_download
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# MODEL_DIR = snapshot_download(repo_id="Salesforce/codet5p-770m-py", cache_dir="./data")

# os.makedirs(MODEL_DIR, exist_ok=True)
# # Globals, loaded lazily by load_model()
# tokenizer = None
# model = None

# def load_model():
#     global tokenizer, model
#     if tokenizer is None or model is None:
#         if not os.path.exists(MODEL_DIR):
#             print("Downloading CodeT5 model to persistent /data directory...")
#             tokenizer = AutoTokenizer.from_pretrained("Salesforce/codet5p-770m-py", cache_dir=MODEL_DIR)
#             model = AutoModelForSeq2SeqLM.from_pretrained("Salesforce/codet5p-770m-py", cache_dir=MODEL_DIR)
#         else:
#             print("Loading model from /data directory...")    
#             tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
#             model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_DIR)
#         print("Model loaded.")

# def generate_code(instruction: str) -> str:
#     load_model()  # Ensure model is loaded before use
#     prompt = f"# Instruction: {instruction}\n# Python Code:\n"

#     inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
#     output = model.generate(**inputs, max_length=256)
#     return tokenizer.decode(output[0], skip_special_tokens=True)


import os
from huggingface_hub import snapshot_download
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Constants
HF_MODEL_ID = "Salesforce/codet5p-770m-py"
MODEL_DIR = "./data/codet5_model"  # explicit, persistent location for the weights
os.makedirs(MODEL_DIR, exist_ok=True)

# Download the model snapshot into MODEL_DIR. This is idempotent: files that are
# already present are not downloaded again. local_dir_use_symlinks=False stores
# real copies in MODEL_DIR rather than symlinks into the Hugging Face cache.
snapshot_download(
    repo_id=HF_MODEL_ID,
    cache_dir=MODEL_DIR,
    local_dir=MODEL_DIR,
    local_dir_use_symlinks=False,
)

# Globals, populated lazily by load_model()
tokenizer = None
model = None

def load_model():
    """Load the tokenizer and model once; subsequent calls are no-ops."""
    global tokenizer, model
    if tokenizer is None or model is None:
        print("Loading CodeT5 model from persistent directory...")
        tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
        model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_DIR)
        print("Model loaded successfully.")

def generate_code(instruction: str) -> str:
    """Generate Python code from a natural-language instruction."""
    load_model()  # ensure the model is in memory before generating
    prompt = f"# Instruction: {instruction}\n# Python Code:\n"

    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(**inputs, max_length=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
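
# Example usage: a minimal smoke test (an illustrative sketch; the instruction
# string below is an arbitrary example, not part of the original module).
if __name__ == "__main__":
    print(generate_code("Write a function that returns the nth Fibonacci number."))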