Commit dd8b944 (1 parent: e6c5f04): application deployed
app.py CHANGED
@@ -8,43 +8,56 @@
 
 #######################
 
-import
-
-
-
-
-
-
-
-
-
-
+import spaces
+
+@spaces.GPU
+def main():
+    # Your existing Gradio app code here
+    import gradio as gr
+    from transformers import AutoTokenizer, AutoModelForCausalLM
+    import torch
+
+    model_id = "codellama/CodeLlama-7b-Instruct-hf"
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id,
+        device_map="auto",
+        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
+    )
+    tokenizer.pad_token = tokenizer.eos_token
 
-
-
-prompt = f"""### Task:
+    def convert_python_to_r(python_code):
+        prompt = f"""### Task:
 Convert the following Python code to equivalent R code.
 
 ### Python code:
 {python_code}
 
 ### R code:"""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids
+        if torch.cuda.is_available():
+            input_ids = input_ids.to("cuda")
+
+        outputs = model.generate(
+            input_ids,
+            max_length=1024,
+            do_sample=True,
+            temperature=0.2,
+            pad_token_id=tokenizer.eos_token_id,
+            num_return_sequences=1
+        )
+        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        if "### R code:" in generated_text:
+            generated_text = generated_text.split("### R code:")[-1].strip()
+        return generated_text
+
+    gr.Interface(
+        fn=convert_python_to_r,
+        inputs=gr.Textbox(lines=10, placeholder="Paste your Python code here..."),
+        outputs="text",
+        title="Python to R Code Converter using CodeLlama 7B Instruct",
+        description="Enter Python code below, and the tool will convert it to R code using the CodeLlama 7B Instruct model."
+    ).launch()
+
+if __name__ == "__main__":
+    main()
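For comparison, below is a minimal sketch of the same app with spaces.GPU applied directly to the inference function rather than to main(), which is the arrangement the ZeroGPU documentation usually shows (GPU time is requested only while a conversion runs). It reuses the names from the diff above (model_id, convert_python_to_r, the Gradio Interface) and is an illustrative sketch, not the committed app.py.

# Sketch only: an alternative layout, not the committed app.py.
# Assumes the ZeroGPU "spaces" package; model loading and the Gradio UI
# are kept as in the diff above, only the decorator placement changes.
import spaces
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "codellama/CodeLlama-7b-Instruct-hf"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
)
tokenizer.pad_token = tokenizer.eos_token

@spaces.GPU  # request the GPU only while a conversion is running
def convert_python_to_r(python_code):
    prompt = f"""### Task:
Convert the following Python code to equivalent R code.

### Python code:
{python_code}

### R code:"""
    input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids
    if torch.cuda.is_available():
        input_ids = input_ids.to("cuda")
    outputs = model.generate(
        input_ids,
        max_length=1024,
        do_sample=True,
        temperature=0.2,
        pad_token_id=tokenizer.eos_token_id,
        num_return_sequences=1
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text after the "### R code:" marker, if present.
    if "### R code:" in generated_text:
        generated_text = generated_text.split("### R code:")[-1].strip()
    return generated_text

if __name__ == "__main__":
    gr.Interface(
        fn=convert_python_to_r,
        inputs=gr.Textbox(lines=10, placeholder="Paste your Python code here..."),
        outputs="text",
        title="Python to R Code Converter using CodeLlama 7B Instruct",
        description="Enter Python code below, and the tool will convert it to R code using the CodeLlama 7B Instruct model."
    ).launch()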