# NOTE(review): the two lines below are build-log text pasted from the
# Hugging Face Spaces UI, not Python — commented out so the file parses.
# Spaces:
# Runtime error
# Runtime error
#import gradio as gr
#def greet(name):
#    return "Hello " + name + "!!"
#demo = gr.Interface(fn=greet, inputs="text", outputs="text")
#demo.launch()
#######################
import spaces  # required by Hugging Face ZeroGPU Spaces; must be imported early


def main():
    """Launch a Gradio app that converts Python code to R using CodeLlama-7B-Instruct.

    Loads the tokenizer and model once at startup, then serves a single-textbox
    Gradio interface whose callback generates the R translation.
    """
    # Imports are kept function-local to match the original script's style
    # (everything is deferred until main() actually runs).
    import gradio as gr
    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM

    # NOTE(review): the original ran `pip install gradio==3.50.2` here via
    # os.system — *after* gradio was already imported, so it could not affect
    # this process and risked breaking the environment; removed.
    model_id = "codellama/CodeLlama-7b-Instruct-hf"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",  # let accelerate place layers on available devices
        # fp16 on GPU for memory/speed; fall back to fp32 on CPU
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    )
    # CodeLlama's tokenizer ships without a pad token; reuse EOS so
    # generate() has a valid pad_token_id.
    tokenizer.pad_token = tokenizer.eos_token

    def convert_python_to_r(python_code: str) -> str:
        """Translate *python_code* to R via a single instruct-style prompt."""
        prompt = f"""### Task:
Convert the following Python code to equivalent R code.
### Python code:
{python_code}
### R code:"""
        input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids
        if torch.cuda.is_available():
            input_ids = input_ids.to("cuda")
        outputs = model.generate(
            input_ids,
            # Bound the *new* tokens rather than total length: max_length
            # counts the prompt too, so long inputs would leave no room
            # for the answer.
            max_new_tokens=1024,
            do_sample=True,
            temperature=0.2,
            pad_token_id=tokenizer.eos_token_id,
            num_return_sequences=1,
        )
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # generate() echoes the prompt; keep only the model's answer.
        if "### R code:" in generated_text:
            generated_text = generated_text.split("### R code:")[-1].strip()
        return generated_text

    gr.Interface(
        fn=convert_python_to_r,
        inputs=gr.Textbox(lines=10, placeholder="Paste your Python code here..."),
        outputs="text",
        title="Python to R Code Converter using CodeLlama 7B Instruct",
        description="Enter Python code below, and the tool will convert it to R code using the CodeLlama 7B Instruct model.",
    ).launch()


if __name__ == "__main__":
    main()