# app.py — FoodExtract Gradio demo (Hugging Face Space)
# (The "Spaces: Running" lines here were web-page status residue from the
# Space's UI capture, not part of the program.)
# Load dependencies
import time

import gradio as gr
import spaces  # Optional: runs the model on the Space's GPU (much faster inference)
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
def pred_on_text(input_text):
    """Run the fine-tuned food-extraction model on one piece of text.

    Parameters
    ----------
    input_text : str
        Free-form text (e.g. a meal description) to extract food/drink
        items from.

    Returns
    -------
    tuple
        ``(generated_text, raw_output, total_time)`` — the model's reply
        string, the pipeline's full raw return value, and the generation
        time in seconds rounded to 4 decimal places.
    """
    # perf_counter is a monotonic clock, the correct tool for measuring an
    # elapsed interval (time.time() can jump if the system clock is adjusted).
    start_time = time.perf_counter()
    raw_output = loaded_model_pipeline(
        text_inputs=[{"role": "user", "content": input_text}],
        max_new_tokens=256,
        disable_compile=True,
    )
    total_time = round(time.perf_counter() - start_time, 4)
    # The chat pipeline returns the full message history; the model's reply
    # is the LAST message. Index -1 matches the original's hard-coded [1]
    # for a single user turn, but stays correct if extra (e.g. system)
    # messages are ever prepended.
    generated_text = raw_output[0]["generated_text"][-1]["content"]
    return generated_text, raw_output, total_time
# Load the fine-tuned model from the Hugging Face Hub.
# Note: you may have to replace the username `objects76` with your own.
MODEL_PATH = "objects76/FoodExtract-gemma-3-270m-fine-tune-v1"

# Load the tokenizer explicitly so the pipeline is guaranteed to use the
# fine-tuned repo's tokenizer.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)

# Build a text-generation pipeline straight from the model path and let the
# pipeline handle model instantiation (a previously commented-out variant
# loaded AutoModelForCausalLM manually first; this is equivalent and shorter).
loaded_model_pipeline = pipeline(
    "text-generation",
    model=MODEL_PATH,
    tokenizer=tokenizer,
    torch_dtype="auto",  # use the checkpoint's native dtype
    device_map="auto",   # GPU when available, otherwise CPU
    # NOTE(review): eager attention was chosen here (also in the removed
    # manual-loading variant) — presumably per the Gemma 3 model card; confirm.
    model_kwargs={"attn_implementation": "eager"},
)
# Create the demo
# Markdown description rendered at the top of the Gradio interface.
# NOTE(review): the Korean text below appears mojibake'd (mis-decoded bytes)
# in this copy of the file — it is preserved byte-for-byte here; verify
# against the original UTF-8 source before shipping.
description = """ν μ€νΈμμ μμκ³Ό μλ£ νλͺ©μ μΆμΆνλ νμΈνλλ SLM(Small Language Model)
- basemodel: [Gemma 3 270M](https://huggingface.co/google/gemma-3-270m-it)
- dataset: [FoodExtract-1k λ°μ΄ν°μ ](https://huggingface.co/datasets/objects76/FoodExtract-1k)
* μ λ ₯ (str): μμ ν μ€νΈ λ¬Έμμ΄ λλ μ΄λ―Έμ§ μΊ‘μ (μ: "νν΄ μμ μλ κ°μ μ¬μ§" λλ "λ² μ΄μ»¨, κ³λ, ν μ€νΈκ° μλ μμΉ¨ μμ¬")
* μΆλ ₯ (str): μμ/λΉμμ λΆλ₯μ μΆμΆλ λͺ μ¬ν μμ λ° μλ£ νλͺ©, λ€μν μμ νκ·Έκ° ν¬ν¨λ μμ± ν μ€νΈ
For example:
* Input: "For breakfast I had eggs, bacon and toast and a glass of orange juice"
* Output:
```
food_or_drink: 1
tags: fi, di
foods: eggs, bacon, toast
drinks: orange juice
```
"""
# Wire the prediction function into a simple Gradio interface:
# one free-text input, three outputs (reply text, raw pipeline output, timing).
demo = gr.Interface(
    fn=pred_on_text,
    inputs=gr.TextArea(lines=4, label="Input Text"),
    outputs=[
        gr.TextArea(lines=4, label="Generated Text"),
        gr.TextArea(lines=7, label="Raw Output"),
        gr.Number(label="Generation Time (s)"),
    ],
    title="π³ Structured FoodExtract with a Fine-Tuned Gemma 3 270M",
    description=description,
    examples=[
        ["Hello world! This is my first fine-tuned LLM!"],
        ["그릴μ κ΅¬μ΄ λ°λΌλ¬Έλμ μ보카λ, μ¬λ¦¬λΈ, ν λ§ν , μ΄ν리μ λλ μ±μ΄ κ³λ€μ¬μ§ μλ¬λκ° μλ ν μ μ μμ"],
        ["British Breakfast with baked beans, fried eggs, black pudding, sausages, bacon, mushrooms, a cup of tea and toast and fried tomatoes"],
        ["Steak tacos"],
        ["A photo of a dog sitting on a beach"],
    ],
)

if __name__ == "__main__":
    # share=False: serve locally only (a deployed Space supplies its own URL).
    demo.launch(share=False)