FireRed-Image-Edit-1.0-8bit

FireRed-Image-Edit-1.0-8bit is an 8-bit quantized edition of FireRed-Image-Edit-1.0 (FireRedTeam). It delivers the same instruction-driven diffusion-transformer image editing capabilities with a significantly reduced memory footprint and improved inference efficiency.

The original model was built on a 1.6B-sample training corpus refined into over 100M high-quality text-to-image and editing pairs through cleaning, stratification, auto-labeling, and dual-stage semantic filtering. This quantized release preserves the full multi-stage training pipeline: large-scale pre-training, supervised fine-tuning, and reinforcement learning, including techniques such as Multi-Condition Aware Bucket Sampling for variable resolutions, Stochastic Instruction Alignment, Asymmetric Gradient Optimization for stable DPO, DiffusionNFT with layout-OCR rewards for precise text editing, and a differentiable Consistency Loss for strong identity preservation.

8-bit quantization lowers VRAM requirements and accelerates deployment while maintaining high prompt alignment, semantic consistency, and visual fidelity across diverse editing scenarios: photo restoration, object insertion and modification, style transfer with text fidelity, multi-image virtual try-on, and layout-aware text editing. Optimized for practical workflows and ComfyUI integration, this version runs on consumer-grade GPUs without substantial quality degradation, making it suitable for research, production, and lightweight deployment environments.
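Before the full demo below, here is a minimal sketch of what an on-the-fly 8-bit load with bitsandbytes looks like in diffusers. This is an illustration, not the confirmed layout of this repository: if the checkpoint already ships pre-quantized weights, the quantization_config is unnecessary and the plain from_pretrained call in the Quick Start is all you need.

import torch
from diffusers import BitsAndBytesConfig, QwenImageTransformer2DModel

# Assumption: weights are quantized at load time. Skip quantization_config
# if the repository already stores the transformer in 8-bit.
quant_config = BitsAndBytesConfig(load_in_8bit=True)
transformer = QwenImageTransformer2DModel.from_pretrained(
    "prithivMLmods/FireRed-Image-Edit-1.0-8bit",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)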


Quick Start with Diffusers 🧨

Install the required packages

transformers   # v4.57.6
torch          # v2.9.1+cu128
diffusers      # v0.37.0.dev0
bitsandbytes   # v0.49.2
gradio         # v6.6.0
accelerate     # v1.12.0

Run FireRed-Image-Edit-1.0-8bit [Demo]

import os
import gc
import gradio as gr
import numpy as np
# import spaces  # Uncomment this import and the @spaces.GPU decorators below if running on HF ZeroGPU

import torch
import random
from PIL import Image

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
print("torch.__version__ =", torch.__version__)
print("Using device:", device)

from diffusers.models import QwenImageTransformer2DModel
from diffusers import QwenImageEditPlusPipeline
from diffusers.utils import load_image

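# Compute dtype for activations and any non-quantized submodules.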
dtype = torch.bfloat16

transformer = QwenImageTransformer2DModel.from_pretrained(
    "prithivMLmods/FireRed-Image-Edit-1.0-8bit",
    subfolder="transformer",
    torch_dtype=dtype
)

pipe = QwenImageEditPlusPipeline.from_pretrained(
    "prithivMLmods/FireRed-Image-Edit-1.0-8bit",
    transformer=transformer,
    torch_dtype=dtype
).to(device)

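# Largest 32-bit signed integer; upper bound for the seed slider.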
MAX_SEED = np.iinfo(np.int32).max

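# Fit the working resolution to the first uploaded image: scale the longer
# side to 1024 px, preserve the aspect ratio, and round both dimensions
# down to multiples of 8 (required by the pipeline's latent downsampling).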
def update_dimensions_on_upload(image):
    if image is None:
        return 1024, 1024
    
    original_width, original_height = image.size
    
    if original_width > original_height:
        new_width = 1024
        aspect_ratio = original_height / original_width
        new_height = int(new_width * aspect_ratio)
    else:
        new_height = 1024
        aspect_ratio = original_width / original_height
        new_width = int(new_height * aspect_ratio)
        
    new_width = (new_width // 8) * 8
    new_height = (new_height // 8) * 8
    
    return new_width, new_height

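# Main inference entry point: normalizes the gallery items to RGB PIL
# images, resolves the seed, derives the output size from the first image,
# and runs the editing pipeline once.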
#@spaces.GPU
def infer(
    images,
    prompt,
    seed,
    randomize_seed,
    guidance_scale,
    steps,
    progress=gr.Progress(track_tqdm=True)
):
    gc.collect()
    torch.cuda.empty_cache()

    if not images:
        raise gr.Error("Please upload at least one image to edit.")

    pil_images = []
    for item in images:
        try:
            # Gallery items may arrive as (path, caption) tuples, bare paths,
            # PIL images, or file-like objects with a .name attribute.
            if isinstance(item, (tuple, list)):
                path_or_img = item[0]
            else:
                path_or_img = item

            if isinstance(path_or_img, str):
                pil_images.append(Image.open(path_or_img).convert("RGB"))
            elif isinstance(path_or_img, Image.Image):
                pil_images.append(path_or_img.convert("RGB"))
            else:
                pil_images.append(Image.open(path_or_img.name).convert("RGB"))
        except Exception as e:
            print(f"Skipping invalid image item: {e}")
            continue

    if not pil_images:
        raise gr.Error("Could not process uploaded images.")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator(device=device).manual_seed(seed)
    negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"

    width, height = update_dimensions_on_upload(pil_images[0])

    try:
        result_image = pipe(
            image=pil_images,
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=steps,
            generator=generator,
            true_cfg_scale=guidance_scale,
        ).images[0]

        return result_image, seed
    finally:
        # Free references and cached CUDA memory after every call.
        gc.collect()
        torch.cuda.empty_cache()

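# Thin wrapper for example-driven calls: accepts a single path or a list
# of paths and runs infer with default settings.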
#@spaces.GPU
def infer_example(images, prompt):
    if not images:
        return None, 0
    
    if isinstance(images, str):
        images_list = [images]
    else:
        images_list = images
        
    result, seed = infer(
        images=images_list,
        prompt=prompt,
        seed=0,
        randomize_seed=True,
        guidance_scale=1.0,
        steps=20
    )
    return result, seed

css = """
#col-container {
    margin: 0 auto;
    max-width: 1000px;
}
#main-title h1 {font-size: 2.4em !important;}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# **FireRed-Image-Edit-1.0-8bit**", elem_id="main-title")

        with gr.Row(equal_height=True):
            with gr.Column():
                images = gr.Gallery(
                    label="Upload Images", 
                    type="filepath", 
                    columns=2, 
                    rows=1, 
                    height=300,
                    allow_preview=True
                )

                with gr.Row():
                    prompt = gr.Text(
                        label="Edit Prompt",
                        show_label=True,
                        placeholder="e.g., transform into anime..",
                    )

                with gr.Row():
                    run_button = gr.Button("Edit Image", variant="primary")

            with gr.Column():
                output_image = gr.Image(label="Output Image", interactive=False, format="png", height=390)
                
        with gr.Accordion("Advanced Settings", open=False, visible=True):
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
            guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
            steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=20)
        
    run_button.click(
        fn=infer,
        inputs=[images, prompt, seed, randomize_seed, guidance_scale, steps],
        outputs=[output_image, seed]
    )

if __name__ == "__main__":
    demo.queue(max_size=30).launch(mcp_server=True, ssr_mode=False, show_error=True)
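For scripted, non-interactive use, the loaded pipeline can also be called directly. Below is a minimal sketch reusing the pipe and device objects from the snippet above; the input path, prompt, and seed are placeholders:

from diffusers.utils import load_image

source = load_image("input.jpg").convert("RGB")  # placeholder input path
edited = pipe(
    image=[source],
    prompt="replace the background with a snowy mountain range",  # example instruction
    negative_prompt="worst quality, low quality, watermark, blurry",
    num_inference_steps=20,
    true_cfg_scale=1.0,
    generator=torch.Generator(device=device).manual_seed(42),
).images[0]
edited.save("edited.png")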

This repository follows the same release notes, terms and conditions, and license as the original model page, FireRed-Image-Edit-1.0.
