Spaces: Runtime error
import gradio as gr
import torch
from diffusers import DiffusionPipeline
# Device detection so the app runs on GPU or CPU
if torch.cuda.is_available():
    device = "cuda"  # NVIDIA GPUs (AMD GPUs under ROCm also report as "cuda")
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
    device = "mps"  # Apple Silicon (M1/M2) and other Metal-backed GPUs
else:
    device = "cpu"  # No GPU available

# Data type: bfloat16 when a CUDA GPU is available, float32 otherwise
dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
# Load the model only once and keep it resident in memory
pipe = None

def load_model():
    global pipe
    if pipe is None:
        # Load the pipeline and move it to the selected device
        pipe = DiffusionPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-schnell",
            torch_dtype=dtype,
        )
        # On GPUs with limited VRAM, pipe.enable_model_cpu_offload() is an
        # alternative to pipe.to(device) that moves weights between RAM and GPU
        pipe.to(device)
MAX_SEED = torch.iinfo(torch.int32).max

def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, num_images=1):
    load_model()  # Make sure the model is loaded before running inference
    if randomize_seed:
        seed = torch.randint(0, MAX_SEED, (1,)).item()
    generator = torch.Generator(device=device).manual_seed(seed)
    images = []
    for _ in range(num_images):
        image = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=0.0,
        ).images[0]
        images.append(image)
    return images, seed
examples = [
    "a tiny astronaut hatching from an egg on the moon",
    "a cat holding a sign that says hello world",
    "an anime illustration of a wiener schnitzel",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""# FLUX.1 [schnell]
12B param rectified flow transformer distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) for 4 step generation
[[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-schnell)]
""")
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        # Use gr.Gallery so multiple images can be shown at once
        results = gr.Gallery(label="Results", show_label=False, elem_id="image-gallery")
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=2048,  # Adjust the maximum size as needed
                    step=32,
                    value=1024,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=2048,  # Adjust the maximum size as needed
                    step=32,
                    value=1024,
                )

            with gr.Row():
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=4,
                )
                # Control for how many images to generate per run
                num_images = gr.Slider(
                    label="Number of images",
                    minimum=1,
                    maximum=10,  # Adjust the maximum number of images as needed
                    step=1,
                    value=1,
                )
        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[results, seed],
            cache_examples="lazy",
        )
    # Connect the Run button to the infer function
    run_button.click(
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps, num_images],
        outputs=[results, seed],
    )

if __name__ == "__main__":
    # share=True creates a public link when running outside of Spaces
    demo.launch(share=True)
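
For reference, a minimal local smoke test, assuming the code above is saved as app.py on a machine with a GPU (or enough RAM) and with torch, diffusers, and gradio installed; the output file name and the 512x512 size are illustrative choices, not part of the Space:

import app

# Generate one image directly through infer(), bypassing the Gradio UI
images, used_seed = app.infer(
    "a tiny astronaut hatching from an egg on the moon",
    randomize_seed=True,      # pick a fresh seed for this run
    width=512,
    height=512,               # smaller than the UI default to keep the test quick
    num_inference_steps=4,    # FLUX.1 [schnell] is distilled for ~4 steps
    num_images=1,
)
images[0].save("astronaut.png")
print("Generated with seed", used_seed)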