rahul7star committed on
Commit e7a9d63 · verified · 1 Parent(s): 2aef3fc

Create app_quant_latent1.py

Files changed (1)
  1. app_quant_latent1.py +614 -0
app_quant_latent1.py ADDED
@@ -0,0 +1,614 @@
import torch
import spaces
import gradio as gr
import sys
import platform
import diffusers
import transformers
import psutil
import os
import time

import torchvision.transforms as T  # used by latent_to_image below
from PIL import Image

from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
from diffusers import ZImagePipeline, AutoModel
from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig

latent_history = []

# ============================================================
# LOGGING BUFFER
# ============================================================
LOGS = ""

def log(msg):
    global LOGS
    print(msg)
    LOGS += msg + "\n"
    return msg


# ============================================================
# SYSTEM METRICS — LIVE GPU + CPU MONITORING
# ============================================================
def log_system_stats(tag=""):
    try:
        log(f"\n===== 🔥 SYSTEM STATS {tag} =====")

        # ============= GPU STATS =============
        if torch.cuda.is_available():
            allocated = torch.cuda.memory_allocated(0) / 1e9
            reserved = torch.cuda.memory_reserved(0) / 1e9
            total = torch.cuda.get_device_properties(0).total_memory / 1e9
            free = total - allocated

            log(f"💠 GPU Total     : {total:.2f} GB")
            log(f"💠 GPU Allocated : {allocated:.2f} GB")
            log(f"💠 GPU Reserved  : {reserved:.2f} GB")
            log(f"💠 GPU Free      : {free:.2f} GB")

        # ============= CPU STATS =============
        cpu = psutil.cpu_percent()
        ram_used = psutil.virtual_memory().used / 1e9
        ram_total = psutil.virtual_memory().total / 1e9

        log(f"🧠 CPU Usage : {cpu}%")
        log(f"🧠 RAM Used  : {ram_used:.2f} GB / {ram_total:.2f} GB")

    except Exception as e:
        log(f"⚠️ Failed to log system stats: {e}")
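
# NOTE (added sketch): memory_allocated() only counts tensors owned by this
# process, so the "free" figure above can over-report on shared GPUs. If a
# device-wide number is wanted, torch.cuda.mem_get_info() returns the raw
# free/total byte counts directly. Minimal optional helper:
def log_device_free_memory():
    try:
        free_b, total_b = torch.cuda.mem_get_info()
        log(f"💠 GPU Free (device-wide) : {free_b / 1e9:.2f} GB of {total_b / 1e9:.2f} GB")
    except Exception as e:
        log(f"⚠️ mem_get_info unavailable: {e}")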


# ============================================================
# ENVIRONMENT INFO
# ============================================================
log("===================================================")
log("🔍 Z-IMAGE-TURBO DEBUGGING + LIVE METRIC LOGGER")
log("===================================================\n")

log(f"📌 PYTHON VERSION       : {sys.version.replace(chr(10), ' ')}")
log(f"📌 PLATFORM             : {platform.platform()}")
log(f"📌 TORCH VERSION        : {torch.__version__}")
log(f"📌 TRANSFORMERS VERSION : {transformers.__version__}")
log(f"📌 DIFFUSERS VERSION    : {diffusers.__version__}")
log(f"📌 CUDA AVAILABLE       : {torch.cuda.is_available()}")

log_system_stats("AT STARTUP")

if not torch.cuda.is_available():
    raise RuntimeError("❌ CUDA Required")

device = "cuda"
gpu_id = 0

# ============================================================
# MODEL SETTINGS
# ============================================================
model_cache = "./weights/"
model_id = "Tongyi-MAI/Z-Image-Turbo"
torch_dtype = torch.bfloat16
USE_CPU_OFFLOAD = False

log("\n===================================================")
log("🧠 MODEL CONFIGURATION")
log("===================================================")
log(f"Model ID              : {model_id}")
log(f"Model Cache Directory : {model_cache}")
log(f"torch_dtype           : {torch_dtype}")
log(f"USE_CPU_OFFLOAD       : {USE_CPU_OFFLOAD}")

log_system_stats("BEFORE TRANSFORMER LOAD")

# ============================================================
# FUNCTION TO CONVERT LATENTS TO IMAGE
# ============================================================
def latent_to_image(latent):
    try:
        # vae.decode returns a DecoderOutput; the image tensor lives in .sample
        img_tensor = pipe.vae.decode(latent).sample
        img_tensor = (img_tensor / 2 + 0.5).clamp(0, 1)
        pil_img = T.ToPILImage()(img_tensor[0])
        return pil_img
    except Exception as e:
        log(f"⚠️ Failed to decode latent: {e}")
        return None
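
# NOTE (added sketch): latent_to_image assumes `latent` is already in VAE
# space. Raw scheduler-space latents normally need the VAE scaling (and shift)
# applied first, mirroring the decode path inside generate_image below:
#
#   lat = latent / pipe.vae.config.scaling_factor
#   lat = lat + (getattr(pipe.vae.config, "shift_factor", 0.0) or 0.0)
#   img = pipe.vae.decode(lat).sample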


# ============================================================
# SAFE TRANSFORMER INSPECTION
# ============================================================
def inspect_transformer(model, name):
    log(f"\n🔍 Inspecting {name}")
    try:
        candidates = ["transformer_blocks", "blocks", "layers", "encoder", "model"]
        blocks = None

        for attr in candidates:
            if hasattr(model, attr):
                blocks = getattr(model, attr)
                break

        if blocks is None:
            log(f"⚠️ No block structure found in {name}")
            return

        if hasattr(blocks, "__len__"):
            log(f"Total Blocks = {len(blocks)}")
        else:
            log("⚠️ Blocks exist but are not iterable")

        for i in range(min(10, len(blocks) if hasattr(blocks, "__len__") else 0)):
            log(f"Block {i} = {blocks[i].__class__.__name__}")

    except Exception as e:
        log(f"⚠️ Transformer inspect error: {e}")


# ============================================================
# LOAD TRANSFORMER — WITH LIVE STATS
# ============================================================
log("\n===================================================")
log("🔧 LOADING TRANSFORMER BLOCK")
log("===================================================")

log("📌 Logging memory before load:")
log_system_stats("START TRANSFORMER LOAD")

try:
    quant_cfg = DiffusersBitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch_dtype,
        bnb_4bit_use_double_quant=True,
    )

    transformer = AutoModel.from_pretrained(
        model_id,
        cache_dir=model_cache,
        subfolder="transformer",
        quantization_config=quant_cfg,
        torch_dtype=torch_dtype,
        device_map=device,
    )
    log("✅ Transformer loaded successfully.")

except Exception as e:
    log(f"❌ Transformer load failed: {e}")
    transformer = None

log_system_stats("AFTER TRANSFORMER LOAD")

if transformer:
    inspect_transformer(transformer, "Transformer")
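
# NOTE (added sketch): if NF4 proves too lossy, bitsandbytes also supports
# 8-bit loading through the same diffusers config class. Untested alternative,
# shown for reference only:
#
#   quant_cfg_8bit = DiffusersBitsAndBytesConfig(load_in_8bit=True)
#   transformer = AutoModel.from_pretrained(
#       model_id,
#       cache_dir=model_cache,
#       subfolder="transformer",
#       quantization_config=quant_cfg_8bit,
#       torch_dtype=torch_dtype,
#       device_map=device,
#   )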


# ============================================================
# LOAD TEXT ENCODER
# ============================================================
log("\n===================================================")
log("🔧 LOADING TEXT ENCODER")
log("===================================================")

log_system_stats("START TEXT ENCODER LOAD")

try:
    quant_cfg2 = TransformersBitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch_dtype,
        bnb_4bit_use_double_quant=True,
    )

    text_encoder = AutoModel.from_pretrained(
        model_id,
        cache_dir=model_cache,
        subfolder="text_encoder",
        quantization_config=quant_cfg2,
        torch_dtype=torch_dtype,
        device_map=device,
    )
    log("✅ Text encoder loaded successfully.")

except Exception as e:
    log(f"❌ Text encoder load failed: {e}")
    text_encoder = None

log_system_stats("AFTER TEXT ENCODER LOAD")

if text_encoder:
    inspect_transformer(text_encoder, "Text Encoder")


# ============================================================
# BUILD PIPELINE
# ============================================================
log("\n===================================================")
log("🔧 BUILDING PIPELINE")
log("===================================================")

log_system_stats("START PIPELINE BUILD")

try:
    pipe = ZImagePipeline.from_pretrained(
        model_id,
        transformer=transformer,
        text_encoder=text_encoder,
        torch_dtype=torch_dtype,
        attn_implementation="kernels-community/vllm-flash-attn3",
    )
    pipe.to(device)
    log("✅ Pipeline built successfully.")

except Exception as e:
    log(f"❌ Pipeline build failed: {e}")
    pipe = None

# Expose the remaining components; generate_image below refers to these names,
# which were previously undefined at module level.
vae = pipe.vae if pipe else None
tokenizer = pipe.tokenizer if pipe else None
scheduler = pipe.scheduler if pipe else None

log_system_stats("AFTER PIPELINE BUILD")
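
# NOTE (added sketch): USE_CPU_OFFLOAD is defined above but never applied.
# diffusers' standard offload hook could be wired in here; untested with
# 4-bit-quantized submodules, so it stays behind the flag:
if USE_CPU_OFFLOAD and pipe is not None:
    try:
        pipe.enable_model_cpu_offload()
        log("✅ CPU offload enabled.")
    except Exception as e:
        log(f"⚠️ CPU offload failed: {e}")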

def safe_generate_with_latents(
    transformer,
    vae,
    text_encoder,
    tokenizer,
    scheduler,
    pipe,
    prompt,
    height,
    width,
    steps,
    guidance_scale,
    negative_prompt,
    num_images_per_prompt,
    generator,
    cfg_normalization,
    cfg_truncation,
    max_sequence_length,
):
    """Run a latent-returning generation pass; returns (latents, error)."""
    try:
        # `generate` is expected to be a standalone denoising loop defined or
        # imported elsewhere; it is NOT defined in this file, so this path
        # raises NameError (and the caller falls back) until it is supplied.
        latents_or_images = generate(
            transformer=transformer,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            generator=generator,
            cfg_normalization=cfg_normalization,
            cfg_truncation=cfg_truncation,
            max_sequence_length=max_sequence_length,
            output_type="latent",  # IMPORTANT: return latents, not PIL images
        )
        return latents_or_images, None

    except Exception as e:
        return None, e

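# NOTE (added sketch): a hypothetical stub showing the signature this file
# expects `generate` to have; replace the body with a real latent-returning
# sampler before relying on the latent path:
#
#   def generate(transformer, vae, text_encoder, tokenizer, scheduler,
#                prompt, height, width, num_inference_steps, guidance_scale,
#                negative_prompt, num_images_per_prompt, generator,
#                cfg_normalization, cfg_truncation, max_sequence_length,
#                output_type="latent"):
#       raise NotImplementedError("plug in a custom denoising loop here")
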

def safe_get_latents(pipe, height, width, generator, device, LOGS):
    """
    Attempts multiple ways to get latents.
    Returns a valid tensor even if the pipeline hides its UNet.
    """
    # Try the official prepare_latents path (UNet-style pipelines)
    try:
        if hasattr(pipe, "unet") and hasattr(pipe.unet, "in_channels"):
            num_channels = pipe.unet.in_channels
            latents = pipe.prepare_latents(
                batch_size=1,
                num_channels=num_channels,
                height=height,
                width=width,
                dtype=torch.float32,
                device=device,
                generator=generator,
            )
            LOGS.append("✅ Latents extracted using official prepare_latents.")
            return latents
    except Exception as e:
        LOGS.append(f"⚠️ Official latent extraction failed: {e}")

    # Try hidden internal attribute
    try:
        if hasattr(pipe, "_default_latents"):
            LOGS.append("⚠️ Using hidden _default_latents.")
            return pipe._default_latents
    except Exception:
        pass

    # Fallback: raw Gaussian tensor
    try:
        LOGS.append("⚠️ Using raw Gaussian latents fallback.")
        return torch.randn(
            (1, 4, height // 8, width // 8),
            generator=generator,
            device=device,
            dtype=torch.float32,
        )
    except Exception as e:
        LOGS.append(f"⚠️ Gaussian fallback failed: {e}")

    LOGS.append("❗ Using CPU hard fallback latents.")
    return torch.randn((1, 4, height // 8, width // 8))
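
# NOTE (added sketch): the Gaussian fallbacks above hard-code the SD1.x-style
# 4-channel, 8x-downsampled latent shape. If the Z-Image VAE differs, the
# channel count can usually be read from its config instead (assumed attribute):
#
#   ch = getattr(pipe.vae.config, "latent_channels", 4)
#   latents = torch.randn((1, ch, height // 8, width // 8), generator=generator)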
362
+
363
+
364
+ # --------------------------
365
+ # Main generation function
366
+ # --------------------------
367
+ @spaces.GPU
368
+ def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
369
+ LOGS = []
370
+ latents = None
371
+ image = None
372
+ gallery = []
373
+
374
+ # placeholder image if all fails
375
+ placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
376
+ print(prompt)
377
+
378
+ try:
379
+ generator = torch.Generator(device).manual_seed(int(seed))
380
+
381
+ # -------------------------------
382
+ # Try advanced latent extraction
383
+ # -------------------------------
384
+ try:
385
+ latents, latent_err = safe_generate_with_latents(
386
+ transformer=transformer,
387
+ vae=vae,
388
+ text_encoder=text_encoder,
389
+ tokenizer=tokenizer,
390
+ scheduler=scheduler,
391
+ pipe=pipe,
392
+ prompt=prompt,
393
+ height=height,
394
+ width=width,
395
+ steps=steps,
396
+ guidance_scale=guidance_scale,
397
+ negative_prompt="",
398
+ num_images_per_prompt=1,
399
+ generator=generator,
400
+ cfg_normalization=False,
401
+ cfg_truncation=1.0,
402
+ max_sequence_length=4096,
403
+ )
404
+
405
+ if latent_err is None:
406
+ log("βœ… Latent generator succeeded.")
407
+ try:
408
+ # Decode latents to image
409
+ shift_factor = getattr(vae.config, "shift_factor", 0.0) or 0.0
410
+ dec = (latents.to(vae.dtype) / vae.config.scaling_factor) + shift_factor
411
+ image = vae.decode(dec, return_dict=False)[0]
412
+
413
+ image = (image / 2 + 0.5).clamp(0, 1)
414
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
415
+ image = (image * 255).round().astype("uint8")
416
+ from PIL import Image
417
+ image = Image.fromarray(image[0])
418
+
419
+ log("🟒 Final image decoded from latent generator.")
420
+ return image, latents, LOGS
421
+
422
+
423
+ except Exception as decode_error:
424
+ log(f"⚠️ Latent decode failed: {decode_error}")
425
+ log("πŸ” Falling back to standard pipeline...")
426
+
427
+ image = output.images[0]
428
+ gallery = [image]
429
+ LOGS.append("βœ… Advanced latent pipeline succeeded.")
430
+
431
+ except Exception as e:
432
+ LOGS.append(f"⚠️ Latent mode failed: {e}")
433
+ LOGS.append("πŸ” Switching to standard pipeline...")
434
+ image = placeholder
435
+ gallery = [image]
436
+ # ========================================================== # 🟩 STANDARD PIPELINE FALLBACK (Never fails) # ==========================================================
437
+ try:
438
+ output = pipe(
439
+ prompt=prompt,
440
+ height=height,
441
+ width=width,
442
+ num_inference_steps=steps,
443
+ guidance_scale=guidance_scale,
444
+ generator=generator,
445
+ )
446
+ image = output.images[0]
447
+ gallery = [image]
448
+ LOGS.append("βœ… Standard pipeline succeeded.")
449
+
450
+ except Exception as e2:
451
+ LOGS.append(f"❌ Standard pipeline failed: {e2}")
452
+ image = placeholder
453
+ gallery = [image]
454
+
455
+ return image, gallery, LOGS
456
+
457
+ except Exception as e:
458
+ LOGS.append(f"❌ Total failure: {e}")
459
+ return placeholder, [placeholder], LOGS
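
# NOTE (added sketch): latent_history and the "Latent Steps" gallery never
# receive real per-step latents. If ZImagePipeline supports diffusers'
# standard step-end callback (assumed, not verified for this pipeline),
# intermediate latents could be captured like this:
#
#   def _capture_latents(pipeline, step, timestep, callback_kwargs):
#       latent_history.append(callback_kwargs["latents"].detach().cpu())
#       return callback_kwargs
#
#   output = pipe(
#       prompt=prompt,
#       num_inference_steps=steps,
#       callback_on_step_end=_capture_latents,
#       callback_on_step_end_tensor_inputs=["latents"],
#   )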
# --------------------------
# Earlier variant (unused): seed the standard pipeline with pre-built latents
# --------------------------
@spaces.GPU
def generate_image0(prompt, height, width, steps, seed, guidance_scale=0.0):
    LOGS = []
    latents = None
    image = None
    gallery = []

    # Placeholder image in case everything fails
    placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
    print(prompt)

    try:
        generator = torch.Generator(device).manual_seed(int(seed))

        # -------------------------------
        # Try advanced latent extraction
        # -------------------------------
        try:
            latents = safe_get_latents(pipe, height, width, generator, device, LOGS)

            output = pipe(
                prompt=prompt,
                height=height,
                width=width,
                num_inference_steps=steps,
                guidance_scale=guidance_scale,
                generator=generator,
                latents=latents,
            )

            image = output.images[0]
            gallery = [image]
            LOGS.append("✅ Advanced latent pipeline succeeded.")

        except Exception as e:
            LOGS.append(f"⚠️ Latent mode failed: {e}")
            LOGS.append("🔁 Switching to standard pipeline...")

            try:
                output = pipe(
                    prompt=prompt,
                    height=height,
                    width=width,
                    num_inference_steps=steps,
                    guidance_scale=guidance_scale,
                    generator=generator,
                )
                image = output.images[0]
                gallery = [image]
                LOGS.append("✅ Standard pipeline succeeded.")

            except Exception as e2:
                LOGS.append(f"❌ Standard pipeline failed: {e2}")
                image = placeholder
                gallery = [image]

        return image, gallery, "\n".join(LOGS)

    except Exception as e:
        LOGS.append(f"❌ Total failure: {e}")
        return placeholder, [placeholder], "\n".join(LOGS)

# ============================================================
# UI
# ============================================================

with gr.Blocks(title="Z-Image experiment - do not run") as demo:
    gr.Markdown("# **🚀 Do not run: Z-Image-Turbo — Final Image & Latents**")

    with gr.Row():
        with gr.Column(scale=1):
            prompt = gr.Textbox(label="Prompt", value="boat in Ocean")
            height = gr.Slider(256, 2048, value=1024, step=8, label="Height")
            width = gr.Slider(256, 2048, value=1024, step=8, label="Width")
            steps = gr.Slider(1, 50, value=20, step=1, label="Inference Steps")
            seed = gr.Number(value=42, label="Seed")
            run_btn = gr.Button("Generate Image")

        with gr.Column(scale=1):
            final_image = gr.Image(label="Final Image")
            latent_gallery = gr.Gallery(
                label="Latent Steps",
                columns=4,
                height=256,
                preview=True,
            )

    logs_box = gr.Textbox(label="Logs", lines=15)

    run_btn.click(
        generate_image,
        inputs=[prompt, height, width, steps, seed],
        outputs=[final_image, latent_gallery, logs_box],
    )


demo.launch()
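
# NOTE (added sketch): on Spaces, enabling Gradio's queue before launching is a
# common way to avoid request timeouts on long generations; optional:
#   demo.queue().launch()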