rahul7star committed on
Commit
25905dd
·
verified ·
1 Parent(s): 696c058

Update app_quant_latent.py

Browse files
Files changed (1) hide show
  1. app_quant_latent.py +10 -51
app_quant_latent.py CHANGED
@@ -248,52 +248,6 @@ log_system_stats("AFTER PIPELINE BUILD")
248
 
249
  @spaces.GPU
250
  def generate_image(prompt, height, width, steps, seed):
251
-
252
- try:
253
- generator = torch.Generator(device).manual_seed(seed)
254
- latent_history = []
255
-
256
- # Callback to save latents and GPU info
257
- def save_latents(step, timestep, latents):
258
- latent_history.append(latents.detach().clone())
259
- gpu_mem = torch.cuda.memory_allocated(0)/1e9
260
- log(f"Step {step} - GPU Memory Used: {gpu_mem:.2f} GB")
261
-
262
- # Step-wise loop just for latent capture
263
- for step, _ in pipe(
264
- prompt=prompt,
265
- height=height,
266
- width=width,
267
- num_inference_steps=steps,
268
- guidance_scale=0.0,
269
- generator=generator,
270
- callback=save_latents,
271
- callback_steps=1
272
- ).iter():
273
- pass # only capturing latents, ignoring intermediate images
274
-
275
- # Original final image generation
276
- output = pipe(
277
- prompt=prompt,
278
- height=height,
279
- width=width,
280
- num_inference_steps=steps,
281
- guidance_scale=0.0,
282
- generator=generator,
283
- )
284
-
285
- log("✅ Inference finished.")
286
- log_system_stats("AFTER INFERENCE")
287
-
288
- return output.images[0], latent_history, LOGS
289
-
290
- except Exception as e:
291
- log(f"❌ Inference error: {e}")
292
- return None, None, LOGS
293
-
294
- @spaces.GPU
295
- def generate_image(prompt, height, width, steps, seed):
296
-
297
  try:
298
  generator = torch.Generator(device).manual_seed(seed)
299
  latent_history = []
@@ -353,9 +307,10 @@ def generate_image(prompt, height, width, steps, seed):
353
  # ============================================================
354
 
355
  with gr.Blocks(title="Z-Image-Turbo Generator") as demo:
356
- gr.Markdown("# **🚀 Z-Image-Turbo — Final Image & Latents**")
357
 
358
- with gr.Row():
 
359
  with gr.Column(scale=1):
360
  prompt = gr.Textbox(label="Prompt", value="Realistic mid-aged male image")
361
  height = gr.Slider(256, 2048, value=1024, step=8, label="Height")
@@ -366,11 +321,15 @@ with gr.Blocks(title="Z-Image-Turbo Generator") as demo:
366
 
367
  with gr.Column(scale=1):
368
  final_image = gr.Image(label="Final Image")
369
- latent_gallery = gr.Gallery(label="Latent Steps", grid=[4], height=256)
370
  logs_box = gr.Textbox(label="Logs", lines=15)
371
 
372
-
373
- run_btn.click( generate_image, inputs=[prompt, height, width, steps, seed], outputs=[final_image, latent_gallery, logs_box] )
 
 
 
 
374
 
375
 
376
  demo.launch()
 
248
 
249
  @spaces.GPU
250
  def generate_image(prompt, height, width, steps, seed):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  try:
252
  generator = torch.Generator(device).manual_seed(seed)
253
  latent_history = []
 
307
  # ============================================================
308
 
309
  with gr.Blocks(title="Z-Image-Turbo Generator") as demo:
310
+ gr.Markdown("# **🚀 Z-Image-Turbo — Final Image & Latents**")
311
 
312
+
313
+ with gr.Row():
314
  with gr.Column(scale=1):
315
  prompt = gr.Textbox(label="Prompt", value="Realistic mid-aged male image")
316
  height = gr.Slider(256, 2048, value=1024, step=8, label="Height")
 
321
 
322
  with gr.Column(scale=1):
323
  final_image = gr.Image(label="Final Image")
324
+ latent_gallery = gr.Gallery(label="Latent Steps").style(grid=4, height=256)
325
  logs_box = gr.Textbox(label="Logs", lines=15)
326
 
327
+ run_btn.click(
328
+ generate_image,
329
+ inputs=[prompt, height, width, steps, seed],
330
+ outputs=[final_image, latent_gallery, logs_box]
331
+ )
332
+
333
 
334
 
335
  demo.launch()