sherwin6180 committed on
Commit
319d0b3
·
verified ·
1 Parent(s): a30b826

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -16
app.py CHANGED
@@ -8,15 +8,28 @@ from typing import Iterable
8
  from gradio.themes import Soft
9
  from gradio.themes.utils import colors, fonts, sizes
10
 
11
- # --- Mock Spaces (保持不变) ---
12
- class MockSpaces:
13
- def GPU(self, duration=0):
 
 
 
 
 
 
 
 
 
 
14
  def decorator(func):
15
  return func
16
  return decorator
17
- spaces = MockSpaces()
 
 
 
 
18
 
19
- # --- Theme Setup (保持不变) ---
20
  colors.steel_blue = colors.Color(
21
  name="steel_blue",
22
  c50="#EBF3F8",
@@ -84,21 +97,17 @@ class SteelBlueTheme(Soft):
84
  )
85
  steel_blue_theme = SteelBlueTheme()
86
 
87
- # --- 关键修改:按需加载 ---
88
  from diffusers import FlowMatchEulerDiscreteScheduler
89
  from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
90
  from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
91
  from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
92
 
93
- pipe = None # 全局变量初始化为空
94
 
95
- # 检测逻辑:只有在真正有 GPU 的时候才加载模型
96
- # 这样 HF 的 CPU 构建服务器会直接跳过这里,瞬间完成构建
97
  if torch.cuda.is_available():
98
- print("GPU detected! Initializing model for 2x A40 Environment...")
99
  dtype = torch.bfloat16
100
 
101
- # 1. Load Transformer (device_map="auto" for multi-gpu split)
102
  print("Loading Transformer...")
103
  transformer_model = QwenImageTransformer2DModel.from_pretrained(
104
  "linoyts/Qwen-Image-Edit-Rapid-AIO",
@@ -107,7 +116,7 @@ if torch.cuda.is_available():
107
  device_map="auto"
108
  )
109
 
110
- # 2. Load Pipeline (device_map="balanced" compatible with diffusers)
111
  print("Loading Pipeline...")
112
  pipe = QwenImageEditPlusPipeline.from_pretrained(
113
  "Qwen/Qwen-Image-Edit-2509",
@@ -132,7 +141,7 @@ if torch.cuda.is_available():
132
  except Exception as e:
133
  print(f"Warning: FA3 set skipped: {e}")
134
  else:
135
- print("No GPU detected (likely HF Build Environment). SKIPPING MODEL LOAD to save memory.")
136
 
137
  MAX_SEED = np.iinfo(np.int32).max
138
 
@@ -154,9 +163,8 @@ def update_dimensions_on_upload(image):
154
 
155
  @spaces.GPU(duration=30)
156
  def infer(input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps, progress=gr.Progress(track_tqdm=True)):
157
- # 运行时检查:如果 pipe 没加载(说明没 GPU),直接报错
158
  if pipe is None:
159
- raise gr.Error("Error: Model not loaded. Is a GPU available?")
160
 
161
  if input_image is None:
162
  raise gr.Error("Please upload an image to edit.")
@@ -210,7 +218,7 @@ css="""
210
 
211
  with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
212
  with gr.Column(elem_id="col-container"):
213
- gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast (RunPod Optimized)**", elem_id="main-title")
214
 
215
  with gr.Row(equal_height=True):
216
  with gr.Column():
 
8
  from gradio.themes import Soft
9
  from gradio.themes.utils import colors, fonts, sizes
10
 
11
+ try:
12
+ import spaces
13
+ except ImportError:
14
+ class MockSpaces:
15
+ def GPU(self, duration=0):
16
+ def decorator(func):
17
+ return func
18
+ return decorator
19
+ spaces = MockSpaces()
20
+
21
+ if torch.cuda.is_available():
22
+ print("🚀 RunPod/Local GPU detected: Bypassing Hugging Face Spaces queue.")
23
+ def gpu_bypass_decorator(duration=0):
24
  def decorator(func):
25
  return func
26
  return decorator
27
+ spaces.GPU = gpu_bypass_decorator
28
+ else:
29
+ print("🐢 No GPU detected: Using standard Spaces logic (or Build Mode).")
30
+
31
+ # ----------------------------------------
32
 
 
33
  colors.steel_blue = colors.Color(
34
  name="steel_blue",
35
  c50="#EBF3F8",
 
97
  )
98
  steel_blue_theme = SteelBlueTheme()
99
 
 
100
  from diffusers import FlowMatchEulerDiscreteScheduler
101
  from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
102
  from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
103
  from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
104
 
105
+ pipe = None
106
 
 
 
107
  if torch.cuda.is_available():
108
+ print("🚀 GPU detected! Initializing model for RunPod Environment...")
109
  dtype = torch.bfloat16
110
 
 
111
  print("Loading Transformer...")
112
  transformer_model = QwenImageTransformer2DModel.from_pretrained(
113
  "linoyts/Qwen-Image-Edit-Rapid-AIO",
 
116
  device_map="auto"
117
  )
118
 
119
+ # 2. Load Pipeline (device_map="balanced")
120
  print("Loading Pipeline...")
121
  pipe = QwenImageEditPlusPipeline.from_pretrained(
122
  "Qwen/Qwen-Image-Edit-2509",
 
141
  except Exception as e:
142
  print(f"Warning: FA3 set skipped: {e}")
143
  else:
144
+ print("🐢 No GPU detected (HF Build Environment). SKIPPING MODEL LOAD.")
145
 
146
  MAX_SEED = np.iinfo(np.int32).max
147
 
 
163
 
164
  @spaces.GPU(duration=30)
165
  def infer(input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps, progress=gr.Progress(track_tqdm=True)):
 
166
  if pipe is None:
167
+ raise gr.Error("Model not loaded. Are you running on GPU?")
168
 
169
  if input_image is None:
170
  raise gr.Error("Please upload an image to edit.")
 
218
 
219
  with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
220
  with gr.Column(elem_id="col-container"):
221
+ gr.Markdown("# **Qwen-Image-Edit-2509 (2x A40 Ready)**", elem_id="main-title")
222
 
223
  with gr.Row(equal_height=True):
224
  with gr.Column():