Spaces: Running on Zero
update app
app.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import spaces
 import gradio as gr
 import numpy as np
 import torch
@@ -9,17 +10,6 @@ from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes
 from transformers import Sam3Processor, Sam3Model
 
-# --- Handle optional 'spaces' import for local compatibility ---
-try:
-    import spaces
-except ImportError:
-    class spaces:
-        @staticmethod
-        def GPU(duration=60):
-            def decorator(func):
-                return func
-            return decorator
-
 colors.steel_blue = colors.Color(
     name="steel_blue",
     c50="#EBF3F8",
@@ -82,7 +72,6 @@ class SteelBlueTheme(Soft):
 
 steel_blue_theme = SteelBlueTheme()
 
-# --- Hardware Setup ---
 device = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"Using device: {device}")
 
@@ -100,7 +89,7 @@ except Exception as e:
     model = None
     processor = None
 
-@spaces.GPU
+@spaces.GPU
 def segment_image(input_image, text_prompt, threshold=0.5):
     if input_image is None:
         raise gr.Error("Please upload an image.")
@@ -167,7 +156,6 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
     gr.Markdown("Segment objects in images using **SAM3** (Segment Anything Model 3) with text prompts.")
 
     with gr.Row():
-        # Left Column: Inputs
         with gr.Column(scale=1):
             input_image = gr.Image(label="Input Image", type="pil", height=300)
             text_prompt = gr.Textbox(
@@ -178,15 +166,12 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
 
             run_button = gr.Button("Segment", variant="primary")
 
-        # Right Column: Output
         with gr.Column(scale=1.5):
-            # AnnotatedImage creates a nice overlay visualization
             output_image = gr.AnnotatedImage(label="Segmented Output", height=400)
 
     with gr.Row():
         threshold = gr.Slider(label="Confidence Threshold", minimum=0.0, maximum=1.0, value=0.4, step=0.05)
 
-    # Examples
     gr.Examples(
         examples=[
             ["examples/cat.jpg", "cat", 0.5],
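
This update drops the try/except fallback around `import spaces` and imports the package unconditionally, which is fine on a ZeroGPU Space where `spaces` is always installed, but it means `app.py` no longer runs as-is in an environment without that package. If local runs still matter, a stub like the deleted one can be re-added. The sketch below is reconstructed from the removed lines and adjusted so it also accepts the bare `@spaces.GPU` form used later in the file; it is not part of the updated app.py:

```python
# Optional local-compatibility shim (hypothetical, reconstructed from the removed block):
# fall back to a no-op GPU decorator when the `spaces` package is unavailable.
try:
    import spaces
except ImportError:
    class spaces:  # minimal stand-in for the Hugging Face `spaces` module
        @staticmethod
        def GPU(func=None, duration=60):
            # Support both `@spaces.GPU` and `@spaces.GPU(duration=...)`.
            if callable(func):
                return func
            def decorator(f):
                return f
            return decorator
```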
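
For context, the overall pattern the updated file follows (visible across the hunks above) is: import `spaces` before `torch`, load the SAM3 model and processor once at import time, and request a GPU only for the duration of each call by decorating the inference function with `@spaces.GPU`. Below is a minimal sketch of that structure, assuming a placeholder checkpoint id (the real one is outside the hunks shown) and eliding the preprocessing and mask post-processing done inside `segment_image`:

```python
import spaces  # imported before torch, matching the order in the diff
import torch
import gradio as gr
from transformers import Sam3Processor, Sam3Model

device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

try:
    # "<sam3-checkpoint>" is a placeholder; the actual model id is not shown in the diff.
    processor = Sam3Processor.from_pretrained("<sam3-checkpoint>")
    model = Sam3Model.from_pretrained("<sam3-checkpoint>").to(device)
except Exception as e:
    print(f"Model loading failed: {e}")
    model = None
    processor = None

@spaces.GPU  # ZeroGPU attaches a GPU only while this function executes
def segment_image(input_image, text_prompt, threshold=0.5):
    if input_image is None:
        raise gr.Error("Please upload an image.")
    if model is None or processor is None:
        raise gr.Error("Model is not loaded.")
    ...  # preprocessing, forward pass, and mask post-processing live in app.py
```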