# prasatee's picture
# Add files using upload-large-folder tool
# 7d93555 verified
"""
Lines Dataset Visualizer - Flask app to visualize line segments on cropped P&ID images
Supports train/validation/test splits.
"""
import json
from pathlib import Path
from typing import Optional

from flask import Flask, render_template, jsonify, send_from_directory
app = Flask(__name__)

# Configuration
# Dataset root: this script's grandparent directory; each split lives
# in a subdirectory of it (see get_split_dir).
BASE_DIR = Path(__file__).parent.parent
# Split names the app knows how to look for on disk.
SPLITS = ["train", "validation", "test"]

# Cache for metadata per split
# Maps split name -> list of parsed metadata.jsonl records (populated
# lazily by load_metadata; an empty list marks a missing split file).
_metadata_cache = {}
def get_split_dir(split: str) -> Path:
    """Return the on-disk directory that holds the given split's data."""
    return BASE_DIR.joinpath(split)
def load_metadata(split: str) -> list:
    """Load and cache all metadata records for a split.

    Reads ``<split_dir>/metadata.jsonl`` (one JSON object per line),
    skipping blank lines. Returns a list of dicts, or an empty list when
    the file does not exist. Results are memoized in ``_metadata_cache``
    so each split's file is parsed at most once per process.
    """
    if split in _metadata_cache:
        return _metadata_cache[split]
    metadata_file = get_split_dir(split) / "metadata.jsonl"
    metadata = []
    if metadata_file.exists():
        # Explicit encoding so JSONL parsing does not depend on the
        # platform's default locale encoding.
        with open(metadata_file, 'r', encoding="utf-8") as f:
            metadata = [json.loads(line) for line in f if line.strip()]
    else:
        print(f"Metadata file not found: {metadata_file}")
    _metadata_cache[split] = metadata
    return metadata
def get_available_splits() -> list:
    """Return the splits (in SPLITS order) that have data on disk.

    A split is available when its metadata.jsonl file exists; checking
    the file alone is sufficient because the file cannot exist without
    its parent directory (the original double check was redundant).
    """
    return [s for s in SPLITS if (get_split_dir(s) / "metadata.jsonl").exists()]
def get_sample_count(split: str) -> int:
    """Return how many samples the given split contains."""
    records = load_metadata(split)
    return len(records)
def get_sample(split: str, idx: int) -> Optional[dict]:
    """Return the metadata record at index *idx* in *split*.

    Returns None when *idx* is out of range (the previous annotation
    claimed a bare ``dict``, which was wrong for the miss case).
    """
    metadata = load_metadata(split)
    if 0 <= idx < len(metadata):
        return metadata[idx]
    return None
def get_pipelines_in_sample(sample: dict) -> list:
    """Return the sorted, de-duplicated non-empty pipeline labels of a sample."""
    labels = sample.get("lines", {}).get("pipelines", [])
    return sorted({label for label in labels if label})
@app.route("/")
def index():
    """Render the visualizer's single-page UI."""
    return render_template("index.html")
@app.route("/api/splits")
def get_splits():
    """Return the available splits with per-split sample and line counts."""
    split_info = []
    for name in get_available_splits():
        records = load_metadata(name)
        segment_total = 0
        for record in records:
            segment_total += len(record.get("lines", {}).get("segments", []))
        split_info.append({
            "name": name,
            "sample_count": len(records),
            "line_count": segment_total,
        })
    return jsonify({"splits": split_info})
@app.route("/api/stats")
def get_stats():
    """Aggregate dataset-wide statistics across every available split."""
    totals = {"samples": 0, "lines": 0, "solid": 0, "dashed": 0}
    source_images = set()
    for split in get_available_splits():
        records = load_metadata(split)
        totals["samples"] += len(records)
        for record in records:
            source_images.add(record.get("source_image_idx", 0))
            lines_data = record.get("lines", {})
            totals["lines"] += len(lines_data.get("segments", []))
            for line_type in lines_data.get("line_types", []):
                if line_type == "solid":
                    totals["solid"] += 1
                elif line_type == "dashed":
                    totals["dashed"] += 1
    return jsonify({
        "total_samples": totals["samples"],
        "total_lines": totals["lines"],
        "source_images": len(source_images),
        "solid_lines": totals["solid"],
        "dashed_lines": totals["dashed"]
    })
@app.route("/api/<split>/stats")
def get_split_stats(split):
    """Return statistics for one split; 404 when the split has no data."""
    if split not in get_available_splits():
        return jsonify({"error": "Split not found"}), 404
    records = load_metadata(split)
    source_images = set()
    type_counts = {"solid": 0, "dashed": 0}
    segment_total = 0
    for record in records:
        source_images.add(record.get("source_image_idx", 0))
        lines_data = record.get("lines", {})
        segment_total += len(lines_data.get("segments", []))
        for line_type in lines_data.get("line_types", []):
            if line_type in type_counts:
                type_counts[line_type] += 1
    return jsonify({
        "split": split,
        "total_samples": len(records),
        "total_lines": segment_total,
        "source_images": len(source_images),
        "solid_lines": type_counts["solid"],
        "dashed_lines": type_counts["dashed"]
    })
@app.route("/api/<split>/samples")
def get_all_samples(split):
    """List index and summary info for every sample in a split."""
    if split not in get_available_splits():
        return jsonify({"error": "Split not found"}), 404
    samples = [
        {
            "idx": i,
            "file_name": record.get("file_name"),
            "source_image_idx": record.get("source_image_idx"),
            "width": record.get("width"),
            "height": record.get("height"),
            "line_count": len(record.get("lines", {}).get("segments", [])),
        }
        for i, record in enumerate(load_metadata(split))
    ]
    return jsonify({"samples": samples, "count": len(samples)})
@app.route("/api/<split>/sample/<int:idx>")
def get_sample_data(split, idx):
    """Return full line-level detail for a single sample in a split."""
    if split not in get_available_splits():
        return jsonify({"error": "Split not found"}), 404
    sample = get_sample(split, idx)
    if sample is None:
        return jsonify({"error": "Sample not found"}), 404
    lines_data = sample.get("lines", {})
    line_types = lines_data.get("line_types", [])
    pipelines = lines_data.get("pipelines", [])
    # Merge the parallel arrays into one record per line, falling back to
    # defaults when the type/pipeline lists are shorter than the segments.
    lines = [
        {
            "idx": i,
            "segment": segment,
            "type": line_types[i] if i < len(line_types) else "solid",
            "pipeline": pipelines[i] if i < len(pipelines) else "",
        }
        for i, segment in enumerate(lines_data.get("segments", []))
    ]
    return jsonify({
        "file_name": sample.get("file_name"),
        "source_image_idx": sample.get("source_image_idx"),
        "crop_idx": sample.get("crop_idx"),
        "width": sample.get("width"),
        "height": sample.get("height"),
        "lines": lines,
        "line_count": len(lines),
        "unique_pipelines": get_pipelines_in_sample(sample)
    })
@app.route("/images/<split>/<path:filename>")
def serve_image(split, filename):
    """Serve an image file from a split directory.

    Validates *split* against the known split names before joining it
    into a filesystem path, so an arbitrary URL segment cannot select a
    directory outside the dataset (every other route already validates
    the split; send_from_directory sandboxes *filename* but not the
    directory argument itself).
    """
    if split not in SPLITS:
        return jsonify({"error": "Split not found"}), 404
    return send_from_directory(get_split_dir(split), filename)
if __name__ == "__main__":
    # Startup banner: where the data lives and what was found there.
    print("Starting Lines Dataset Visualizer...")
    print(f"Base directory: {BASE_DIR}")
    available_splits = get_available_splits()
    print(f"Available splits: {available_splits}")
    for name in available_splits:
        print(f"  {name}: {get_sample_count(name)} samples")
    app.run(debug=True, port=5051)