"""
Script to split the lines_dataset into train/validation/test splits.

- 80% train
- 10% validation
- 10% test

The split is done by source image to prevent data leakage: all crops from the
same source image go into the same split.
"""

import json
import random
import shutil
from collections import defaultdict
from pathlib import Path

RANDOM_SEED = 42
TRAIN_RATIO = 0.8
VAL_RATIO = 0.1
TEST_RATIO = 0.1
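# The three ratios are expected to sum to 1.0; int() truncation in
# split_sources sends any rounding remainder to the test split.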

BASE_DIR = Path(__file__).parent
CURRENT_TRAIN_DIR = BASE_DIR / "train"
METADATA_FILE = CURRENT_TRAIN_DIR / "metadata.jsonl"


def load_metadata():
    """Load all metadata from the jsonl file."""
    if not METADATA_FILE.exists():
        print(f"Metadata file not found: {METADATA_FILE}")
        return []

    metadata = []
    with open(METADATA_FILE, 'r') as f:
        for line in f:
            if line.strip():
                metadata.append(json.loads(line))
    return metadata
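
# Illustrative sketch of a metadata.jsonl row (hypothetical values; these are
# just the fields this script reads):
#   {"file_name": "crop_0001.png", "source_image_idx": 3,
#    "lines": {"segments": [...]}}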


def group_by_source(metadata):
    """Group samples by their source image index."""
    groups = defaultdict(list)
    for item in metadata:
        source_idx = item.get("source_image_idx", 0)
        groups[source_idx].append(item)
    return groups


def split_sources(source_indices, train_ratio, val_ratio, test_ratio):
    """Split source indices into train/val/test sets."""
    random.shuffle(source_indices)

    n = len(source_indices)
    n_train = int(n * train_ratio)
    n_val = int(n * val_ratio)

    train_sources = source_indices[:n_train]
    val_sources = source_indices[n_train:n_train + n_val]
    test_sources = source_indices[n_train + n_val:]

    return train_sources, val_sources, test_sources


def create_split_directory(split_name, samples, base_dir, source_dir):
    """Create a split directory with images and metadata."""
    split_dir = base_dir / split_name
    split_dir.mkdir(parents=True, exist_ok=True)

    metadata_entries = []

    for sample in samples:
        file_name = sample["file_name"]
        src_path = source_dir / file_name
        dst_path = split_dir / file_name

        if src_path.exists():
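            # copy2 preserves file timestamps/metadata. Samples whose image is
            # missing are skipped and left out of metadata_entries, so the
            # written metadata.jsonl stays consistent with the copied files.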
            shutil.copy2(src_path, dst_path)
            metadata_entries.append(sample)
        else:
            print(f"Warning: Image not found: {src_path}")

    metadata_path = split_dir / "metadata.jsonl"
    with open(metadata_path, 'w') as f:
        for entry in metadata_entries:
            f.write(json.dumps(entry) + '\n')

    return len(metadata_entries)


def main():
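    # Seed the RNG once up front so the shuffle in split_sources is reproducible.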
    random.seed(RANDOM_SEED)

    print("Loading metadata...")
    metadata = load_metadata()
    print(f"Total samples: {len(metadata)}")
    if not metadata:
        # Guard: with no samples the percentage reports below would divide by
        # zero, and there is nothing to split anyway.
        return

    print("Grouping by source image...")
    groups = group_by_source(metadata)
    source_indices = list(groups.keys())
    print(f"Total source images: {len(source_indices)}")

    print(f"\nSplitting sources ({TRAIN_RATIO:.0%} train, {VAL_RATIO:.0%} val, {TEST_RATIO:.0%} test)...")
    train_sources, val_sources, test_sources = split_sources(
        source_indices, TRAIN_RATIO, VAL_RATIO, TEST_RATIO
    )

    print(f"  Train sources: {len(train_sources)}")
    print(f"  Validation sources: {len(val_sources)}")
    print(f"  Test sources: {len(test_sources)}")

    train_samples = []
    val_samples = []
    test_samples = []

    for src_idx in train_sources:
        train_samples.extend(groups[src_idx])
    for src_idx in val_sources:
        val_samples.extend(groups[src_idx])
    for src_idx in test_sources:
        test_samples.extend(groups[src_idx])

    print("\nSamples per split:")
    print(f"  Train: {len(train_samples)} ({len(train_samples)/len(metadata)*100:.1f}%)")
    print(f"  Validation: {len(val_samples)} ({len(val_samples)/len(metadata)*100:.1f}%)")
    print(f"  Test: {len(test_samples)} ({len(test_samples)/len(metadata)*100:.1f}%)")
    temp_dir = BASE_DIR / "_temp_splits"
    temp_dir.mkdir(exist_ok=True)

    print("\nCreating split directories...")

    n_train = create_split_directory("train", train_samples, temp_dir, CURRENT_TRAIN_DIR)
    print(f"  Created train split: {n_train} samples")

    n_val = create_split_directory("validation", val_samples, temp_dir, CURRENT_TRAIN_DIR)
    print(f"  Created validation split: {n_val} samples")

    n_test = create_split_directory("test", test_samples, temp_dir, CURRENT_TRAIN_DIR)
    print(f"  Created test split: {n_test} samples")

    print("\nReorganizing directory structure...")
    shutil.rmtree(CURRENT_TRAIN_DIR)

    for split_name in ["train", "validation", "test"]:
        src = temp_dir / split_name
        dst = BASE_DIR / split_name
        shutil.move(str(src), str(dst))

    temp_dir.rmdir()

    print("\nDone! New directory structure:")
    print(f"  {BASE_DIR}/train/ ({n_train} samples)")
    print(f"  {BASE_DIR}/validation/ ({n_val} samples)")
    print(f"  {BASE_DIR}/test/ ({n_test} samples)")

    def count_lines(samples):
        return sum(len(s.get("lines", {}).get("segments", [])) for s in samples)

    print("\nTotal lines per split:")
    print(f"  Train: {count_lines(train_samples)}")
    print(f"  Validation: {count_lines(val_samples)}")
    print(f"  Test: {count_lines(test_samples)}")


if __name__ == "__main__":
    main()