Update app.py
app.py CHANGED
@@ -4,19 +4,24 @@ import torch
 import soundfile as sf
 from pathlib import Path
 from transformers import AutoTokenizer, AutoModelForCausalLM
-from peft import PeftModel
+from peft import PeftModel
 from snac import SNAC
 
 # -----------------------------
 # CONFIG
 # -----------------------------
-MODEL_NAME = "rahul7star/nava1.0"
-LORA_NAME
+MODEL_NAME = "rahul7star/nava1.0"
+LORA_NAME = "rahul7star/nava-audio"
 SEQ_LEN = 2048
 TARGET_SR = 24000
 OUT_ROOT = Path("/tmp/data")
 OUT_ROOT.mkdir(parents=True, exist_ok=True)
 
+DEFAULT_TEXT = (
+    "राजनीतिज्ञों ने कहा कि उन्होंने निर्णायक मत को अनावश्यक रूप से "
+    "निर्धारित करने के लिए अफ़गान संविधान में काफी अस्पष्टता पाई थी"
+)
+
 # -----------------------------
 # GENERATE AUDIO (LoRA)
 # -----------------------------
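
A note on the CONFIG values: TARGET_SR and OUT_ROOT only matter at the point where the decoded waveform is written to disk (the sf.write call in the last hunk). Below is a minimal, self-contained sketch of that save step, assuming the decoder hands back a NumPy float array; the sine tone and the uuid-based filename are placeholders for illustration, not necessarily what app.py uses.

import uuid
import numpy as np
import soundfile as sf
from pathlib import Path

TARGET_SR = 24000                 # target sample rate used when writing the WAV
OUT_ROOT = Path("/tmp/data")
OUT_ROOT.mkdir(parents=True, exist_ok=True)

# Stand-in for the waveform produced by the model (1 s, 440 Hz tone).
t = np.linspace(0, 1.0, TARGET_SR, endpoint=False)
audio = 0.1 * np.sin(2 * np.pi * 440 * t).astype(np.float32)

audio_path = OUT_ROOT / f"{uuid.uuid4().hex}.wav"   # hypothetical naming scheme
sf.write(audio_path, audio, TARGET_SR)              # writes a PCM WAV at 24 kHz
print(f"✅ Audio saved: {audio_path}, duration: {len(audio)/TARGET_SR:.2f}s")
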
@@ -35,12 +40,12 @@ def generate_audio_cpu_lora(text: str):
         )
         logs.append("✅ Loaded base Maya model")
 
-        # Load LoRA adapter
+        # Load LoRA adapter
         model = PeftModel.from_pretrained(base_model, LORA_NAME, device_map={"": DEVICE_CPU})
         model.eval()
         logs.append(f"✅ Applied LoRA adapter from {LORA_NAME}")
 
-        # Build prompt
+        # Build prompt
         soh_token = tokenizer.decode([128259])
         eoh_token = tokenizer.decode([128260])
         soa_token = tokenizer.decode([128261])
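
The PeftModel call in this hunk runs inside generate_audio_cpu_lora, but the surrounding lines (tokenizer and base-model loading, the DEVICE_CPU constant, the actual prompt string) fall outside the diff. The sketch below fills those gaps with assumptions: DEVICE_CPU is taken to be "cpu", the base model is loaded in float32 via AutoModelForCausalLM, and the final prompt format is one plausible arrangement of the decoded markers rather than the exact one in app.py.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

MODEL_NAME = "rahul7star/nava1.0"
LORA_NAME = "rahul7star/nava-audio"
DEVICE_CPU = "cpu"  # assumed value; not visible in the diff

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
base_model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float32,      # CPU-friendly dtype (assumption)
    device_map={"": DEVICE_CPU},
)

# Attach the LoRA adapter on top of the frozen base weights.
model = PeftModel.from_pretrained(base_model, LORA_NAME, device_map={"": DEVICE_CPU})
model.eval()

# Special token IDs taken from the diff; the soh/eoh/soa names suggest
# start-of-human, end-of-human and start-of-ai markers.
soh_token = tokenizer.decode([128259])
eoh_token = tokenizer.decode([128260])
soa_token = tokenizer.decode([128261])

text = "नमस्ते"  # hypothetical input
prompt = f"{soh_token}{text}{eoh_token}{soa_token}"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids

From here the app presumably generates audio tokens with model.generate and decodes them with SNAC; that part of the function is untouched by this commit and not shown in the diff.
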
@@ -99,28 +104,45 @@ def generate_audio_cpu_lora(text: str):
         sf.write(audio_path, audio, TARGET_SR)
         logs.append(f"✅ Audio saved: {audio_path}, duration: {len(audio)/TARGET_SR:.2f}s")
 
-        return str(audio_path), "\n".join(logs)
+        return str(audio_path), str(audio_path), "\n".join(logs)
 
     except Exception as e:
         import traceback
         logs.append(f"[❌] CPU LoRA TTS error: {e}\n{traceback.format_exc()}")
-        return None, "\n".join(logs)
+        return None, None, "\n".join(logs)
 
 # -----------------------------
 # GRADIO UI
 # -----------------------------
 with gr.Blocks() as demo:
     gr.Markdown("# Maya LoRA TTS (CPU)")
-
+
+    # Input text
+    input_text = gr.Textbox(label="Enter text", lines=2, value=DEFAULT_TEXT)
+
+    # Generate button
     run_button = gr.Button("🔊 Generate Audio")
-
+
+    # Outputs
+    audio_output = gr.Audio(label="Play Generated Audio", type="filepath")
+    download_output = gr.File(label="Download Audio")
     logs_output = gr.Textbox(label="Logs", lines=12)
 
     run_button.click(
         fn=generate_audio_cpu_lora,
         inputs=[input_text],
-        outputs=[audio_output, logs_output]
+        outputs=[audio_output, download_output, logs_output]
     )
 
+    # -----------------------------
+    # Example section
+    # -----------------------------
+    gr.Markdown("### Example")
+    example_text = DEFAULT_TEXT
+    example_audio_path = "/audio.wav"  # Placeholder for later upload
+
+    gr.Textbox(label="Example Text", value=example_text, lines=2, interactive=False)
+    gr.Audio(label="Example Audio", value=example_audio_path, type="filepath", interactive=False)
+
 if __name__ == "__main__":
     demo.launch()
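
Why both return statements gained a second path: Gradio hands each element of the returned tuple to one output component, in order, so str(audio_path) goes once to the gr.Audio player and once to the gr.File download widget, with the joined log text going to the textbox. A minimal, runnable sketch of that wiring, using a stub in place of generate_audio_cpu_lora (the silent WAV and the /tmp/demo.wav name are illustrative only):

import numpy as np
import soundfile as sf
import gradio as gr

def fake_tts(text: str):
    """Stub standing in for generate_audio_cpu_lora: returns (audio, download, logs)."""
    path = "/tmp/demo.wav"
    sf.write(path, np.zeros(24000, dtype=np.float32), 24000)  # 1 s of silence at 24 kHz
    return path, path, f"Generated placeholder audio for: {text!r}"

with gr.Blocks() as demo:
    input_text = gr.Textbox(label="Enter text", lines=2)
    run_button = gr.Button("🔊 Generate Audio")
    audio_output = gr.Audio(label="Play Generated Audio", type="filepath")
    download_output = gr.File(label="Download Audio")
    logs_output = gr.Textbox(label="Logs", lines=4)

    # One returned value per output component, in the same order.
    run_button.click(
        fn=fake_tts,
        inputs=[input_text],
        outputs=[audio_output, download_output, logs_output],
    )

if __name__ == "__main__":
    demo.launch()

On the error path the updated function returns (None, None, logs), which leaves the audio and file components empty while the traceback still appears in the logs textbox.
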