Yoonc committed (verified)
Commit b103b07 · 1 Parent(s): 434ab69

Upload main.py

Files changed (1)
  1. main.py +655 -0
main.py ADDED
@@ -0,0 +1,655 @@
"""
Vera - AI Coaching Dashboard
A real-time speech emotion analysis tool for coaching sessions.
"""

import io
import logging
import os
import threading
import time
import wave
from collections import deque
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional

import plotly.graph_objects as go
import pyaudio
import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI
from transformers import pipeline

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

load_dotenv()

@dataclass
class SentimentResult:
    """Data class for sentiment analysis results."""
    label: str
    score: float

    def __post_init__(self):
        """Validate sentiment result."""
        if self.label not in ["POSITIVE", "NEGATIVE", "NEUTRAL"]:
            self.label = "NEUTRAL"
        self.score = max(0.0, min(1.0, self.score))

@dataclass
class TranscriptionEntry:
    """Data class for a single transcription entry."""
    text: str
    sentiment: SentimentResult
    timestamp: datetime

class AudioConfig:
    """Configuration for audio recording."""
    def __init__(
        self,
        chunk_duration: int = 3,
        sample_rate: int = 16000,
        channels: int = 1,
        chunk_size: int = 1024,
        format: int = pyaudio.paInt16
    ):
        self.chunk_duration = chunk_duration
        self.sample_rate = sample_rate
        self.channels = channels
        self.chunk_size = chunk_size
        self.format = format

class SentimentAnalyzer:
    """Handles sentiment analysis with enhanced neutral detection."""

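    # The default SST-2 checkpoint is a binary classifier (POSITIVE/NEGATIVE
    # only), so NEUTRAL is synthesized heuristically from hedging keywords,
    # low model confidence, and very short utterances (_should_be_neutral).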
    NEUTRAL_KEYWORDS = [
        'okay', 'ok', 'fine', 'alright', 'whatever', 'maybe', 'perhaps',
        'guess', 'not sure', "don't know", 'dunno', 'meh', 'so-so',
        'neither', 'middle', 'normal', 'average', 'moderate', 'fair'
    ]

    CONFIDENCE_THRESHOLD = 0.8
    MIN_WORD_COUNT = 3

    def __init__(self, model_name: str = "distilbert-base-uncased-finetuned-sst-2-english"):
        """Initialize sentiment analyzer with specified model."""
        self.model = pipeline("sentiment-analysis", model=model_name)

    def analyze(self, text: str) -> SentimentResult:
        """
        Analyze sentiment of text with enhanced neutral detection.

        Args:
            text: Input text to analyze

        Returns:
            SentimentResult with label and confidence score
        """
        if not text or not text.strip():
            return SentimentResult(label="NEUTRAL", score=0.5)

        try:
            # Get raw sentiment from model (truncate to avoid token limit)
            result = self.model(text[:512])[0]
            label = result["label"]
            score = result["score"]

            # Enhanced neutral detection
            if self._should_be_neutral(text, score):
                return SentimentResult(label="NEUTRAL", score=score)

            return SentimentResult(label=label, score=score)

        except Exception as e:
            logger.error(f"Sentiment analysis error: {e}")
            return SentimentResult(label="NEUTRAL", score=0.5)

    def _should_be_neutral(self, text: str, score: float) -> bool:
        """Determine if text should be classified as neutral."""
        text_lower = text.lower()
        word_count = len(text.split())

        has_neutral_keyword = any(
            keyword in text_lower for keyword in self.NEUTRAL_KEYWORDS
        )

        return (
            has_neutral_keyword or
            score < self.CONFIDENCE_THRESHOLD or
            word_count < self.MIN_WORD_COUNT
        )

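# Streamlit re-executes the script on every interaction; st.cache_resource
# keeps one SentimentAnalyzer (and its transformers pipeline) alive across
# reruns instead of reloading the model each time.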
@st.cache_resource
def get_sentiment_analyzer() -> SentimentAnalyzer:
    """Get cached sentiment analyzer instance."""
    return SentimentAnalyzer()

class AudioTranscriber:
    """Handles audio transcription using OpenAI Whisper."""

    def __init__(self, client: OpenAI, audio_config: AudioConfig):
        """
        Initialize transcriber.

        Args:
            client: OpenAI client instance
            audio_config: Audio configuration
        """
        self.client = client
        self.audio_config = audio_config
        self._audio = pyaudio.PyAudio()

    def transcribe(self, audio_data: bytes) -> Optional[str]:
        """
        Transcribe audio data to text.

        Args:
            audio_data: Raw audio bytes

        Returns:
            Transcribed text or None if transcription fails
        """
        try:
            wav_buffer = self._create_wav_buffer(audio_data)
            response = self.client.audio.transcriptions.create(
                model="whisper-1",
                file=("audio.wav", wav_buffer.read(), "audio/wav"),
                language="en",
            )
            return response.text.strip() if response.text else None

        except Exception as e:
            logger.error(f"Transcription error: {e}")
            return None

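    # PyAudio delivers headerless PCM; the transcription endpoint expects a
    # recognized container format, so each chunk is wrapped in an in-memory
    # WAV file before upload.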
    def _create_wav_buffer(self, audio_data: bytes) -> io.BytesIO:
        """Create WAV format buffer from raw audio data."""
        wav_buffer = io.BytesIO()
        with wave.open(wav_buffer, "wb") as wav_file:
            wav_file.setnchannels(self.audio_config.channels)
            wav_file.setsampwidth(
                self._audio.get_sample_size(self.audio_config.format)
            )
            wav_file.setframerate(self.audio_config.sample_rate)
            wav_file.writeframes(audio_data)
        wav_buffer.seek(0)
        return wav_buffer

    def cleanup(self):
        """Clean up PyAudio resources."""
        if self._audio:
            self._audio.terminate()

class CoachingDashboard:
    """Main dashboard for real-time coaching emotion analysis."""

    def __init__(
        self,
        chunk_duration: int = 3,
        sample_rate: int = 16000,
        max_history: int = 50
    ):
        """
        Initialize coaching dashboard.

        Args:
            chunk_duration: Duration of each audio chunk in seconds
            sample_rate: Audio sample rate in Hz
            max_history: Maximum number of transcriptions to keep
        """
        self.audio_config = AudioConfig(
            chunk_duration=chunk_duration,
            sample_rate=sample_rate
        )
        self.max_history = max_history

        # Initialize API client
        try:
            api_key = st.secrets.get("OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY")
        except Exception:
            api_key = os.getenv("OPENAI_API_KEY")

        if not api_key:
            raise ValueError("OPENAI_API_KEY not found in environment or secrets")

        self.client = OpenAI(api_key=api_key)

        # Initialize components
        self.transcriber = AudioTranscriber(self.client, self.audio_config)
        self.sentiment_analyzer = get_sentiment_analyzer()

        # Audio recording state
        self._audio: Optional[pyaudio.PyAudio] = None
        self.stream: Optional[pyaudio.Stream] = None
        self.is_recording = False
        self.audio_buffer_lock = threading.Lock()
        self.audio_buffer: List[bytes] = []

        # Session data
        self.entries: deque[TranscriptionEntry] = deque(maxlen=max_history)
        self.current_sentiment = SentimentResult(label="NEUTRAL", score=0.5)
        self.session_start: Optional[datetime] = None

    def start_recording(self) -> bool:
        """
        Start audio recording session.

        Returns:
            True if recording started successfully, False otherwise
        """
        if self.is_recording:
            logger.warning("Recording already in progress")
            return False

        try:
            # Keep a handle to the PyAudio instance so stop_recording() can
            # terminate it (a local instance here would leak).
            self._audio = pyaudio.PyAudio()
            self.stream = self._audio.open(
                format=self.audio_config.format,
                channels=self.audio_config.channels,
                rate=self.audio_config.sample_rate,
                input=True,
                frames_per_buffer=self.audio_config.chunk_size,
            )

            self.is_recording = True
            self.session_start = datetime.now()

            # Start background threads
            threading.Thread(target=self._record_audio, daemon=True).start()
            threading.Thread(target=self._process_transcription, daemon=True).start()

            logger.info("Recording started successfully")
            return True

        except Exception as e:
            logger.error(f"Failed to start recording: {e}")
            self.stop_recording()
            raise

    def stop_recording(self):
        """Stop audio recording session. Safe to call for partial cleanup."""
        self.is_recording = False

        if self.stream:
            try:
                self.stream.stop_stream()
                self.stream.close()
            except Exception as e:
                logger.error(f"Error closing stream: {e}")
            self.stream = None

        if self._audio:
            self._audio.terminate()
            self._audio = None

        logger.info("Recording stopped")

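    # Capture and transcription run as a producer/consumer pair of daemon
    # threads: _record_audio appends fixed-length PCM chunks to
    # self.audio_buffer under audio_buffer_lock, and _process_transcription
    # drains it, so a slow Whisper call never blocks audio capture.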
    def _record_audio(self):
        """Background thread for recording audio chunks."""
        frames = []
        samples_per_chunk = int(
            self.audio_config.sample_rate * self.audio_config.chunk_duration
        )

        while self.is_recording:
            try:
                if not self.stream:
                    break

                data = self.stream.read(
                    self.audio_config.chunk_size,
                    exception_on_overflow=False
                )
                frames.append(data)

                # stream.read() returns chunk_size samples per call; once a
                # full chunk_duration window of samples has accumulated, hand
                # the chunk to the transcription thread.
                if len(frames) * self.audio_config.chunk_size >= samples_per_chunk:
                    audio_chunk = b"".join(frames)
                    with self.audio_buffer_lock:
                        self.audio_buffer.append(audio_chunk)
                    frames = []

            except Exception as e:
                logger.error(f"Error recording audio: {e}")
                break

    def _process_transcription(self):
        """Background thread for processing transcriptions."""
        while self.is_recording:
            # Get audio chunk from buffer
            audio_data = None
            with self.audio_buffer_lock:
                if self.audio_buffer:
                    audio_data = self.audio_buffer.pop(0)

            if audio_data:
                self._process_audio_chunk(audio_data)
            else:
                time.sleep(0.1)

    def _process_audio_chunk(self, audio_data: bytes):
        """Process a single audio chunk through transcription and sentiment analysis."""
        try:
            # Transcribe
            text = self.transcriber.transcribe(audio_data)
            if not text:
                return

            # Analyze sentiment
            sentiment = self.sentiment_analyzer.analyze(text)

            # Store entry
            entry = TranscriptionEntry(
                text=text,
                sentiment=sentiment,
                timestamp=datetime.now()
            )
            self.entries.append(entry)
            self.current_sentiment = sentiment

            logger.info(f"Processed: {text[:50]}... ({sentiment.label})")

        except Exception as e:
            logger.error(f"Error processing audio chunk: {e}")

    def get_session_duration(self) -> int:
        """Get current session duration in seconds."""
        if not self.session_start:
            return 0
        return int((datetime.now() - self.session_start).total_seconds())

    def get_sentiment_stats(self) -> Dict[str, int]:
        """Get count of each sentiment type."""
        stats = {"POSITIVE": 0, "NEUTRAL": 0, "NEGATIVE": 0}
        for entry in self.entries:
            stats[entry.sentiment.label] += 1
        return stats

    def get_recent_entries(self, n: int = 5) -> List[TranscriptionEntry]:
        """Get the n most recent transcription entries."""
        return list(self.entries)[-n:]

    def cleanup(self):
        """Clean up all resources."""
        self.stop_recording()
        self.transcriber.cleanup()

class DashboardUI:
    """Handles the Streamlit UI for the coaching dashboard."""

    COLORS = {
        "POSITIVE": "#00C853",
        "NEUTRAL": "#FFC107",
        "NEGATIVE": "#FF1744"
    }

    EMOJIS = {
        "POSITIVE": {
            0.95: "🥳",
            0.85: "😁",
            0.70: "😊",
            0.00: "🙂"
        },
        "NEGATIVE": {
            0.95: "😭",
            0.85: "😢",
            0.70: "😟",
            0.00: "😕"
        },
        "NEUTRAL": {
            0.60: "😐",
            0.00: "🤷"
        }
    }

    def __init__(self, dashboard: CoachingDashboard):
        """Initialize UI with dashboard instance."""
        self.dashboard = dashboard

    def render(self):
        """Render the complete dashboard UI."""
        st.set_page_config(page_title="Vera", layout="wide")
        self._inject_custom_css()
        st.title("🎯 Vera - Your Coaching Companion")

        self._render_sidebar()
        self._render_main_content()

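        # Poll-and-refresh: while recording, block this script run for ~2 s,
        # then trigger a rerun; the daemon threads keep capturing and
        # transcribing between reruns.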
        if self.dashboard.is_recording:
            time.sleep(2)
            st.rerun()

    def _inject_custom_css(self):
        """Inject custom CSS styles."""
        st.markdown("""
            <style>
            .sentiment-box {
                padding: 30px;
                border-radius: 15px;
                text-align: center;
                font-size: 20px;
                font-weight: bold;
                margin: 20px 0;
                box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
            }
            .transcription-card {
                border-radius: 8px;
                padding: 15px;
                margin: 10px 0;
                transition: transform 0.2s;
            }
            .transcription-card:hover {
                transform: translateX(5px);
            }
            </style>
        """, unsafe_allow_html=True)

    def _render_sidebar(self):
        """Render sidebar with controls and stats."""
        with st.sidebar:
            st.header("🎮 Controls")

            col1, col2 = st.columns(2)
            with col1:
                if st.button("▶️ Start", disabled=self.dashboard.is_recording, use_container_width=True):
                    try:
                        self.dashboard.start_recording()
                        st.rerun()
                    except Exception as e:
                        st.error(f"Failed to start: {e}")

            with col2:
                if st.button("⏹️ Stop", disabled=not self.dashboard.is_recording, use_container_width=True):
                    self.dashboard.stop_recording()
                    st.rerun()

            st.divider()

            # Recording status
            if self.dashboard.is_recording:
                st.success("🔴 Recording...")
                duration = self.dashboard.get_session_duration()
                st.metric("Duration", f"{duration // 60}m {duration % 60}s")
            else:
                st.info("⚪ Stopped")

            st.divider()

            # Statistics
            st.header("📊 Statistics")
            st.metric("Total Entries", len(self.dashboard.entries))

            if self.dashboard.entries:
                stats = self.dashboard.get_sentiment_stats()
                total = len(self.dashboard.entries)

                st.metric(
                    "😊 Positive",
                    f"{stats['POSITIVE']} ({stats['POSITIVE']/total*100:.0f}%)"
                )
                st.metric(
                    "😐 Neutral",
                    f"{stats['NEUTRAL']} ({stats['NEUTRAL']/total*100:.0f}%)"
                )
                st.metric(
                    "😟 Negative",
                    f"{stats['NEGATIVE']} ({stats['NEGATIVE']/total*100:.0f}%)"
                )

    def _render_main_content(self):
        """Render main content area."""
        col1, col2 = st.columns([2, 1])

        with col1:
            self._render_emotion_timeline()

        with col2:
            self._render_current_status()

        st.divider()
        self._render_recent_transcriptions()

    def _render_emotion_timeline(self):
        """Render emotion timeline chart."""
        st.subheader("📈 Emotion Timeline")

        if not self.dashboard.entries:
            st.info("Start a session to see the emotion timeline")
            return

        # Prepare data
        timestamps = [entry.timestamp for entry in self.dashboard.entries]
        scores = [self._sentiment_to_score(entry.sentiment) for entry in self.dashboard.entries]
        labels = [entry.sentiment.label for entry in self.dashboard.entries]

        # Create chart
        fig = go.Figure()
        fig.add_trace(go.Scatter(
            x=timestamps,
            y=scores,
            mode='lines+markers',
            line=dict(width=3, color='#2196F3'),
            marker=dict(
                size=12,
                color=[self.COLORS[label] for label in labels],
                line=dict(width=2, color='white')
            ),
            hovertemplate='<b>%{text}</b><br>Score: %{y:.2f}<br>%{x}<extra></extra>',
            text=labels
        ))

        # Add reference zones
        fig.add_hline(y=0, line_dash="dash", line_color="gray", opacity=0.5)
        fig.add_hrect(y0=0.3, y1=1, fillcolor="green", opacity=0.1, line_width=0, annotation_text="Positive")
        fig.add_hrect(y0=-0.3, y1=0.3, fillcolor="yellow", opacity=0.1, line_width=0, annotation_text="Neutral")
        fig.add_hrect(y0=-1, y1=-0.3, fillcolor="red", opacity=0.1, line_width=0, annotation_text="Negative")

        fig.update_layout(
            height=400,
            xaxis_title="Time",
            yaxis_title="Emotional Valence",
            yaxis=dict(range=[-1.1, 1.1]),
            showlegend=False,
            hovermode='closest'
        )

        st.plotly_chart(fig, use_container_width=True)

    def _render_current_status(self):
        """Render current emotional status."""
        st.subheader("💭 Current Status")

        sentiment = self.dashboard.current_sentiment
        color = self.COLORS[sentiment.label]
        emoji = self._get_emoji(sentiment)

        st.markdown(f"""
            <div class="sentiment-box" style="background-color: {color}; color: white;">
                <div style="font-size: 48px;">{emoji}</div>
                <div style="margin: 10px 0;">{sentiment.label}</div>
                <div style="font-size: 16px; opacity: 0.9;">
                    Confidence: {sentiment.score:.0%}
                </div>
            </div>
        """, unsafe_allow_html=True)

    def _render_recent_transcriptions(self):
        """Render recent transcription entries."""
        st.subheader("💬 Recent Transcriptions")

        if not self.dashboard.entries:
            st.info("No transcriptions yet. Start recording to see results.")
            return

        recent = self.dashboard.get_recent_entries(5)

        for entry in reversed(recent):
            color = self.COLORS[entry.sentiment.label]
            time_str = entry.timestamp.strftime("%H:%M:%S")
            emoji = self._get_emoji(entry.sentiment)

            # "{color}20" appends a hex alpha channel (~12% opacity) to the
            # six-digit sentiment color for a translucent card background.
            st.markdown(f"""
                <div class="transcription-card" style="
                    background-color: {color}20;
                    border-left: 5px solid {color};
                ">
                    <div style="color: {color}; font-weight: bold; margin-bottom: 8px;">
                        {emoji} [{time_str}] {entry.sentiment.label}
                        <span style="opacity: 0.8;">({entry.sentiment.score:.0%})</span>
                    </div>
                    <div style="font-size: 16px; color: #333;">
                        {entry.text}
                    </div>
                </div>
            """, unsafe_allow_html=True)

    def _sentiment_to_score(self, sentiment: SentimentResult) -> float:
        """Convert sentiment to -1 to 1 scale for visualization."""
        if sentiment.label == "POSITIVE":
            return sentiment.score
        elif sentiment.label == "NEGATIVE":
            return -sentiment.score
        else:
            return 0.0

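    # EMOJIS is scanned from the highest threshold down, so each score maps to
    # the strongest emoji it qualifies for, e.g. a POSITIVE result at 0.90
    # confidence lands in the 0.85 bucket.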
    def _get_emoji(self, sentiment: SentimentResult) -> str:
        """Get appropriate emoji for sentiment and confidence."""
        emoji_map = self.EMOJIS.get(sentiment.label, self.EMOJIS["NEUTRAL"])

        for threshold, emoji in sorted(emoji_map.items(), reverse=True):
            if sentiment.score >= threshold:
                return emoji

        return "😐"

def main():
    """Main application entry point."""
    # Initialize dashboard in session state
    if 'dashboard' not in st.session_state:
        try:
            st.session_state.dashboard = CoachingDashboard(chunk_duration=3)
        except Exception as e:
            st.error(f"Failed to initialize dashboard: {e}")
            st.stop()

    dashboard = st.session_state.dashboard

    # Render UI
    ui = DashboardUI(dashboard)
    ui.render()


if __name__ == "__main__":
    main()
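
# To try this locally (assuming the dependencies are installed and an
# OPENAI_API_KEY is available via .env or Streamlit secrets):
#   pip install streamlit openai transformers torch pyaudio plotly python-dotenv
#   streamlit run main.py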