import React, { useEffect, useRef } from 'react';
import type { VocalProfile, EQSetting } from '../types';

// Let TypeScript know D3 is available on the global scope
declare const d3: any;

interface ResultsViewProps {
  vocalProfile: VocalProfile;
  eqSettings: EQSetting[];
  audioBlob: Blob;
  audacityXml: string;
  onReset: () => void;
}

const FrequencyVisualizer: React.FC<{ audioBlob: Blob; eqSettings: EQSetting[] }> = ({ audioBlob, eqSettings }) => {
  const d3Container = useRef<SVGSVGElement | null>(null);

  useEffect(() => {
    if (!audioBlob || !d3Container.current) return;

    let audioContext: AudioContext | null = null;

    const processAudio = async () => {
      audioContext = new (window.AudioContext || (window as any).webkitAudioContext)();
      const arrayBuffer = await audioBlob.arrayBuffer();

      let audioBuffer;
      try {
        audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
      } catch (e) {
        console.error("Error decoding audio data:", e);
        drawChart(new Uint8Array(0), 0, "Error decoding audio file.");
        if (audioContext) audioContext.close();
        return;
      }

      const source = audioContext.createBufferSource();
      source.buffer = audioBuffer;

      const analyser = audioContext.createAnalyser();
      analyser.fftSize = 2048;
      source.connect(analyser); // Not connecting to destination, so nothing is audible

      // start(when, offset, duration): skip the first 0.1s (clamped to the clip length)
      // and play at most 5 seconds.
      source.start(0, Math.min(0.1, audioBuffer.duration), 5);

      // Take a single frequency-domain snapshot 100ms after playback starts.
      setTimeout(() => {
        if (!audioContext || audioContext.state === 'closed') return;

        const bufferLength = analyser.frequencyBinCount;
        const dataArray = new Uint8Array(bufferLength);
        analyser.getByteFrequencyData(dataArray);
        source.stop();

        if (dataArray.some(d => d > 0)) {
          drawChart(dataArray, audioBuffer.sampleRate);
        } else {
          console.warn("Analyser returned no frequency data. The audio might be silent at the snapshot point.");
          drawChart(new Uint8Array(0), 0, "Could not visualize audio: sample may be silent.");
        }
        audioContext.close();
      }, 100);
    };

    const drawChart = (dataArray: Uint8Array, sampleRate: number, message?: string) => {
      const svg = d3.select(d3Container.current);
      svg.selectAll("*").remove();

      const svgNode = d3Container.current;
      if (!svgNode) return;

      const { width: containerWidth, height: containerHeight } = svgNode.getBoundingClientRect();
      const margin = { top: 20, right: 20, bottom: 40, left: 50 };
      const width = containerWidth - margin.left - margin.right;
      const height = containerHeight - margin.top - margin.bottom;
      if (width <= 0 || height <= 0) return;

      const g = svg.append("g").attr("transform", `translate(${margin.left},${margin.top})`);

      // Log-frequency x axis over the audible range; linear y axis over the byte range 0-255.
      const x = d3.scaleLog().domain([20, 20000]).range([0, width]);
      const y = d3.scaleLinear().domain([0, 255]).range([height, 0]);

      g.append("g")
        .attr("transform", `translate(0,${height})`)
        .call(d3.axisBottom(x).ticks(5, ".0s").tickSizeOuter(0))
        .selectAll("text")
        .style("fill", "#9ca3af");

      g.append("g")
        .call(d3.axisLeft(y).ticks(5).tickSizeOuter(0))
        .selectAll("text")
        .style("fill", "#9ca3af");

      g.append("text")
        .attr("text-anchor", "middle")
        .attr("x", width / 2)
        .attr("y", height + margin.bottom - 5)
        .style("fill", "#9ca3af")
        .text("Frequency (Hz)");

      g.append("text")
        .attr("text-anchor", "middle")
        .attr("transform", "rotate(-90)")
        .attr("y", -margin.left + 15)
        .attr("x", -height / 2)
        .style("fill", "#9ca3af")
        .text("Amplitude");

      if (dataArray.length === 0 || !sampleRate) {
        g.append("text")
          .attr("x", width / 2)
          .attr("y", height / 2)
          .attr("text-anchor", "middle")
          .style("fill", "#e5e7eb")
          .text(message || "No audio data to display.");
        return;
      }

      // Bin i of the FFT covers frequency i * sampleRate / fftSize, and
      // fftSize = 2 * frequencyBinCount = 2 * dataArray.length.
      const data = Array.from(dataArray)
        .map((d, i) => ({
          frequency: (i * sampleRate) / (2 * dataArray.length),
          amplitude: d,
        }))
        .filter(d => d.frequency >= 20 && d.frequency <= 20000);

      const barWidth = width / data.length;

      g.selectAll(".bar")
        .data(data)
        .enter()
        .append("rect")
        .attr("class", "bar")
        .attr("x", d => x(d.frequency))
        .attr("y", d => y(d.amplitude))
        .attr("width", barWidth > 0 ? barWidth : 1)
        .attr("height", d => height - y(d.amplitude))
        .attr("fill", d => {
          // Color bars within ~0.1 decades of an EQ band: green for boosts, red for cuts.
          const closestEq = eqSettings.find(eq => Math.abs(Math.log10(eq.frequency) - Math.log10(d.frequency)) < 0.1);
          if (closestEq) {
            return closestEq.gain > 0 ? '#22c55e' : '#ef4444';
          }
          return '#00BFFF';
        });
    };

    processAudio().catch(console.error);

    // Close the AudioContext if the component unmounts before the snapshot fires.
    return () => {
      if (audioContext && audioContext.state !== 'closed') {
        audioContext.close();
      }
    };
  }, [audioBlob, eqSettings]);

  return (

    <div>
      <h3>Vocal Frequency Snapshot</h3>
      {/* The SVG must have concrete layout dimensions so drawChart can size the chart from getBoundingClientRect() */}
      <svg ref={d3Container} width="100%" height="300" />
    </div>

  );
};

export const ResultsView: React.FC<ResultsViewProps> = ({ vocalProfile, eqSettings, audioBlob, audacityXml, onReset }) => {
  const handleCopyJson = () => {
    navigator.clipboard
      .writeText(JSON.stringify({ vocalProfile, eqSettings }, null, 2))
      .then(() => alert("EQ settings copied to clipboard as JSON!"))
      .catch(console.error);
  };

  const handleDownloadXml = () => {
    // Download the Audacity preset by clicking a temporary anchor pointed at an object URL.
    const blob = new Blob([audacityXml], { type: 'application/xml' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = 'gemini-eq-preset.xml';
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);
  };

  return (

    <div>
      <h2>Analysis Complete</h2>

      <section>
        <h3>Vocal Profile</h3>
        <p>{vocalProfile.description}</p>

        <h4>Key Characteristics</h4>
        <ul>
          <li>Fundamental Range: {vocalProfile.fundamentalRange}</li>
          {vocalProfile.keyCharacteristics.map((char, i) => (
            <li key={i}>{char}</li>
          ))}
        </ul>
      </section>

      <FrequencyVisualizer audioBlob={audioBlob} eqSettings={eqSettings} />

      <section>
        <h3>Generated EQ Preset</h3>
        <table>
          <thead>
            <tr>
              <th>Frequency</th>
              <th>Gain (dB)</th>
              <th>Action</th>
            </tr>
          </thead>
          <tbody>
            {eqSettings.map(({ frequency, gain }, i) => (
              <tr key={i}>
                <td>{frequency} Hz</td>
                <td className={gain > 0 ? 'text-green-400' : gain < 0 ? 'text-red-400' : 'text-gray-300'}>
                  {gain > 0 ? '+' : ''}{gain.toFixed(1)}
                </td>
                <td>
                  <span
                    className={
                      gain > 0.1
                        ? 'bg-green-800/50 text-green-300'
                        : gain < -0.1
                        ? 'bg-red-800/50 text-red-300'
                        : 'bg-gray-600 text-gray-300'
                    }
                  >
                    {gain > 0.1 ? 'Boost' : gain < -0.1 ? 'Cut' : 'Neutral'}
                  </span>
                </td>
              </tr>
            ))}
          </tbody>
        </table>
      </section>

      <div>
        <button onClick={handleCopyJson}>Copy EQ as JSON</button>
        <button onClick={handleDownloadXml}>Download Audacity XML</button>
        <button onClick={onReset}>Start Over</button>
      </div>
    </div>
  );
};
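
/*
 * Usage sketch (illustrative assumption, not part of this module): a parent
 * screen could render ResultsView like this once analysis finishes. The
 * literal profile/EQ values and the `recordingBlob`, `presetXml`, and
 * `setStep` names are hypothetical placeholders; only the prop shapes
 * (description, fundamentalRange, keyCharacteristics, frequency, gain)
 * come from how VocalProfile and EQSetting are used in this file.
 *
 *   <ResultsView
 *     vocalProfile={{
 *       description: 'Warm baritone with slight muddiness around 250 Hz',
 *       fundamentalRange: '100-300 Hz',
 *       keyCharacteristics: ['strong low-mids', 'soft sibilance'],
 *     }}
 *     eqSettings={[
 *       { frequency: 250, gain: -2.5 },
 *       { frequency: 3000, gain: 1.5 },
 *     ]}
 *     audioBlob={recordingBlob}
 *     audacityXml={presetXml}
 *     onReset={() => setStep('record')}
 *   />
 */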