diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json deleted file mode 100644 index 1397628f7f6e7b64c728a1b7f2b6d1ffe2da6e60..0000000000000000000000000000000000000000 --- a/.devcontainer/devcontainer.json +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:79a20e69bb53755d95829b1a9a67f8b7ad9e6cad4859412d6e7d3bc7d5570c93 -size 1282 diff --git a/.gitignore b/.gitignore index 36ad903abd0f801235f7f1fe610c91a85c6ec41b..7d3f93e51bb7bec2de830179abaf0e2b95226547 100644 --- a/.gitignore +++ b/.gitignore @@ -19,7 +19,8 @@ node_modules/ *.env *.cache -app/scripts/latex-converter/output/ +app/scripts/latex-to-mdx/output/ +app/src/content/embeds/typography/generated # PDF export app/public/*.pdf diff --git a/.temp-template-sync b/.temp-template-sync deleted file mode 160000 index ead50ba9028719475151d26696248ef6fe996b70..0000000000000000000000000000000000000000 --- a/.temp-template-sync +++ /dev/null @@ -1 +0,0 @@ -Subproject commit ead50ba9028719475151d26696248ef6fe996b70 diff --git a/app/.astro/astro/content.d.ts b/app/.astro/astro/content.d.ts index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..eb236b062e47ff762326764dbd53546131697d54 100644 --- a/app/.astro/astro/content.d.ts +++ b/app/.astro/astro/content.d.ts @@ -0,0 +1,284 @@ +declare module 'astro:content' { + interface Render { + '.mdx': Promise<{ + Content: import('astro').MarkdownInstance<{}>['Content']; + headings: import('astro').MarkdownHeading[]; + remarkPluginFrontmatter: Record; + components: import('astro').MDXInstance<{}>['components']; + }>; + } +} + +declare module 'astro:content' { + interface RenderResult { + Content: import('astro/runtime/server/index.js').AstroComponentFactory; + headings: import('astro').MarkdownHeading[]; + remarkPluginFrontmatter: Record; + } + interface Render { + '.md': Promise; + } + + export interface RenderedContent { + html: string; + metadata?: { + imagePaths: Array; + [key: string]: unknown; + }; + } +} + +declare module 'astro:content' { + type Flatten = T extends { [K: string]: infer U } ? U : never; + + export type CollectionKey = keyof AnyEntryMap; + export type CollectionEntry = Flatten; + + export type ContentCollectionKey = keyof ContentEntryMap; + export type DataCollectionKey = keyof DataEntryMap; + + type AllValuesOf = T extends any ? T[keyof T] : never; + type ValidContentEntrySlug = AllValuesOf< + ContentEntryMap[C] + >['slug']; + + /** @deprecated Use `getEntry` instead. */ + export function getEntryBySlug< + C extends keyof ContentEntryMap, + E extends ValidContentEntrySlug | (string & {}), + >( + collection: C, + // Note that this has to accept a regular string too, for SSR + entrySlug: E, + ): E extends ValidContentEntrySlug + ? Promise> + : Promise | undefined>; + + /** @deprecated Use `getEntry` instead. */ + export function getDataEntryById( + collection: C, + entryId: E, + ): Promise>; + + export function getCollection>( + collection: C, + filter?: (entry: CollectionEntry) => entry is E, + ): Promise; + export function getCollection( + collection: C, + filter?: (entry: CollectionEntry) => unknown, + ): Promise[]>; + + export function getEntry< + C extends keyof ContentEntryMap, + E extends ValidContentEntrySlug | (string & {}), + >(entry: { + collection: C; + slug: E; + }): E extends ValidContentEntrySlug + ? 
Promise> + : Promise | undefined>; + export function getEntry< + C extends keyof DataEntryMap, + E extends keyof DataEntryMap[C] | (string & {}), + >(entry: { + collection: C; + id: E; + }): E extends keyof DataEntryMap[C] + ? Promise + : Promise | undefined>; + export function getEntry< + C extends keyof ContentEntryMap, + E extends ValidContentEntrySlug | (string & {}), + >( + collection: C, + slug: E, + ): E extends ValidContentEntrySlug + ? Promise> + : Promise | undefined>; + export function getEntry< + C extends keyof DataEntryMap, + E extends keyof DataEntryMap[C] | (string & {}), + >( + collection: C, + id: E, + ): E extends keyof DataEntryMap[C] + ? Promise + : Promise | undefined>; + + /** Resolve an array of entry references from the same collection */ + export function getEntries( + entries: { + collection: C; + slug: ValidContentEntrySlug; + }[], + ): Promise[]>; + export function getEntries( + entries: { + collection: C; + id: keyof DataEntryMap[C]; + }[], + ): Promise[]>; + + export function render( + entry: AnyEntryMap[C][string], + ): Promise; + + export function reference( + collection: C, + ): import('astro/zod').ZodEffects< + import('astro/zod').ZodString, + C extends keyof ContentEntryMap + ? { + collection: C; + slug: ValidContentEntrySlug; + } + : { + collection: C; + id: keyof DataEntryMap[C]; + } + >; + // Allow generic `string` to avoid excessive type errors in the config + // if `dev` is not running to update as you edit. + // Invalid collection names will be caught at build time. + export function reference( + collection: C, + ): import('astro/zod').ZodEffects; + + type ReturnTypeOrOriginal = T extends (...args: any[]) => infer R ? R : T; + type InferEntrySchema = import('astro/zod').infer< + ReturnTypeOrOriginal['schema']> + >; + + type ContentEntryMap = { + "chapters": { +"demo/best-pratices.mdx": { + id: "demo/best-pratices.mdx"; + slug: "demo/best-pratices"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"demo/components.mdx": { + id: "demo/components.mdx"; + slug: "demo/components"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"demo/debug-components.mdx": { + id: "demo/debug-components.mdx"; + slug: "demo/debug-components"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"demo/getting-started.mdx": { + id: "demo/getting-started.mdx"; + slug: "demo/getting-started"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"demo/greetings.mdx": { + id: "demo/greetings.mdx"; + slug: "demo/greetings"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"demo/introduction.mdx": { + id: "demo/introduction.mdx"; + slug: "demo/introduction"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"demo/latex-convertion.mdx": { + id: "demo/latex-convertion.mdx"; + slug: "demo/latex-convertion"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"demo/markdown.mdx": { + id: "demo/markdown.mdx"; + slug: "demo/markdown"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"demo/vibe-coding-charts.mdx": { + id: "demo/vibe-coding-charts.mdx"; + slug: "demo/vibe-coding-charts"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"demo/writing-your-content.mdx": { + id: "demo/writing-your-content.mdx"; + slug: 
"demo/writing-your-content"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +"your-first-chapter.mdx": { + id: "your-first-chapter.mdx"; + slug: "your-first-chapter"; + body: string; + collection: "chapters"; + data: any +} & { render(): Render[".mdx"] }; +}; +"embeds": { +"vibe-code-d3-embeds-directives.md": { + id: "vibe-code-d3-embeds-directives.md"; + slug: "vibe-code-d3-embeds-directives"; + body: string; + collection: "embeds"; + data: any +} & { render(): Render[".md"] }; +}; + + }; + + type DataEntryMap = { + "assets": { +"data/data": { + id: "data/data"; + collection: "assets"; + data: any +}; +"data/font-sprite-mapping": { + id: "data/font-sprite-mapping"; + collection: "assets"; + data: any +}; +"data/font_manifest": { + id: "data/font_manifest"; + collection: "assets"; + data: any +}; +"data/llm_benchmarks": { + id: "data/llm_benchmarks"; + collection: "assets"; + data: any +}; +"data/mnist-variant-model": { + id: "data/mnist-variant-model"; + collection: "assets"; + data: any +}; +"data/typography_data": { + id: "data/typography_data"; + collection: "assets"; + data: any +}; +}; + + }; + + type AnyEntryMap = ContentEntryMap & DataEntryMap; + + export type ContentConfig = never; +} diff --git a/app/package-lock.json b/app/package-lock.json index a8f08e754c5b26f7f022b14b3c40c3aae0ed1800..d5fe3e88134ec17e1321115d5e3a6324fc754a25 100644 Binary files a/app/package-lock.json and b/app/package-lock.json differ diff --git a/app/package.json b/app/package.json index 102686d9e5a6ed672c989d7c1103ac25eaf7ddf4..660e1a654be5ca1a45138240dd8f8851f726986b 100644 Binary files a/app/package.json and b/app/package.json differ diff --git a/app/public/scripts/color-palettes.js b/app/public/scripts/color-palettes.js index 7dd3223cfb8a3b6cf5b65121ef1e4eacf33232dd..370b1f464142e0d9280855b18f8f636db810ea6e 100644 --- a/app/public/scripts/color-palettes.js +++ b/app/public/scripts/color-palettes.js @@ -46,63 +46,95 @@ return { r, g, b: b3 }; }; const oklchToOklab = (L, C, hDeg) => { const h = (hDeg * Math.PI) / 180; return { L, a: C * Math.cos(h), b: C * Math.sin(h) }; }; - const oklabToOklch = (L, a, b) => { const C = Math.sqrt(a*a + b*b); let h = Math.atan2(b, a) * 180 / Math.PI; if (h < 0) h += 360; return { L, C, h }; }; + const oklabToOklch = (L, a, b) => { const C = Math.sqrt(a * a + b * b); let h = Math.atan2(b, a) * 180 / Math.PI; if (h < 0) h += 360; return { L, C, h }; }; const clamp01 = (x) => Math.min(1, Math.max(0, x)); const isInGamut = ({ r, g, b }) => r >= 0 && r <= 1 && g >= 0 && g <= 1 && b >= 0 && b <= 1; - const toHex = ({ r, g, b }) => { const R = Math.round(clamp01(r)*255), G = Math.round(clamp01(g)*255), B = Math.round(clamp01(b)*255); const h = (n) => n.toString(16).padStart(2,'0'); return `#${h(R)}${h(G)}${h(B)}`.toUpperCase(); }; - const oklchToHexSafe = (L, C, h) => { let c = C; for (let i=0;i<12;i++){ const { a, b } = oklchToOklab(L,c,h); const rgb = oklabToRgb(L,a,b); if (isInGamut(rgb)) return toHex(rgb); c = Math.max(0, c-0.02);} return toHex(oklabToRgb(L,0,0)); }; - const parseCssColorToRgb = (css) => { try { const el = document.createElement('span'); el.style.color = css; document.body.appendChild(el); const cs = getComputedStyle(el).color; document.body.removeChild(el); const m = cs.match(/rgba?\((\d+),\s*(\d+),\s*(\d+)/i); if (!m) return null; return { r: Number(m[1])/255, g: Number(m[2])/255, b: Number(m[3])/255 }; } catch { return null; } }; + const toHex = ({ r, g, b }) => { + const R = Math.round(clamp01(r) * 255), G = 
Math.round(clamp01(g) * 255), B = Math.round(clamp01(b) * 255); + const h = (n) => n.toString(16).padStart(2, '0'); + return `#${h(R)}${h(G)}${h(B)}`.toUpperCase(); + }; + const oklchToHexSafe = (L, C, h) => { let c = C; for (let i = 0; i < 12; i++) { const { a, b } = oklchToOklab(L, c, h); const rgb = oklabToRgb(L, a, b); if (isInGamut(rgb)) return toHex(rgb); c = Math.max(0, c - 0.02); } return toHex(oklabToRgb(L, 0, 0)); }; + const parseCssColorToRgb = (css) => { try { const el = document.createElement('span'); el.style.color = css; document.body.appendChild(el); const cs = getComputedStyle(el).color; document.body.removeChild(el); const m = cs.match(/rgba?\((\d+),\s*(\d+),\s*(\d+)/i); if (!m) return null; return { r: Number(m[1]) / 255, g: Number(m[2]) / 255, b: Number(m[3]) / 255 }; } catch { return null; } }; - const getPrimaryHex = () => { + // Get primary color in OKLCH format to preserve precision + const getPrimaryOKLCH = () => { const css = getCssVar('--primary-color'); - if (!css) return '#E889AB'; - if (/^#([0-9a-f]{3}|[0-9a-f]{6})$/i.test(css)) return css.toUpperCase(); + if (!css) return null; + + // For OKLCH colors, return the exact values without conversion + if (css.includes('oklch')) { + const oklchMatch = css.match(/oklch\(([^)]+)\)/); + if (oklchMatch) { + const values = oklchMatch[1].split(/\s+/).map(v => parseFloat(v.trim())); + if (values.length >= 3) { + const [L, C, h] = values; + return { L, C, h }; + } + } + } + + // For non-OKLCH colors, convert to OKLCH for consistency const rgb = parseCssColorToRgb(css); - if (rgb) return toHex(rgb); - return '#E889AB'; + if (rgb) { + const { L, a, b } = rgbToOklab(rgb.r, rgb.g, rgb.b); + const { C, h } = oklabToOklch(L, a, b); + return { L, C, h }; + } + return null; + }; + + // Keep getPrimaryHex for backward compatibility, but now it converts from OKLCH + const getPrimaryHex = () => { + const oklch = getPrimaryOKLCH(); + if (!oklch) return null; + + const { a, b } = oklchToOklab(oklch.L, oklch.C, oklch.h); + const rgb = oklabToRgb(oklch.L, a, b); + return toHex(rgb); }; // No count management via CSS anymore; counts are passed directly to the API const generators = { - categorical: (baseHex, count) => { - const parseHex = (h) => { const s = h.replace('#',''); const v = s.length===3 ? s.split('').map(ch=>ch+ch).join('') : s; return { r: parseInt(v.slice(0,2),16)/255, g: parseInt(v.slice(2,4),16)/255, b: parseInt(v.slice(4,6),16)/255 }; }; - const { r, g, b } = parseHex(baseHex); - const { L, a, b: bb } = rgbToOklab(r,g,b); - const { C, h } = oklabToOklch(L,a,bb); + categorical: (baseOKLCH, count) => { + const { L, C, h } = baseOKLCH; const L0 = Math.min(0.85, Math.max(0.4, L)); const C0 = Math.min(0.35, Math.max(0.1, C || 0.2)); const total = Math.max(1, Math.min(12, count || 8)); const hueStep = 360 / total; const results = []; - for (let i=0;i { - const parseHex = (h) => { const s = h.replace('#',''); const v = s.length===3 ? 
s.split('').map(ch=>ch+ch).join('') : s; return { r: parseInt(v.slice(0,2),16)/255, g: parseInt(v.slice(2,4),16)/255, b: parseInt(v.slice(4,6),16)/255 }; }; - const { r, g, b } = parseHex(baseHex); - const { L, a, b: bb } = rgbToOklab(r,g,b); - const { C, h } = oklabToOklch(L,a,bb); + sequential: (baseOKLCH, count) => { + const { L, C, h } = baseOKLCH; const total = Math.max(1, Math.min(12, count || 8)); const startL = Math.max(0.25, L - 0.18); const endL = Math.min(0.92, L + 0.18); const cBase = Math.min(0.33, Math.max(0.08, C * 0.9 + 0.06)); const out = []; - for (let i=0;i { - const parseHex = (h) => { const s = h.replace('#',''); const v = s.length===3 ? s.split('').map(ch=>ch+ch).join('') : s; return { r: parseInt(v.slice(0,2),16)/255, g: parseInt(v.slice(2,4),16)/255, b: parseInt(v.slice(4,6),16)/255 }; }; - const { r, g, b } = parseHex(baseHex); - const baseLab = rgbToOklab(r,g,b); - const baseLch = oklabToOklch(baseLab.L, baseLab.a, baseLab.b); + diverging: (baseOKLCH, count) => { + const { L, C, h } = baseOKLCH; const total = Math.max(1, Math.min(12, count || 8)); // Left endpoint: EXACT primary color (no darkening) - const leftLab = baseLab; + const leftLab = oklchToOklab(L, C, h); // Right endpoint: complement with same L and similar C (clamped safe) - const compH = (baseLch.h + 180) % 360; - const cSafe = Math.min(0.35, Math.max(0.08, baseLch.C)); - const rightLab = oklchToOklab(baseLab.L, cSafe, compH); + const compH = (h + 180) % 360; + const cSafe = Math.min(0.35, Math.max(0.08, C)); + const rightLab = oklchToOklab(L, cSafe, compH); const whiteLab = { L: 0.98, a: 0, b: 0 }; // center near‑white const hexFromOKLab = (L, a, b) => toHex(oklabToRgb(L, a, b)); @@ -152,18 +184,22 @@ let lastSignature = ''; const updatePalettes = () => { - const primary = getPrimaryHex(); - const signature = `${primary}`; + const primaryOKLCH = getPrimaryOKLCH(); + const primaryHex = getPrimaryHex(); + const signature = `${primaryOKLCH?.L},${primaryOKLCH?.C},${primaryOKLCH?.h}`; if (signature === lastSignature) return; lastSignature = signature; - try { document.dispatchEvent(new CustomEvent('palettes:updated', { detail: { primary } })); } catch {} + try { document.dispatchEvent(new CustomEvent('palettes:updated', { detail: { primary: primaryHex, primaryOKLCH } })); } catch { } }; const bootstrap = () => { + // Initial setup - only run once on page load updatePalettes(); + + // Observer will handle all subsequent changes const mo = new MutationObserver(() => updatePalettes()); mo.observe(MODE.cssRoot, { attributes: true, attributeFilter: ['style', 'data-theme'] }); - setInterval(updatePalettes, 400); + // Utility: choose high-contrast (or softened) text style against an arbitrary background color const pickTextStyleForBackground = (bgCss, opts = {}) => { const cssRoot = document.documentElement; @@ -175,13 +211,13 @@ if (!rgb) return null; return rgb; // already 0..1 }; - const mixRgb01 = (a, b, t) => ({ r: a.r*(1-t)+b.r*t, g: a.g*(1-t)+b.g*t, b: a.b*(1-t)+b.b*t }); + const mixRgb01 = (a, b, t) => ({ r: a.r * (1 - t) + b.r * t, g: a.g * (1 - t) + b.g * t, b: a.b * (1 - t) + b.b * t }); const relLum = (rgb) => { const f = (u) => srgbToLinear(u); - return 0.2126*f(rgb.r) + 0.7152*f(rgb.g) + 0.0722*f(rgb.b); + return 0.2126 * f(rgb.r) + 0.7152 * f(rgb.g) + 0.0722 * f(rgb.b); }; const contrast = (fg, bg) => { - const L1 = relLum(fg), L2 = relLum(bg); const a = Math.max(L1,L2), b = Math.min(L1,L2); + const L1 = relLum(fg), L2 = relLum(bg); const a = Math.max(L1, L2), b = Math.min(L1, L2); return (a + 
0.05) / (b + 0.05); }; try { @@ -193,7 +229,7 @@ .filter(x => !!x.rgb); // Pick the max contrast let best = candidates[0]; let bestCR = contrast(best.rgb, bg); - for (let i=1;i bestCR) { best = candidates[i]; bestCR = cr; } } @@ -206,7 +242,7 @@ finalRgb = mixRgb01(best.rgb, mutedRgb, blend); } const haloStrength = Math.min(1, Math.max(0, Number(opts.haloStrength == null ? 0.5 : opts.haloStrength))); - const stroke = (best.css === '#000' || best.css.toLowerCase() === 'black') ? `rgba(255,255,255,${0.30 + 0.40*haloStrength})` : `rgba(0,0,0,${0.30 + 0.30*haloStrength})`; + const stroke = (best.css === '#000' || best.css.toLowerCase() === 'black') ? `rgba(255,255,255,${0.30 + 0.40 * haloStrength})` : `rgba(0,0,0,${0.30 + 0.30 * haloStrength})`; return { fill: toHex(finalRgb), stroke, strokeWidth: (opts.haloWidth == null ? 1 : Number(opts.haloWidth)) }; } catch { return { fill: getCssVar('--text-color') || '#000', stroke: 'var(--transparent-page-contrast)', strokeWidth: 1 }; @@ -214,14 +250,16 @@ }; window.ColorPalettes = { refresh: updatePalettes, - notify: () => { try { const primary = getPrimaryHex(); document.dispatchEvent(new CustomEvent('palettes:updated', { detail: { primary } })); } catch {} }, + notify: () => { try { const primaryOKLCH = getPrimaryOKLCH(); const primaryHex = getPrimaryHex(); document.dispatchEvent(new CustomEvent('palettes:updated', { detail: { primary: primaryHex, primaryOKLCH } })); } catch { } }, getPrimary: () => getPrimaryHex(), + getPrimaryOKLCH: () => getPrimaryOKLCH(), getColors: (key, count = 6) => { - const primary = getPrimaryHex(); + const primaryOKLCH = getPrimaryOKLCH(); + if (!primaryOKLCH) return []; const total = Math.max(1, Math.min(12, Number(count) || 6)); - if (key === 'categorical') return generators.categorical(primary, total); - if (key === 'sequential') return generators.sequential(primary, total); - if (key === 'diverging') return generators.diverging(primary, total); + if (key === 'categorical') return generators.categorical(primaryOKLCH, total); + if (key === 'sequential') return generators.sequential(primaryOKLCH, total); + if (key === 'diverging') return generators.diverging(primaryOKLCH, total); return []; }, getTextStyleForBackground: (bgCss, opts) => pickTextStyleForBackground(bgCss, opts || {}), diff --git a/app/scripts/latex-to-mdx/README.md b/app/scripts/latex-to-mdx/README.md index 6eb428dba27e92ce07d8d24955a9f7d522d94e8d..ff447bf0da35180484c085a559277f0396d31e1e 100644 --- a/app/scripts/latex-to-mdx/README.md +++ b/app/scripts/latex-to-mdx/README.md @@ -49,7 +49,7 @@ latex-to-mdx/ ### 🎨 **Automatic Styling** - **Highlights**: `\highlight{text}` → `text` - **Auto cleanup**: Removal of numbering `(1)`, `(2)`, etc. -- **Astro components**: Images → `ResponsiveImage` with automatic imports +- **Astro components**: Images → `Figure` with automatic imports ### 🔧 **Robust Pipeline** - **LaTeX preprocessor**: Reference cleanup before Pandoc @@ -83,7 +83,7 @@ title: "Your Article Title" description: "Generated from LaTeX" --- -import ResponsiveImage from '../components/ResponsiveImage.astro'; +import Figure from '../components/Figure.astro'; import figure1 from '../assets/image/figure1.png'; ## Section with invisible anchor @@ -96,7 +96,7 @@ Reference to an interactive [equation](#equation-name). Equation with KaTeX ID: $$\htmlId{equation-name}{E = mc^2}$$ - +
``` ## ⚙️ Required Astro Configuration @@ -141,7 +141,7 @@ export default defineConfig({ - Code snippet injection 4. **MDX Conversion** (`mdx-converter.mjs`) - - Images transformation → `ResponsiveImage` + - Images transformation → `Figure` - HTML span escaping correction - Automatic imports generation - MDX frontmatter diff --git a/app/scripts/latex-to-mdx/input/sections/05_foundation_models.tex.temp b/app/scripts/latex-to-mdx/input/sections/05_foundation_models.tex.temp deleted file mode 100644 index fe41a67381a725dff1ba15049c90396e35f68ae8..0000000000000000000000000000000000000000 --- a/app/scripts/latex-to-mdx/input/sections/05_foundation_models.tex.temp +++ /dev/null @@ -1,224 +0,0 @@ -\section{Generalist Robot Policies} -\label{sec:learning-foundation} - -\epigraph{\textit{Specialization is for insects}}{Robert A. Heinlein} - -> **TL;DR** -> Openly available large scale datasets and the development of stable, expressive and efficient architecture fostered research on the development of generalist robot policies that can operate across embodiment and tasks. - -The advent of large models trained on internet-scale datasets has drastically influenced fields like Computer Vision (CV) and Natural Language Processing (NLP), shifting the paradigm towards combining (1) an initial, task-agnostic large-scale pre-training stage and a (2) task-specific, adjustment phase. -The pre-training/adaptation paradigm has now largely replaced more classic approaches consisting of task-specific data collection, curation and model training in many subdomains within CV and NLP, motivated by the main drawback of limited scalability for \emph{task-specific approaches}, traditionally labor intensive. -Factors including (1) the advancements in generalist models learned with self-supervision for perception [@oquabDINOv2LearningRobust2024] or semantic understanding [@devlinBERTPretrainingDeep2019] and (2) the popularization collective efforts to aggregate large-scale openly available datasets [@collaborationOpenXEmbodimentRobotic2025,khazatskyDROIDLargeScaleInTheWild2025] are increasingly pushing the field of robot learning towards the pre-train-and-adapt paradigm. -This shift taps into the long-standing challenge of developing generalist robot policies, and holds the premise to surpass traditionally siloed approaches to robotics problems and develop a \emph{foundation robotics model}. -While Section~\ref{sec:learning-bc-single} introduced methods for learning \emph{single-task policies} such as ACT or Diffusion Policy, in this section we present advancements in developing \emph{generalist, multi-task, policies}, capable of performing a wide range of tasks across different environments and embodiments, and guided by unstructured instructions given via natural language. - -![Fields within ML such as Computer Vision and NLP converged on the development of foundation models, trained on a variety of large scale models and capable to perform multiple downstream tasks (top). 
Conversely, robotics suffered from limited standardization in terms of the architectures used, and siloed, task specific datasets, incurring in a high degree of fragmentation which traditionally hindered the development of generalist models for robotics in favour of task-specific models (bottom).](assets/image/ch5/ch5-ml-vs-robotics-foundation.png) {#fig-fig:ch5-ml-vs-robotics-foundation} - -*Fields within ML such as Computer Vision and NLP converged on the development of foundation models, trained on a variety of large scale models and capable to perform multiple downstream tasks (top). Conversely, robotics suffered from limited standardization in terms of the architectures used, and siloed, task specific datasets, incurring in a high degree of fragmentation which traditionally hindered the development of generalist models for robotics in favour of task-specific models (bottom).* - -\subsection{Preliminaries: Models and Data} -The remarkable success of foundation models in NLP and CV is predicated on two core principles: architectural innovation and joint data-compute scaling. -The transformer architecture proved instrumental in capturing long-range dependencies in sequential data such as text, and its stability and expressivity made it the \emph{de facto} standard for modern large-scale models trained on internet-scale amounts of data. -In stark contrast with popular NLP [@raffelExploringLimitsTransfer2023] and CV [@ImageNet_VSS09] general-purpose datasets, the field of robotics has historically developed around task-specific datasets which hinders scalability across problems, resulting in a concrete data deficit for general-purpose robot learning. -Unlike the wealth of relatively readily available text and images on the internet, robotics data is intrinsically embodied---datasets collected for a manipulation robot typically differ entirely from locomotion datasets. -Further, datasets consisting of expert demonstrations are (1) intrinsically expensive to collect (2) and notoriously heterogeneous---different human experts may perform the same task optimally yet in very different ways. -In particular, since each expert trajectory is tied to a specific robot platform and the operating conditions of its environment and task, data heterogeneity has long posed a \emph{methodological} challenge for scaling robotics datasets via aggregation. -Beyond this, heterogeneity also raises \emph{conceptual} issues: naively mixing data across embodiments can induce negative transfer, as control strategies developed in isolation for different robot systems in different environments may even conflict when combined. -Thus, the high degree of fragmentation of robotics datasets and tasks has traditionally led to the development of \emph{specialist} policies, trained on small, task-specific datasets, and which excel at their designated task but fail to generalize to new situations (Figure~\ref{fig:ch5-ml-vs-robotics-foundation}). - -![Early efforts in the development of generalist models for robotics include BC-Zero [@jangBCZZeroShotTask2022], RT-1 [@brohanRT1RoboticsTransformer2023], and RT-2 [@brohanRT2VisionLanguageActionModels2023]: large scale models trained on thousands of demonstrations. 
The open release of the Open-X [@collaborationOpenXEmbodimentRobotic2025] and DROID datasets [@khazatskyDROIDLargeScaleInTheWild2025] fostered the development of open source models: OpenVLA [@kimOpenVLAOpenSourceVisionLanguageAction2024], \pi_0 [@black$p_0$VisionLanguageActionFlow2024] and SmolVLA [@shukorSmolVLAVisionLanguageActionModel2025].](assets/image/ch5/ch5-generalist-policies-timeline.png) {#fig-fig:ch5-generalist-policies-timeline} - -*Early efforts in the development of generalist models for robotics include BC-Zero [@jangBCZZeroShotTask2022], RT-1 [@brohanRT1RoboticsTransformer2023], and RT-2 [@brohanRT2VisionLanguageActionModels2023]: large scale models trained on thousands of demonstrations. The open release of the Open-X [@collaborationOpenXEmbodimentRobotic2025] and DROID datasets [@khazatskyDROIDLargeScaleInTheWild2025] fostered the development of open source models: OpenVLA [@kimOpenVLAOpenSourceVisionLanguageAction2024], \pi_0 [@black$p_0$VisionLanguageActionFlow2024] and SmolVLA [@shukorSmolVLAVisionLanguageActionModel2025].* - -Motivated by the pursuit of generalist robot policies, the research community started investigating what and how to integrate from other domains within ML. -Figure~\ref{fig:ch5-generalist-policies-timeline} shows a timeline of some of the most popular contributions attempting at developing generalist policies. -Starting from BC-Zero, a latent variable model trained on 25K+ demonstrations, the field has now evolved into \( \pi_0 \), a transformer-based model trained on 10M+ demonstrations and exhibiting strong few-shot capabilities across tasks and embodiments. -For starters, Robotics Transformer 1 (RT-1) [@brohanRT1RoboticsTransformer2023] represented a significant step in the direction of developing a generalist robot policies over prior work including (1) BC-Zero [@jangBCZZeroShotTask2022] and (2) Gato [@reedGeneralistAgent2022], in that~@brohanRT1RoboticsTransformer2023 uses a much larger and diverse set of training tasks compared to both BC-Zero and Gato. -In particular, RT-1 uses a transformer architecture, and is trained on as many as 130k human-recorded trajectories collected over 13 robots in the span on 17 months. -RT-1 learns to process a history of camera images and a natural language instruction, and feeds the resulting sequence of high-dimensional tokens to a transformer, trained using a \emph{classification loss on a discretized actions space} consisting of 6 256 bins, each for each joint of a 6-dof robotic arm. - -Perhaps motivated by the contemporary successes of the transformer architecture in both CV and NLP, the same group of authors investigated using a discrete output space to model---inherently continuous---quantities such as actions, leveraging a (1) more powerful architecture and (2) scaling up the dataset used \citep[RT-2]{brohanRT2VisionLanguageActionModels2023}. -In RT-2,~@brohanRT2VisionLanguageActionModels2023 propose inheriting internet-scale semantic knowledge from large-scale multi-modal datasets to learn a single, \emph{unified model} for robotics control. -Such a model, termed \emph{Vision-Language-Action} (VLA) in the original RT-2 paper, effectively casts robot control as a language modeling problem, and in particular as a Visual Question-Answering (VQ\&A) task, whereby the output token space used to represent \emph{string} tokens is shared with the \emph{8-bits tokens} used to represent the 256 actuation levels of a 6-dof robot joint. 
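To make the discretized action interface concrete, the sketch below bins each continuous action dimension into 256 levels and maps tokens back to values, in the spirit of RT-1/RT-2. The normalization range and uniform binning are illustrative assumptions rather than the exact tokenizers used in those papers.

```python
import numpy as np

# Illustrative per-dimension action discretization (RT-1/RT-2 style).
# The 256-bin resolution matches the text above; the value range and the
# uniform binning are simplifying assumptions, not the papers' tokenizers.
N_BINS = 256
LOW, HIGH = -1.0, 1.0  # assume actions are normalized per dimension

def tokenize(action: np.ndarray) -> np.ndarray:
    """Map each continuous action dimension to an integer token in [0, 255]."""
    clipped = np.clip(action, LOW, HIGH)
    bins = np.floor((clipped - LOW) / (HIGH - LOW) * (N_BINS - 1) + 0.5)
    return bins.astype(np.int64)

def detokenize(tokens: np.ndarray) -> np.ndarray:
    """Map integer tokens back to the center of their continuous bin."""
    return LOW + tokens.astype(np.float64) / (N_BINS - 1) * (HIGH - LOW)

action = np.array([0.12, -0.53, 0.98, 0.0, -1.0, 0.77])  # a 6-dof arm command
tokens = tokenize(action)
print(tokens, detokenize(tokens))
```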
-In their work,~@brohanRT2VisionLanguageActionModels2023 propose co-fine-tuning then-leading large-scale VLMs such as PaLIX [@chenPaLIXScalingMultilingual2023] or PaLM-E [@driessPaLMEEmbodiedMultimodal2023] on a mix of web and robotics data, thus complementing VQ\&A training with robotics-specific signal, learning to directly output robot actions in a shared token space for visual and language inputs. -Using large models trained on internet-scale data as backbones for VLAs allows models to tap into the rich semantic knowledge embedded in the VLM's parameters, interpret new commands as well as recognize unseen objects by connecting them to concepts acquired while pre-training. -For instance,~@brohanRT2VisionLanguageActionModels2023 show that while RT-2 has never been explicitly trained to repurpose tools for a hammering task, it can still combine its semantic understanding of images, so that when asked which object between (1) a piece of paper, (2) a pair of headphones or (3) a rock may be used instead of a hammer, it answers correctly, (3). - -Traditionally, research involved not only training the model but also collecting the underlying data, a costly and time-consuming process—for instance, @jangBCZZeroShotTask2022 gathered 25K+ trajectories before training, while RT-1 required 130K+. -In turn, the data used in robot learning research efforts have traditionally proved rather fragmented, tailored to the specific task considered by the specific group of researchers who collected it, ultimately hindering integration. -The Open X-Embodiment project [@collaborationOpenXEmbodimentRobotic2025] was a landmark effort to address the data fragmentation problem, curating the aggregation of 60 \emph{existing} robotics datasets from 22 different robot embodiments and 21 institutions, resulting in a total 1.4M of cross-embodiments, cross-tasks, openly-available trajectories. -Besides the contribution of an aggregate, large scale dataset,~@collaborationOpenXEmbodimentRobotic2025 also demonstrated significant positive transfer \emph{across tasks and embodiments}, showing that a single model trained on multi-embodiment data can outperform specialist models trained on their respective single-embodiment datasets. -The Distributed Robot Interaction Dataset (DROID) [@khazatskyDROIDLargeScaleInTheWild2025] represents another significant step towards addressing the problem of scarse and disaggregated data in robot learning, providing a unique dataset consisting of 75K+ human demonstrations collected in realistic (\emph{in-the-wild}) manipulation settings, providing another cornerstone for building general-purpose robot policies. -Recently, foundational datasets curated through large, centralized efforts, are increasingly complemented by decentralized, community-driven collection of robotics data. -Software libraries as **LeRobot**~have been instrumental in enabling decentralized collection of large amounts of data, providing the infrastructure for researchers and practitioners to easily contribute trajectories from range of embodiments, democratizing data access via distributed collection. - -The success of large, proprietary models like RT-1 and RT-2, highlighted a growing accessibility gap in robotics research, as training and deploying large-scale models requires computational resources simply unattainable for most research institutions. 
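As a concrete taste of the decentralized, community-driven data access discussed above, the snippet below loads one such dataset with LeRobot. The import path, the `num_episodes` attribute, and the feature keys are assumptions that vary across lerobot versions and across datasets.

```python
# Illustrative only: loading a community-contributed dataset with the LeRobot
# library mentioned above. The import path and attribute names are assumptions
# and have changed across lerobot versions; feature keys differ per dataset.
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

dataset = LeRobotDataset("lerobot/pusht")       # any Hub repo_id in LeRobot format
print(f"{dataset.num_episodes} episodes, {len(dataset)} frames")

frame = dataset[0]                              # a dict of tensors for one timestep
for key, value in frame.items():
    print(key, getattr(value, "shape", value))  # camera frames, robot state, action, ...
```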
-The OpenVLA project [@kimOpenVLAOpenSourceVisionLanguageAction2024] emerged in direct contrast of closed-source counterparts, as a community-driven effort to create powerful, openly available VLAs. -In particular,~@kimOpenVLAOpenSourceVisionLanguageAction2024 trained OpenVLA by exclusively leveraging openly available data (970K+ from the Open-X dataset), and share training recipes alongside the model weights. -Architecturally, OpenVLA integrates a pre-trained vision encoder to project visual tokens into the embedding space of Llama2-7B [@touvronLlama2Open2023] language model backbone. -The language model backbone is then used to predict \emph{discrete action tokens} over 256 activation levels. - -![Robot learning is undergoing a paradigmatic shift: centralized data collections (A, left) are increasingly larger, often comprising Ms of demonstrations, and (A, right) decentralized approaches to data collection are also rising as an alternative for large scale data collection. (B) Generalist models are also becoming increasingly smaller and easier to run on limited hardware.](assets/image/ch5/ch5-trends.png) {#fig-fig:ch5-trends} - -*Robot learning is undergoing a paradigmatic shift: centralized data collections (A, left) are increasingly larger, often comprising Ms of demonstrations, and (A, right) decentralized approaches to data collection are also rising as an alternative for large scale data collection. (B) Generalist models are also becoming increasingly smaller and easier to run on limited hardware.* - -Figure~\ref{fig:ch5-trends} illustrates graphically the two most relevant trends in modern robot learning. -As datasets collected via centralized, cross-institutions cooperation of increasing size are made available for the research community, decentralized datasets collected by individual researchers and practitioners have also gained traction recently, closing the gap with academic benchmarks thanks to community-contributed datasets. -Further, models used across tasks and embodiments are also becoming much more compute-efficient, and as a result the models' size has been consistently reducing over time, with consequent gains for autonomous robots in real-world, resource-constrained environments. - -\subsection{Modern VLAs} -Modern recipes to train large scale VLAs extend early efforts to learn foundation models from large amounts of data via BC, introducing significant advancements concerning both architectural and procedural aspects. -From an architectural perspective, modern VLAs such as \pi_0 [@black$p_0$VisionLanguageActionFlow2024] leverage a \emph{unified transformer model} for efficiency of computation, while maintaining specialized sub-components within the model for visual perception and action prediction, enabling cross-task performance via language conditioning. -Crucially, modern VLAs including~@black$p_0$VisionLanguageActionFlow2024[\pi_0] and~@shukorSmolVLAVisionLanguageActionModel2025[SmolVLA] adopt \emph{unified} transformer models employing disjoint set of weights (\emph{experts}) for compute-efficient visual-semantic understanding and robotic control. 
-Procedurally, modern VLAs complement advanced Vision-Language Model (VLM) backbones with action-specific modules (1) adopting mid-sized \emph{action experts} to model continuous actions distributions \( p (a_{t:t+H_a} \vert o_t) \)---avoiding discrete action tokens entirely---and (2) relying on~\emph{action chunking} \citep[Section~\ref{sec:learning-bc-single}]{zhaoLearningFineGrainedBimanual2023} as a strategy to reduce error compounding when predicting multiple actions learning from inherently non-i.i.d. data, such as demonstration data. - -These architectural and procedural innovations present three benefits. -First, developing architectures that exploit internet-scale pre-trained backbones allows to fully capitalizes on the vast world knowledge and skills state-of-the-art VLMs exhibit, preventig models from needing to learn visual, linguistic and semantic concepts from scratch. -Second, using generative models for continuous action distributions allows to learn rich, multimodal data distributions, a much more likely scenario in the big-data regime typically tackled while developing generalist policies. -Further, introducing two separate components for perception and action planning could enable using Mixture of Experts (MoE) architectures [@fedusReviewSparseExpert2022], more efficient to run and thus resulting in faster inference---a key features for models deployed in real-world scenarios. -This new paradigm has been at the core of some of the most capable generalist policies developed to date, capable to few-shot adapt to novel tasks and to perform highly dexterous manipulation tasks, ranging from end-to-end folding laundry, to bussing tables. - -\subsubsection{VLMs for VLAs} -VLMs are designed to process both visual and textual modalities---most commonly by taking both images and text as input and generating text conditioned on the visual context. -Recent advances in VLMs have been driven by the success of LLMs, with many approaches building upon pretrained LLMs and adopting similar training paradigms to the ones used in language modeling. -Typically, VLMs [@alayracFlamingoVisualLanguage2022,laurenconWhatMattersWhen2024,linVILAPretrainingVisual2024] are constructed by integrating a pretrained vision encoder [@radfordLearningTransferableVisual2021,zhaiSigmoidLossLanguage2023,finiMultimodalAutoregressivePretraining2024] with a pretrained LLM [@grattafioriLlama3Herd2024,jiangMistral7B2023]. -Training then proceeds in multiple multimodal stages, beginning with a large-scale pretraining on datasets containing image-text pairs [@LAION-COCO,kakaobrain2022coyo700m] and interleaved vision-language corpora [@OBELICS,MMC4], all followed by a supervised fine-tuning stage on instruction-tuning datasets [@LLaVA-1.5,tong2024cambrian,laurenconWhatMattersWhen2024]. -The inherent multimodal nature of VLMs enables them to jointly reason over vision and language. -Pre-training on vast internet-scale datasets allows these models to associate visual patterns with textual descriptions, thereby acquiring a rich semantic understanding of the world---knowledge about objects, their properties, and relationships---without explicit supervision for each concept. -In turn, integrating a VLM as a perception backbone for a VLA allows the complete model to inherit rich world knowledge, sidestepping the need to learn visual and semantic representations from scratch. 
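The recipe of combining a pretrained vision encoder with a pretrained LLM can be summarized in a few lines. The sketch below uses toy stand-in modules, not real SigLIP or Gemma weights, purely to show where the learned vision-to-language projection sits; every dimension and module here is an illustrative assumption.

```python
import torch
import torch.nn as nn

# Minimal sketch of the "pretrained vision encoder + pretrained LLM" recipe
# described above: image features are projected into the language model's
# embedding space and prepended to the text tokens. All modules and sizes
# are toy placeholders, not SigLIP, Gemma, or any specific VLM.
class TinyVLM(nn.Module):
    def __init__(self, img_pixels=3 * 64 * 64, vision_dim=384, lm_dim=512, vocab=1000):
        super().__init__()
        self.vision_encoder = nn.Linear(img_pixels, vision_dim)      # stand-in for a ViT encoder
        self.projector = nn.Linear(vision_dim, lm_dim)               # learned vision-language connector
        self.text_embed = nn.Embedding(vocab, lm_dim)                # stand-in for the LLM's embeddings
        self.lm = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=lm_dim, nhead=8, batch_first=True),
            num_layers=2,
        )                                                            # stand-in for the LLM itself

    def forward(self, image, token_ids):
        vis = self.projector(self.vision_encoder(image.flatten(1))).unsqueeze(1)  # (B, 1, d)
        txt = self.text_embed(token_ids)                                          # (B, T, d)
        return self.lm(torch.cat([vis, txt], dim=1))                              # reason over both

model = TinyVLM()
out = model(torch.randn(2, 3, 64, 64), torch.randint(0, 1000, (2, 16)))
print(out.shape)  # torch.Size([2, 17, 512])
```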
-In principle, this allows the robot to ground high-level natural language instructions in its visual context, and possibly recognize unseen objects by connecting them to pre-trained concepts absorbed during pre-training, improving on the possibility to generalize to novel scenarios. - -Recently, compute efficiency has also become a central focus in VLM research. -Several works aim to reduce training costs by using smaller, more diverse datasets [@LLaVA-1.5,InstructBLIP,bai2025qwen25vl,zhu2024minigpt,tong2024cambrian], training smaller-scale models [@marafiotiSmolVLMRedefiningSmall2025, moondream,minicmpv2024], or by adapting pretrained unimodal models by tuning only a small subset of parameters [@shukor2023epalm,vallaeys2024improveddepalm,MAPL,FROMAGe,tsimpoukelli2021multimodalfrozen,BLIP-2]. -While the majority of VLM research focuses on image and text modalities, recent work has demonstrated that similar techniques can be extended to integrate additional modalities, such as video and audio [@wang2025internvideo2,liu2024kangaroo,zhang2025videollama,kong2024audioflam]---a particularly promising direction of research for robotics applications, where multiple sensor modalities can be integrated effectively. -This trend towards efficiency is paramount for robotics applications, where policies must operate under the stringent constraints of real-world deployment. -Indeed, robots often possess limited on-board computational resources and must react in real-time to dynamic environments. -Smaller and faster VLMs have thus become quintessential for developing responsive autonomous systems, enabling high-frequency control loops by reducing the latency between perception and action. - -\subsection{\( \pi_0 \)} - -\pi_0 [@black$p_0$VisionLanguageActionFlow2024] introduce a VLA consisting of a MoE architecture consisting of (1) a pre-trained VLM backbone (Gemma 2.6B [@teamGemma2Improving2024]) and (2) a dedicated action expert used to generate continuous actions via flow matching. -Images and language are embedded with a late-fusion VLM (PaliGemma), while proprioceptive state and actions chunks are routed to a smaller action expert, initialized from scratch. -The two separate experts communicate via self-attention layers, but maintain disjoint weights to obtain query, key and values matrices at each layer, maintaining specialization while efficiently allocating computation. - -![The \pi_0 architecture, as in [@black$p_0$\beginalign] -VisionLanguageActionFlow2024. Vision and language tokens are routed to a VLM backbone which is prevented from attending robot proprioperceptive states and action tokens, which are instead routed to a smaller subset of weights within the architecture. The architecture is trained with Flow Matching on 10M+ trajectories from a mixture of closed and openly available datasets.](assets/image/ch5/ch5-pi0.png) {#fig-fig:ch5-pi0} - -*The \pi_0 architecture, as in [@black$p_0$\beginalign] -VisionLanguageActionFlow2024. Vision and language tokens are routed to a VLM backbone which is prevented from attending robot proprioperceptive states and action tokens, which are instead routed to a smaller subset of weights within the architecture. The architecture is trained with Flow Matching on 10M+ trajectories from a mixture of closed and openly available datasets.* - -Concretely, \( \pi_0 \) is a unified transformer with two disjoint sets of weights \( \phi, \theta\). 
-A larger VLM backbone \( p_\phi \) initialized from Gemma 2.6B processes multiple image frames obtained from multiple cameras points \( [\{ I_t \}_{t=1}^n] \), as well as a language instruction \([\ell_t]\) used to describe the task considered. -Concurrently, a 300M-parameter \emph{action expert} based on a similar transformer architecture is used processes the robot proprioperceptive state \(q_t\) and an action chunk \(a_{t:t+H_a}\) (Figure~\ref{fig:ch5-pi0}). -The different expert networks operate separately in processing the respective inputs and turning them into query, key and value matrices, and only share information between each other via self-attention layers. -The outputs from the VLM backbone are disregarded, while the vector field regressed by the action expert is used to iteratively refine the action process. -In particular, \pi_0 uses a \emph{blockwise causal attention mask} over tokens belonging to three separate blocks: (1) image and language tokens \(\mathcal T_i \) obtained from \([\{ I_t \}_{t=1}^n, \ell_t]\), (2) proprioperceptive tokens \(\mathcal T_q \) obtained from \(q_t\), and (3) the action tokens \( \mathcal T_a \) for items in the chunk \(a^{\tau}_{t:t+H_a}\) at time \( \tau \) in the flow-matching process. -Notably, \emph{within} each block the attention operations are bidirectional, while across blocks, future blocks are masked out. -Formally, this corresponds to using the attention mask -\begin{equation*} - \mathbf{A} = - \bordermatrix{ - & \mathcal{T}_i & \mathcal{T}_q & \mathcal{T}_a \cr - \mathcal{T}_i & \mathbf{1} & \mathbf{0} & \mathbf{0} \cr - \mathcal{T}_q & \mathbf{1} & \mathbf{1} & \mathbf{0} \cr - \mathcal{T}_a & \mathbf{1} & \mathbf{1} & \mathbf{1} \cr - }, - \quad \mathbf{1}: \text{Bidirectional Attention}, \ \mathbf{0}: \text{Masked Attention} -\end{equation*} -Note how \emph{intra}-block directional attention allows tokens to communicate freely, while \emph{inter}-block communication is mediated by the attention mask \(\mathbf{A} \). -\emph{Blockwise causal masking} effectively prevents the pre-trained perception-language tokens from attending to robotics-tokens, likely out of distribution for VLM backbones traditionally trained on large corpora of internet, non-robotics, data. -Crucially, because communication is obstructed between image-language tokens, proprioperceptive and action tokens, one can cache keys and values across denoising steps at runtime time, incuring in a reduced computational footprint and faster inference. - -In \pi_0, both the VLM backbone and action expert are update using a \emph{flow matching} loss, and in particular are updated minimizing: - -$$ - - \mathcal{L}(\phi, \theta) &= \mathbb{E}_{\tau, \epsilon, o_t, a_{t:t+H_a}}\Big[ - \big\Vert - v_\theta(\underbrace{\tau a_{t:t+H_a} + (1-\tau) \epsilon}_{\tilde a_{t:t+H_a}},\, o_t,\, \tau) - - (\epsilon - a_{t:t+H_a}) - \big\Vert^2 - \Big], \label{eq:pi0-loss} - - &\tau \sim \mathrm{Beta}_{[0,s]}(1.5,1), \quad - \epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I}), \quad - o_t, a_{t:t+H_a} \sim \mathcal D \notag - -$$ - -Where the experts parametrized by the separate weights \( \phi, \theta \) interact with each other via self-attention layers only, so that the action expert \( v_\theta \) internal computations also depend on the VLM backbone's parameters \( \phi \). 
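The loss above can be made concrete with a toy stand-in for the model. The sketch below samples the truncated Beta timestep, forms the noisy chunk, and regresses the vector-field target exactly as written in the equation, assuming a small MLP in place of the VLM backbone and action expert and random tensors in place of real observations.

```python
import torch
import torch.nn as nn

# Toy version of the flow-matching objective above: a small MLP stands in for
# the full VLM backbone + action expert, and observations are random vectors.
# The shapes, the truncation s, and the network itself are all assumptions.
H_A, ACT_DIM, OBS_DIM, S = 8, 6, 32, 0.999

v_theta = nn.Sequential(
    nn.Linear(OBS_DIM + H_A * ACT_DIM + 1, 256), nn.GELU(),
    nn.Linear(256, H_A * ACT_DIM),
)
opt = torch.optim.Adam(v_theta.parameters(), lr=3e-4)

def flow_matching_loss(obs, actions):
    b = actions.shape[0]
    tau = S * torch.distributions.Beta(1.5, 1.0).sample((b, 1, 1))  # tau ~ Beta_{[0,s]}(1.5, 1)
    eps = torch.randn_like(actions)                                 # eps ~ N(0, I)
    noisy = tau * actions + (1.0 - tau) * eps                       # tau * a + (1 - tau) * eps
    target = eps - actions                                          # eps - a
    inp = torch.cat([obs, noisy.flatten(1), tau.flatten(1)], dim=-1)
    pred = v_theta(inp).view(b, H_A, ACT_DIM)
    return ((pred - target) ** 2).mean()

obs = torch.randn(64, OBS_DIM)
actions = torch.randn(64, H_A, ACT_DIM)   # an action chunk a_{t:t+H_a}
loss = flow_matching_loss(obs, actions)
loss.backward()
opt.step()
print(float(loss))
```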
-Importantly,~@black -\end{align$p_0$VisionLanguageActionFlow2024} minimize~\ref{eq:pi0-loss} over both the multimodal backbone and action expert parameters, thus updating the internal representations of the VLM using BC-specific gradients. -In contrast,~@driessKnowledgeInsulatingVisionLanguageAction2025 later show that failing to insulate the VLM knowledge from the flow matching gradients actually harms performance. -Inference is performed iteratively refining action chunks while numerically forward-integrating the vector field predicted by the action expert, -\begin{equation} - a_{t:t+H_a}^{\tau + \delta} = a_{t:t+H_a}^{\tau } + \delta v_\theta(a_{t:t+H_a}^{\tau }, o_t) -\end{equation} - -Flow matching \citep[Section\ref{sec:ch4-flow-matching}]{lipmanFlowMatchingGenerative2023} can be seen as a continuous time, detetrministic generalization of Diffusion and has proven effective in modeling highly complex multi-modal distributions, including those over images and video. -In turn, its application to large-scale data collections of multiple human behaviors across tasks and embodiments appears rather consequential, particularly considering how it can enable faster inference via a reduced number of denoising steps---as few as 10, in \pi_0. -In particular, the action expert is model as a conditional flow matching model. -Each action token embeds a noisy action \(a_i^{\tau} \in a^\tau_{t:t+H_a}\), alongside a sinusoidal encoding of the \emph{flow process} timestep \(\tau\). -The action expert then leverages full bidirectional attention across the \(H_a\) action tokens provided, as well as attends to previous proprioperceptive and image-language tokens as well. -Interestingly, differently from a standard flow matching pipeline~@lipmanFlowMatchingGenerative2023, \(\tau\) is \emph{not} sampled from a uniform distribution \(\tau \sim \mathcal U([0,1]) \), but rather obtained from \(\tau \sim \textrm{Beta}(1.5,1) \) defined on the \( [0,s], s<1 \) support (Figure~\ref{fig:ch5-pi0-sampling-timesteps}). - -![Unlike more traditional flow-matching algorithms, \pi_0 uses a modified distribution for the timestep \( \tau \) used during training and inference, favouring earlier timestamps corresponding to noisier chunks.](assets/image/ch5/ch5-pi0-sampling-timesteps.png) {#fig-fig:ch5-pi0-sampling-timesteps} - -Using such Beta distribution emphasizes higher noise levels during training, a choice~@black$p_0$VisionLanguageActionFlow2024 argue allows \pi_0 to focus on learning the mean of the data distribution \( \mathbb E[a_{t:t+H_a} \vert o_t] \) during training, in keeping with~@esserScalingRectifiedFlow2024. -To further optimize performance and reduce inference time,~@black$p_0$VisionLanguageActionFlow2024 propose reducing the support of the timestep distribution to \([0,s], \ s < 1 \), as for any forward-integration step size \( \delta = 1-s \) timesteps above \(s \) are never sampled at inference time. - -Besides adopting a MoE architecture with a VLM backbone initialized from a pre-trained model and trained jointly with an action expert via flow matching, \pi_0 also relies on a unique pre-training corpus mixes open data of 10M+ trajectories, which~@black$p_0$VisionLanguageActionFlow2024 claim to be the largest dataset used in building a foundational model in robotics to date. 
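Before turning to the data, the forward-integration rule above is easy to sketch: starting from pure noise, the chunk is refined over 10 Euler steps of size delta = 1/10. The `vector_field` stub below is only a placeholder for the trained action expert, and the shapes are illustrative assumptions.

```python
import torch

# Toy Euler integration of the learned vector field, mirroring the update rule
# above with 10 denoising steps. `vector_field` is a stand-in for the trained
# action expert v_theta; `obs` stands in for the VLM and proprioceptive context.
H_A, ACT_DIM, N_STEPS = 8, 6, 10
delta = 1.0 / N_STEPS

def vector_field(chunk, obs, tau):
    # placeholder: a real model predicts the flow given (noisy chunk, obs, tau)
    return -chunk + 0.1 * obs.mean(dim=-1, keepdim=True).unsqueeze(-1)

obs = torch.randn(1, 32)
chunk = torch.randn(1, H_A, ACT_DIM)      # a^0 ~ N(0, I): start from pure noise
tau = 0.0
for _ in range(N_STEPS):
    chunk = chunk + delta * vector_field(chunk, obs, tau)  # a^{tau+delta} = a^tau + delta * v_theta
    tau += delta
print(chunk.shape)                        # the final chunk a_{t:t+H_a} to execute
```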
-The dataset used to train \pi_0---referred to as \( \pi \) dataset---comprises a private, undisclosed portion obtained via teleoperation aggregated to openly available datasets including Open-X and DROID, with \(\approx 9.1\ -Open datasets such as DROID and Open-X are complemeneted with expert trajectories with of dexterous demonstrations tasks spanning 7 robot configurations and 68 different tasks. -~@black$p_0$VisionLanguageActionFlow2024 show that pre-training on the \( \pi \) dataset yields a broadly capable base model, which can be adapted via post-training on narrower high-quality task data, inducing fluent multi-stage behavior while retaining robustness. -In particular,~@black$p_0$VisionLanguageActionFlow2024 report that, across a variety of benchmarks, \pi_0 pretrained on the \( \pi \) dataset and post-trained on extra high-quality data demonstrations \emph{consistently outperform} \pi_0 trained from scratch (i.e., without pretraining on the \( \pi \) dataset), further scoring the relevance of pretraining. -~@black$p_0$VisionLanguageActionFlow2024 offer an intuition behind this finding: high-quality demonstrations of a given task typically do not contain mistakes, and how human demonstrator may recover from them. -In turn, robot trained on high-quality data exclusively with BC may be incapable to recover from failure. -Conversely, large scale collections of human demonstrations are typically much more diverse (if anything, for their sheer scale), and therefore typically contain rich and diverse information, which may prove suboptimal for any given task when considered in isolation but that proves invaluable in coupling with a small, narrower set of demonstrations. - -Lastly,~@black$p_0$VisionLanguageActionFlow2024 present cross-embodiment experiments where they demonstrate \pi_0's ability to control both mobile and static manipulator robots with varying arm embodiments. -The emergence of cross-embodiment capabilities is largely to be attributed to the presence of large scale cross-embodiment data in the data mixture, handled by \pi_0 defaulting to the maximal configuration size across the \( \pi \) dataset, and zero-padding robots with fewer dof. -In that \pi_0 constantly processes 18 DoFs robots (two 6-DoF arms, two grippers, base, vertical torso), regardless of the kind of robot, and robots with fewer dofs are zero-padded. -\pi_0 also relies on three camera views, and uses masked image slots for training and deployment scenarios with fewer cameras. - -\subsubsection{Code Example: Using \pi_0} -\todo{add code example} - -\subsection{SmolVLA} -VLAs remain in an early stage of development and are not yet as mature or widely adopted as LLMs and VLMs. -Further, much of the impactful VLA progress remains proprietary, with many models sharing only weights while withholding full training details and essential methodological components. -SmolVLA [@shukorSmolVLAVisionLanguageActionModel2025] is an entirely open-source research effort, aiming to democratize the developments of robotics foundation models by open sourcing model, training recipes and data used. - -![The SmolVLA architecture, as in [@shukorSmolVLAVisionLanguageActionModel2025]. SmolVLA is a compact MoE model trained with flow matching to denoise action chunks. Vision and language tokens are fed to a VLM backbone, and share information with the proprioperceptive and action tokens via the attention mechanism. The attention expert interleaves SA and CA layers for further conditioning on the visual features from the VLM backbone. 
SmolVLA skips computations and reduces the visual tokens, resulting in 6x less memory usage than \pi_0.](assets/image/ch5/ch5-smolvla.png) {#fig-fig:ch5-smolvla} - -*The SmolVLA architecture, as in [@shukorSmolVLAVisionLanguageActionModel2025]. SmolVLA is a compact MoE model trained with flow matching to denoise action chunks. Vision and language tokens are fed to a VLM backbone, and share information with the proprioperceptive and action tokens via the attention mechanism. The attention expert interleaves SA and CA layers for further conditioning on the visual features from the VLM backbone. SmolVLA skips computations and reduces the visual tokens, resulting in 6x less memory usage than \pi_0.* - -While encouraging efforts like \pi_0 [@black$p_0$VisionLanguageActionFlow2024] demonstrate the feasibility of open VLA systems, they remain (1) large and compute-intensive and (2) dependent on closed datasets collected via centralized efforts on costly robotic platforms, ultimately hindering accessibility. -SmolVLA mitigates both these accessibility issues by (1) prioritizing a compact, compute-efficient VLA design and (2) targeting community-contributed datasets on accessible robotic platforms such as the SO-100 and SO-101 arms. -Similarly to \pi_0, SmolVLA (Figure~\ref{fig:ch5-smolvla}) employs a MoE architecture combining a pretrained VLM backbone with a dedicated action expert, and trains with flow matching. -To ensure efficiency and accessibility, SmolVLA adopts SmolVLM-2 [@marafiotiSmolVLMRedefiningSmall2025] as its VLM backbone, considering SmolVLM-2's reduced size and capability to process multiple image inputs alongside text items. -SmolVLM-2 uses SigLIP [@zhaiSigmoidLossLanguage2023] as vision encoder, producing visual features for a SmolLM2 language decoder [@allalSmolLM2WhenSmol2025]. -Further, SmolVLA adopts a smaller action expert consisting of \(\sim\)100M parameters and an interleaved stack of self and cross-attention layers. -To improve efficiency, the action expert adopts a reduced embedding dimension compared to the VLM backbone, resulting in \( d_{v_\theta} = 0.75 d_{\text{VLM}} \). -[@shukorSmolVLAVisionLanguageActionModel2025]'s design choices thus result in a much smaller size model compared to \pi_0, consisting of around 450M parameters versus \pi_0's 3.3B parameters. - -Effectively, SmolVLA consumes multi-view RGB images, a natural-language instruction, and a projected sensorimotor state token as inputs, together with the noised \emph{action chunk} \( \tilde{a_{t:t+H_a}} \) the action expert \( v_\theta \) is trained to denoise. -In particular, robot proprioperceptive states are projected into a shared token space with the VLM to match \( d_{\text{VLM}} \), and successively projected into the expert's token space. -Similarily to \pi_0, SmolVLA adopts separate experts communicating exclusively through self-attention layers, which do not employ the same blockwise causal masking in favour of a simple causal masking, resulting in a lower triangular attention mask. - -In contrast with \pi_0, the action expert interleaves \emph{cross-attention} (CA) and \emph{self-attention} (SA) layers, a choice shown to yield higher success and smoother action chunks in practice. -While in the expert SA layers, tokens are used to obtain queries, keys and values, CA layers use action tokens only as queries, and instead project visual, language and proprioperceptive tokens in a shared action space to obtain keys and values. 
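A minimal sketch of such an interleaved action expert is given below, assuming plain PyTorch attention layers, a cross-attention-first ordering, and residual connections only (layer norms and MLP blocks omitted); these are illustrative simplifications, not SmolVLA's actual implementation.

```python
import torch
import torch.nn as nn

# Sketch of an action expert that interleaves causal self-attention over the
# action chunk with cross-attention onto VLM features, as described above.
# Dimensions, depth, and the plain nn.MultiheadAttention layers are assumptions.
class InterleavedActionExpert(nn.Module):
    def __init__(self, d_expert=256, d_vlm=512, n_layers=4, n_heads=4):
        super().__init__()
        self.kv_proj = nn.Linear(d_vlm, d_expert)   # project VLM tokens into the expert's space
        self.layers = nn.ModuleList(
            nn.MultiheadAttention(d_expert, n_heads, batch_first=True)
            for _ in range(n_layers)
        )

    def forward(self, action_tokens, vlm_tokens):
        ctx = self.kv_proj(vlm_tokens)
        x = action_tokens
        T = x.shape[1]
        causal = torch.triu(torch.ones(T, T, dtype=torch.bool, device=x.device), diagonal=1)
        for i, attn in enumerate(self.layers):
            if i % 2 == 0:   # cross-attention: action tokens are the queries,
                out, _ = attn(query=x, key=ctx, value=ctx)            # VLM features give keys/values
            else:            # causal self-attention within the action chunk
                out, _ = attn(query=x, key=x, value=x, attn_mask=causal)
            x = x + out      # residual; layer norms and MLP blocks omitted for brevity
        return x

expert = InterleavedActionExpert()
actions = torch.randn(2, 8, 256)     # H_a = 8 noisy action tokens
vlm_feats = torch.randn(2, 80, 512)  # visual + language + state tokens from the backbone
print(expert(actions, vlm_feats).shape)  # torch.Size([2, 8, 256])
```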
-Notably, keys and values can also be cached, resulting in performance gains at inference time.

-SmolVLA trims both token and layer compute.
-First, it \emph{reduces visual tokens} via pixel shuffle to a fixed budget of 64 tokens per frame, foregoing the tiling used during VLM pretraining in favor of runtime efficiency.
-Second, it \emph{skips upper VLM layers}: the action expert consumes features from the first \(N\) decoder layers, with \(N=L/2\) providing a good speed-performance trade-off and effectively halving the compute spent in the largest part of SmolVLA.
-Beyond model compactness, SmolVLA also contributes an inference stack that decouples action prediction from execution for responsiveness on modest hardware (Section~\ref{sec:ch4-async-inference}).

-Departing from reliance on proprietary datasets, SmolVLA pretrains exclusively on 450+ \emph{community datasets}, totaling 20K+ trajectories.
-Because instructions in community-contributed datasets can be noisy or missing, the authors re-annotate tasks with a small off-the-shelf VLM using frames sampled from the dataset, and standardize camera viewpoints by mapping sources to a consistent top/wrist/side ordering.
-At inference, similarly to \pi_0, SmolVLA integrates the flow over 10 steps, resulting in fast inference.
-SmolVLA proves effective across a range of both real-world and simulated environments, rivaling \pi_0 while being close to 40\%.

-\subsubsection{Code Example: Using SmolVLA}
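As above, this is a minimal, illustrative sketch rather than an official recipe: it assumes a LeRobot-style API for SmolVLA (`SmolVLAPolicy.from_pretrained`, `select_action`), a placeholder checkpoint identifier, and placeholder camera/state keys matching an SO-100-like setup; exact names depend on the installed version and the robot configuration.

```python
# Sketch: one inference step with a SmolVLA-style policy.
# Assumptions: LeRobot-like API, hypothetical checkpoint id and observation keys.
import torch

from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy  # path may differ by version

policy = SmolVLAPolicy.from_pretrained("lerobot/smolvla_base")  # hypothetical checkpoint id
policy.eval()

observation = {
    # Top / wrist / side views, following the standardized camera ordering; use real frames in practice.
    "observation.images.top": torch.zeros(1, 3, 256, 256),
    "observation.images.wrist": torch.zeros(1, 3, 256, 256),
    "observation.images.side": torch.zeros(1, 3, 256, 256),
    # Proprioceptive state, e.g. SO-100 joint positions.
    "observation.state": torch.zeros(1, 6),
    "task": ["pick up the red cube and place it in the box"],
}

with torch.no_grad():
    # The action expert integrates the learned flow (10 steps in the paper) to
    # denoise an action chunk; the next action of the chunk is returned.
    action = policy.select_action(observation)
```

In a real deployment, this call would sit inside the robot's control loop, or behind SmolVLA's asynchronous inference stack, which decouples chunk prediction from execution.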
\ No newline at end of file
diff --git a/app/scripts/latex-to-mdx/input/sections/test.md b/app/scripts/latex-to-mdx/input/sections/test.md
deleted file mode 100644
index f5fc0dd9c0e54a5858731a3eb60a137d8a58af69..0000000000000000000000000000000000000000
--- a/app/scripts/latex-to-mdx/input/sections/test.md
+++ /dev/null
@@ -1,488 +0,0 @@
-# Classical Robotics {#sec:classical}

-::: epigraph
-*Know your enemy* \[\...\]

-Sun Tzu
-:::

-::: tldr
-Learning-based approaches to robotics are motivated by the need to (1) generalize across tasks and embodiments, (2) reduce dependency on human expertise, and (3) leverage historical trends in the production of data---all traditionally overlooked by dynamics-based techniques.
-:::

-## Explicit and Implicit Models

-![Overview of methods to generate motion (clearly non-exhaustive, see @bekrisStateRobotMotion2024). The different methods can be grouped based on whether they explicitly (*dynamics-based*) or implicitly (*learning-based*) model robot-environment interactions.](figures/ch2/ch2-approaches.png){#fig:generating-motion-atlas width="50%"}

-Robotics is concerned with producing artificial motion in the physical world in a useful, reliable, and safe fashion. Thus, robotics is an inherently multidisciplinary domain: producing autonomous motion in the physical world requires, at the very least, interfacing different software (motion planners) and hardware (motion executors) components. Further, knowledge of mechanical, electrical, and software engineering, as well as of rigid-body mechanics and control theory, has proven essential in robotics since the field first developed in the 1950s. More recently, Machine Learning (ML) has also proved effective in robotics, complementing these more traditional disciplines [@connellRobotLearning1993]. As a direct consequence of its multidisciplinary nature, robotics has developed a rather wide array of methods, all concerned with the main purpose of generating motion.

-Methods to produce robot motion range from traditional *explicit* models---*dynamics-based*[^1] methods, leveraging precise descriptions of the mechanics of robots' rigid bodies and their interactions with eventual obstacles in the environment---to *implicit* models---*learning-based* methods, treating artificial motion as a statistical pattern to learn given multiple sensorimotor readings [@agrawalComputationalSensorimotorLearning; @bekrisStateRobotMotion2024]. A variety of methods have been developed between these two extrema. For instance, @hansenTemporalDifferenceLearning2022 show how learning-based systems can benefit from information on the physics of problems, complementing a traditional learning method such as Temporal Difference (TD)-learning @suttonReinforcementLearningIntroduction2018 with Model-Predictive Control (MPC). Conversely, as explicit models may rely on assumptions that prove overly simplistic---or even unrealistic---in practice, learning can prove effective in improving the modeling of complex phenomena or in complementing perception [@mccormacSemanticFusionDense3D2016]. Such examples demonstrate the richness of approaches to robotics, and Figure [1](#fig:generating-motion-atlas){reference-type="ref" reference="fig:generating-motion-atlas"} graphically illustrates some of the most relevant techniques. This list is clearly far from exhaustive, and we refer to @bekrisStateRobotMotion2024 for a more comprehensive overview of both general and application-specific methods for motion generation. In this section, we wish to introduce the inherent benefits of learning-based approaches---the core focus of this tutorial.

-## Different Types of Motion

-![Different kinds of motion are achieved with potentially very different robotic platforms. From left to right, top to bottom: ViperX, SO-100, Boston Dynamics' Spot, Open-Duck, 1X's NEO, Boston Dynamics' Atlas. This is an example list of robotic platforms and is (very) far from being exhaustive.](figures/ch2/ch2-platforms.png){#fig:robotics-platforms-atlas width="70%"}

-In the vast majority of instances, robotics deals with producing motion via actuating joints connecting nearly entirely rigid links. A key distinction between focus areas in robotics is based on whether the generated motion modifies (1) the absolute state of the environment (via dexterity), (2) the relative state of the robot with respect to its environment (exercising mobility skills), or (3) a combination of the two (Figure [2](#fig:robotics-platforms-atlas){reference-type="ref" reference="fig:robotics-platforms-atlas"}).

-Effects such as (1) are typically achieved *through* the robot, i.e. generating motion to perform an action inducing a desirable modification, effectively *manipulating* the environment (manipulation). Motions like (2) may result in changes to the robot's physical location within its environment. Generally, modifications to a robot's location within its environment may be considered instances of the general *locomotion* problem, further specified as *wheeled* or *legged* locomotion based on whether a robot makes use of wheels or leg(s) to move in the environment. Lastly, an increased level of dynamism in robot-environment interactions can be obtained by combining (1) and (2), thus designing systems capable of interacting with *and* moving within their environment.
This category of problems is typically termed *mobile manipulation*, and is characterized by a typically much larger set of control variables compared to either locomotion or manipulation alone.

-The traditional body of work developed since the very inception of robotics is increasingly complemented by learning-based approaches. ML has indeed proven particularly transformative across the entire robotics stack, first empowering planning-based techniques with improved state estimation used for traditional planning [@tangPerceptionNavigationAutonomous2023], and then replacing controllers end-to-end, effectively yielding perception-to-action methods [@koberReinforcementLearningRobotics]. Work on producing robots capable of navigating a diverse set of terrains demonstrated the promise of both dynamics- and learning-based approaches for locomotion [@griffinWalkingStabilizationUsing2017; @jiDribbleBotDynamicLegged2023; @leeLearningQuadrupedalLocomotion2020; @margolisRapidLocomotionReinforcement2022], and recent works on whole-body control indicated the promise of learning-based approaches for generating rich motion on complex robots, including humanoids [@zhangWoCoCoLearningWholeBody2024; @bjorckGR00TN1Open2025]. Manipulation has also been widely studied, particularly considering its relevance for many impactful use-cases ranging from high-risk applications for humans [@fujitaDevelopmentRobotsNuclear2020; @alizadehComprehensiveSurveySpace2024] to manufacturing [@sannemanStateIndustrialRobotics2020]. While explicit models have proven fundamental in achieving important milestones towards the development of modern robotics, recent works leveraging implicit models have proven particularly promising in overcoming scalability and applicability challenges via learning [@koberReinforcementLearningRobotics].

-## Example: Planar Manipulation

-Robot manipulators typically consist of a series of links and joints, articulated in a chain finally connected to an *end-effector*. Actuated joints are responsible for generating motion of the links, while the end-effector is instead used to perform specific actions at the target location (e.g., grasping/releasing objects via closing/opening a gripper end-effector, using a specialized tool like a screwdriver, etc.).

-Recently, the development of low-cost manipulators like the ALOHA [@zhaoLearningFineGrainedBimanual2023], ALOHA-2 [@aldacoALOHA2Enhanced], and SO-100/SO-101 [@knightStandardOpenSO100] platforms has significantly lowered the barrier to entry in robotics, considering the increased accessibility of these robots compared to more traditional platforms like the Franka Emika Panda arm (Figure [3](#fig:robotic-platforms-costs){reference-type="ref" reference="fig:robotic-platforms-costs"}).

-![Cheaper, more accessible robots are starting to rival traditional platforms like the Panda arm in adoption in resource-constrained scenarios. The SO-100, in particular, costs in the hundreds of Euros and can be entirely 3D-printed in hours, while the industrially-manufactured Panda arm costs tens of thousands of Euros and is not openly available.](figures/ch2/ch2-cost-accessibility.png){#fig:robotic-platforms-costs width="40%"}

-Deriving an intuition as to why learning-based approaches are gaining popularity in the robotics community requires briefly analyzing traditional approaches to manipulation, leveraging tools like forward and inverse kinematics (FK, IK) and control theory.
Providing a detailed overview of these methods falls (well) outside the scope of this tutorial, and we refer the reader to works including @sicilianoSpringerHandbookRobotics2016 [@lynchModernRoboticsMechanics2017; @tedrakeRoboticManipulationPerception; @tedrakeUnderactuatedRoboticsAlgorithms] for a much more comprehensive description of these techniques. Here, we mostly wish to highlight the benefits of ML over these traditional techniques.

-![The SO-100 arm is a 6-dof manipulator arm. Preventing some of its joints (shoulder pan, wrist flex, and wrist roll) from actuating, it can be represented as a traditional 2-dof planar manipulator (the gripper joint in the end-effector is not counted towards the degrees of freedom used to produce motion).](figures/ch2/ch2-so100-to-planar-manipulator.png){#fig:make-so100-planar-manipulator width="70%"}

-Consider the (simple) case where an SO-100 is restrained from actuating (1) the shoulder pan and (2) the wrist flex and roll motors. This effectively reduces the degrees of freedom of the SO-100 from the original 5+1 (5 joints + 1 gripper) to 2+1 (shoulder lift, elbow flex + gripper). As the end-effector does not impact motion in this model, the SO-100 is effectively reduced to the planar manipulator robot presented in Figure [4](#fig:make-so100-planar-manipulator){reference-type="ref" reference="fig:make-so100-planar-manipulator"}, where spheres represent actuators, and solid lines indicate length-$l$ links from the base of the SO-100 to the end-effector (*ee*).

-Further, let us make the simplifying assumption that actuators can produce rotations of up to $2 \pi$ radians. In practice, this is seldom the case due to movement obstructions caused by the robot body itself (for instance, the shoulder lift cannot produce counter-clockwise movement due to the presence of the robot's base, used to secure the SO-100 to its support and host the robot bus), but we will introduce movement obstructions at a later stage.

-All these simplifying assumptions leave us with the planar manipulator of Figure [5](#fig:planar-manipulation-simple){reference-type="ref" reference="fig:planar-manipulation-simple"}, free to move its end-effector by controlling the angles $\theta_1$ and $\theta_2$, jointly referred to as the robot's *configuration* and indicated with $q = [\theta_1, \theta_2 ] \in [-\pi, +\pi]^2$. The axes attached to the joints indicate the associated reference frames, whereas circular arrows indicate the maximal feasible rotation allowed at each joint. In this tutorial, we do not cover topics related to spatial algebra, and we instead refer the reader to @lynchModernRoboticsMechanics2017 [Chapter 2] and @tedrakeRoboticManipulationPerception [Chapter 3] for excellent explanations of the mechanics and theoretical foundations of producing motion on rigid bodies.
-::: {#fig:planar-manipulation-simple}
-[Free to move] · [Constrained by the surface]{#fig:planar-manipulator-floor} · [Constrained by surface and (fixed) obstacle]{#fig:planar-manipulator-floor-shelf}

-Planar, 2-dof schematic representation of the SO-100 manipulator under diverse deployment settings. From left to right: completely free to move; constrained by the presence of the surface; constrained by the surface and the presence of obstacles. Circular arrows around each joint indicate the maximal rotation feasible at that joint.
-:::
-Considering the (toy) example presented in Figure [5](#fig:planar-manipulation-simple){reference-type="ref" reference="fig:planar-manipulation-simple"}, we can analytically write the end-effector's position $p \in \mathbb R^2$ as a function of the robot's configuration, $p = p(q), p: \mathcal Q \mapsto \mathbb R^2$. In particular, we have:
$$\begin{equation*}
p(q) =
\begin{pmatrix}
p_x(\theta_1, \theta_2) \\
p_y(\theta_1, \theta_2)
\end{pmatrix}
=
\begin{pmatrix}
l \cos(\theta_1) + l \cos(\theta_1 + \theta_2) \\
l \sin(\theta_1) + l \sin(\theta_1 + \theta_2)
\end{pmatrix}
\in S^{n=2}_{2l} = \{ p(q) \in \mathbb R^2: \Vert p(q) \Vert_2^2 \leq (2l)^2, \ \forall q \in \mathcal Q \}
\end{equation*}$$

-Deriving the end-effector's *pose*---position *and* orientation---in some $m$-dimensional space $\vec{p} \in \mathcal{P} \subset \mathbb{R}^{m}$ starting from the configuration $q \in \mathcal Q \subset \mathbb R^n$ of an $n$-joint robot is referred to as *forward kinematics* (FK), whereas identifying the configuration corresponding to any given target pose is termed *inverse kinematics* (IK). That is, FK maps a robot configuration into the corresponding end-effector pose, whereas IK reconstructs the configuration(s) corresponding to a given end-effector pose.

-In the simplified case considered here (for which $\vec{p} \equiv p$, as the orientation of the end-effector is disregarded for simplicity), one can solve the problem of controlling the end-effector's location to reach a goal position $p^*$ by solving analytically for $q$ such that $p(q) = f_{\text{FK}}(q) = p^*$. However, in the general case, one might not be able to solve this problem analytically, and can typically resort to iterative optimization methods comparing candidate solutions using a loss function (in the simplest case, $\Vert p(q) - p^* \Vert_2^2$ is a natural candidate), yielding:

$$\begin{align}
\min_{q \in \mathcal Q} \Vert p(q) - p^* \Vert_2^2 \, .
\label{eq:ik_problem}
\end{align}$$

-Exact analytical solutions to IK are even less appealing when one considers the presence of obstacles in the robot's workspace, resulting in constraints on the possible values of $q \in \mathcal Q \subseteq [-\pi, +\pi]^n \subset \mathbb R^n$ in the general case of $n$-link robots.

-For instance, the robot in Figure [6](#fig:planar-manipulator-floor){reference-type="ref" reference="fig:planar-manipulator-floor"} is (very naturally) obstructed by the presence of the surface upon which it rests: $\theta_1$ can now exclusively vary within $[0, \pi]$, while possible variations in $\theta_2$ depend on $\theta_1$ (when $\theta_1 \to 0$ or $\theta_1 \to \pi$, further downward movements are restricted). Even for a simplified kinematic model, developing techniques to solve eq. [\[eq:ik_problem\]](#eq:ik_problem){reference-type="ref" reference="eq:ik_problem"} is in general non-trivial in the presence of constraints, particularly considering that the feasible set of solutions $\mathcal Q$ may change across problems. Figure [8](#fig:planar-manipulator-floor-shelf){reference-type="ref" reference="fig:planar-manipulator-floor-shelf"} provides an example of how the environment influences the feasible set considered, with a new set of constraints deriving from the position of a new obstacle.
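To make the planar example concrete, the sketch below implements the forward-kinematics map $p(q)$ above and solves eq. [\[eq:ik_problem\]](#eq:ik_problem){reference-type="ref" reference="eq:ik_problem"} numerically by minimizing $\Vert p(q) - p^* \Vert_2^2$ over the joint limits. It is a minimal illustration under assumed unit link lengths, not a production IK solver.

```python
# Numerical IK for the 2-dof planar arm via direct minimization of the position error.
import numpy as np
from scipy.optimize import minimize

L = 1.0  # assumed link length l (both links)

def fk(q):
    """Forward kinematics: q = [theta1, theta2] -> end-effector position p = [px, py]."""
    t1, t2 = q
    return np.array([L * np.cos(t1) + L * np.cos(t1 + t2),
                     L * np.sin(t1) + L * np.sin(t1 + t2)])

def ik(p_star, q0=np.zeros(2)):
    """Solve min_q ||p(q) - p*||^2 with q constrained to [-pi, pi]^2 (eq:ik_problem)."""
    loss = lambda q: float(np.sum((fk(q) - p_star) ** 2))
    res = minimize(loss, q0, bounds=[(-np.pi, np.pi)] * 2)
    return res.x

p_star = np.array([1.2, 0.7])   # a reachable goal position (||p*||_2 <= 2l)
q_sol = ik(p_star)
print(q_sol, fk(q_sol))          # fk(q_sol) should closely match p_star
```

Constraints induced by the environment (e.g., the supporting surface restricting $\theta_1$ to $[0, \pi]$) could be expressed by simply tightening the bounds passed to the optimizer.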
However, IK---solving eq. [\[eq:ik_problem\]](#eq:ik_problem){reference-type="ref" reference="eq:ik_problem"} for a feasible $q$---only proves useful in determining the robot's configuration at the goal pose, and crucially does not provide information on the *trajectory* to follow over time to reach that pose. Expert-defined trajectories obviate this problem by providing a length-$K$ succession of goal poses $\tau_K = [p^*_0, p^*_1, \dots, p^*_K]$ for tracking. In practice, trajectories can also be obtained automatically through *motion planning* algorithms, thus avoiding expensive trajectory definition by human experts. However, tracking $\tau_K$ via IK can prove prohibitively expensive, as tracking would require $K$ resolutions of eq. [\[eq:ik_problem\]](#eq:ik_problem){reference-type="ref" reference="eq:ik_problem"} (one for each target pose). *Differential* inverse kinematics (diff-IK) complements IK via a closed-form solution of a variant of eq. [\[eq:ik_problem\]](#eq:ik_problem){reference-type="ref" reference="eq:ik_problem"}. Let $J(q)$ denote the Jacobian matrix of (partial) derivatives of the FK function $f_{\text{FK}}: \mathcal Q \mapsto \mathcal P$, such that $J(q) = \frac{\partial f_{\text{FK}}(q)}{\partial q }$. Then, one can apply the chain rule to any $p(q) = f_{\text{FK}}(q)$, deriving $\dot p = J(q) \dot q$, thus finally relating variations in the robot's configuration to variations in pose, and thereby providing a platform for control.

-Given a desired end-effector velocity profile $\dot p^*(t)$ (1) indicating anchor regions in space and (2) how much time to spend in each region, diff-IK finds $\dot q(t)$ by solving for joint *velocities* instead of *configurations*,
$$\begin{align}
\dot q(t) = \arg\min_\nu \; \lVert J(q(t))\, \nu - \dot p^*(t) \rVert_2^2
\label{eq:reg_ik_velocity}
\end{align}$$

-Unlike eq. [\[eq:ik_problem\]](#eq:ik_problem){reference-type="ref" reference="eq:ik_problem"}, solving for $\dot q$ is much less dependent on the environment (typically, variations in velocity are constrained by physical limits on the actuators). Conveniently, eq. [\[eq:reg_ik_velocity\]](#eq:reg_ik_velocity){reference-type="ref" reference="eq:reg_ik_velocity"} also often admits the closed-form solution $\dot q = J(q)^+ \dot p^*$, where $J(q)^+$ denotes the Moore-Penrose pseudo-inverse of $J(q)$. Finally, discrete-time joint configurations $q$ can be reconstructed from joint velocities $\dot q$ using forward integration of the continuous-time joint velocity, $q_{t+1} = q_t + \Delta t\,\dot q_t$ for a given $\Delta t$, resulting in tracking via diff-IK.

-Following trajectories with diff-IK is a valid option in well-controlled and static environments (e.g., industrial manipulators in controlled manufacturing settings), and relies on the ability to define a set of target velocities to track $[\dot p^*_0, \dot p^*_1, \dots, \dot p^*_K ]$---an error-prone task largely requiring human expertise. Furthermore, diff-IK relies on the ability to (1) access $J(q) \, \forall q \in \mathcal Q$ and (2) compute its pseudo-inverse at every iteration of a given control cycle---a challenging assumption in highly dynamical settings, or for complex kinematic chains.
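Continuing the toy example, the following sketch tracks a constant target end-effector velocity with diff-IK: it evaluates the analytical $2 \times 2$ Jacobian of the planar FK map, applies the pseudo-inverse solution $\dot q = J(q)^+ \dot p^*$, and forward-integrates the joint velocities. The link length, time step, and velocity profile are illustrative assumptions.

```python
# Open-loop diff-IK tracking for the 2-dof planar arm.
import numpy as np

L, DT = 1.0, 0.01   # assumed link length and integration step

def fk(q):
    t1, t2 = q
    return np.array([L * np.cos(t1) + L * np.cos(t1 + t2),
                     L * np.sin(t1) + L * np.sin(t1 + t2)])

def jacobian(q):
    """Analytical Jacobian J(q) = d f_FK(q) / d q."""
    t1, t2 = q
    return np.array([[-L * np.sin(t1) - L * np.sin(t1 + t2), -L * np.sin(t1 + t2)],
                     [ L * np.cos(t1) + L * np.cos(t1 + t2),  L * np.cos(t1 + t2)]])

q = np.array([0.5, 0.5])             # initial configuration
p_star_dot = np.array([-0.05, 0.0])  # constant target end-effector velocity (illustrative)

for _ in range(200):
    q_dot = np.linalg.pinv(jacobian(q)) @ p_star_dot   # dq = J(q)^+ dp*
    q = q + DT * q_dot                                   # q_{t+1} = q_t + dt * dq_t

print(fk(q))  # the end-effector has moved roughly 0.1 along -x from its starting position
```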
-### Adding Feedback Loops

-While very effective when a goal trajectory has been well specified, the performance of diff-IK can degrade significantly in the presence of modeling/tracking errors, or in the presence of non-modeled dynamics in the environment.

-::: wrapfigure
-r0.3
-![image](figures/ch2/ch2-planar-manipulator-floor-box.png){width="\\linewidth"}
-:::

-One such case is presented in Figure [\[fig:planar-manipulator-box-velocity\]](#fig:planar-manipulator-box-velocity){reference-type="ref" reference="fig:planar-manipulator-box-velocity"}, where a rigid body other than the manipulator is moving in the environment along the horizontal axis, with velocity $\dot x_B$. Accounting analytically for the presence of this disturbance---for instance, to prevent the midpoint of the link from ever colliding with the object---requires access to at least $\dot x_B$, in order to derive the equation characterizing the motion of the environment.

-Less predictable disturbances, however (e.g., $\dot x_B \leftarrow \dot x_B + \epsilon, \ \epsilon \sim \mathcal N(0,1)$), may prove challenging to model analytically, and one could attain the same result of preventing link-object collision by adding a condition on the distance between the midpoint of $l$ and $x_B$, enforced through a feedback loop on the position of the robot and object at each control cycle.

-To mitigate the effect of modeling errors, sensing noise, and other disturbances, classical pipelines indeed augment diff-IK with feedback control, looping back quantities of interest. In practice, following a trajectory with a closed feedback loop might consist of feeding back the error between the target and measured pose, $\Delta p = p^* - p(q)$, thereby modifying the control to $\dot q = J(q)^+ (\dot p^* + k_p \Delta p )$, with $k_p$ the (proportional) gain.

-More advanced control techniques such as feedback linearization, PID control, the Linear Quadratic Regulator (LQR), or Model-Predictive Control (MPC) can be employed to stabilize tracking and reject moderate perturbations, and we refer to @sicilianoSpringerHandbookRobotics2016 [Chapter 8] for an in-detail explanation of these concepts, or to [@tedrakeRoboticManipulationPerception Chapter 8] for a simple, intuitive example in the case of a point-mass system. Nonetheless, feedback control presents its challenges as well: tuning gains remains laborious and system-specific. Further, manipulation tasks present intermittent contacts inducing hybrid dynamics (mode switches) and discontinuities in the Jacobian, challenging the stability guarantees of the controller and thus often necessitating rather conservative gains and substantial hand-tuning.

-We point the interested reader to @sicilianoSpringerHandbookRobotics2016 [Chapters 2, 7, 8], @lynchModernRoboticsMechanics2017 [Chapters 6, 11], and @tedrakeRoboticManipulationPerception [Chapters 3, 8] for extended coverage of FK, IK, diff-IK, and control for (diff-)IK.
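As an illustration of the proportional correction above, the sketch below extends the diff-IK loop with the feedback term $k_p \Delta p$, tracking a short sequence of target poses; the gain, time step, and trajectory are arbitrary choices made for the example and would need tuning on a real system.

```python
# Closed-loop diff-IK: feed-forward target velocity plus proportional feedback on the pose error.
import numpy as np

L, DT, KP = 1.0, 0.01, 2.0   # assumed link length, control period, proportional gain

def fk(q):
    t1, t2 = q
    return np.array([L * np.cos(t1) + L * np.cos(t1 + t2),
                     L * np.sin(t1) + L * np.sin(t1 + t2)])

def jacobian(q):
    t1, t2 = q
    return np.array([[-L * np.sin(t1) - L * np.sin(t1 + t2), -L * np.sin(t1 + t2)],
                     [ L * np.cos(t1) + L * np.cos(t1 + t2),  L * np.cos(t1 + t2)]])

q = np.array([0.5, 0.5])
# Targets p*_k slide the end-effector by 0.3 towards the base along -x (illustrative trajectory).
targets = [fk(q) + np.array([-0.001 * k, 0.0]) for k in range(1, 301)]

prev_target = fk(q)
for p_star in targets:
    p_star_dot = (p_star - prev_target) / DT        # feed-forward velocity along the trajectory
    delta_p = p_star - fk(q)                        # measured pose error, looped back each cycle
    q_dot = np.linalg.pinv(jacobian(q)) @ (p_star_dot + KP * delta_p)
    q = q + DT * q_dot
    prev_target = p_star

print(np.linalg.norm(fk(q) - targets[-1]))          # residual tracking error (small if tracking succeeds)
```

Compared to the open-loop variant, the feedback term keeps the end-effector close to the reference even when the integration drifts or the target moves unexpectedly, at the cost of a gain $k_p$ that must be tuned per system.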
-## Limitations of Dynamics-based Robotics

-Despite the last 60+ years of robotics research, autonomous robots are still largely incapable of performing tasks at human-level performance in the physical world while generalizing across (1) robot embodiments (different manipulators, different locomotion platforms, etc.) and (2) tasks (tying shoe-laces, manipulating a diverse set of objects). While essential in the early development of robotics, the aforementioned methods require significant human expertise to be used in practice, and are typically specific to a particular applicative problem.

-![Dynamics-based approaches to robotics suffer from several limitations: (1) orchestrating multiple components poses integration challenges; (2) the need to develop custom processing pipelines for the sensing modalities and tasks considered hinders scalability; (3) simplified analytical models of physical phenomena (here friction at the gripper; credits to @antonovaReinforcementLearningPivoting2017) limit real-world performance. Lastly, (4) dynamics-based methods overlook trends in the availability and growth of robotics data.](figures/ch2/ch2-classical-limitations.png){#fig:classical-limitations width="90%"}

-Dynamics-based robotics pipelines have historically been organized as collections of modules, each serving a specific purpose within the overall architecture. That is, sensing, state estimation, mapping, planning, (diff-)IK, and low-level control have traditionally been developed as distinct modules with fixed interfaces. Pipelining these specific modules proved error-prone, and brittleness emerges---alongside compounding errors---whenever changes occur (e.g., changes in lighting for sensing, occlusion/failure of sensors, control failures). Adapting such a stack to new tasks or robotic platforms often entails re-specifying objectives, constraints, and heuristics at multiple stages, incurring significant engineering overhead.

-Moreover, classical planners operate on compact, assumed-sufficient state representations; extending them to reason directly over raw, heterogeneous, and noisy data streams is non-trivial. This results in a scalability challenge, as incorporating high-dimensional perceptual inputs (RGB, depth, tactile, audio) has traditionally required extensive engineering effort to extract meaningful features for control. Also, the large number of tasks, coupled with the adoption of *per-task* planners, goal parameterizations, and safety constraints, results in an explosion of design and validation options, with little opportunity to reuse solutions across tasks.

-Setting aside integration and scalability challenges, accurate modeling of contact, friction, and compliance for complicated systems remains difficult. Rigid-body approximations are often insufficient in the presence of deformable objects, limiting the applicability of the methods developed. In the case of complex, time-dependent and/or non-linear dynamics, even moderate mismatches in parameters, unmodeled evolutions, or grasp-induced couplings can qualitatively affect the observed dynamics.

-Lastly, dynamics-based methods (naturally) overlook the rather recent growth in the availability of robotics data. The curation of academic datasets by large centralized groups of human experts in robotics [@collaborationOpenXEmbodimentRobotic2025; @khazatskyDROIDLargeScaleInTheWild2025] is now increasingly complemented by data collected by individuals with varied expertise. Other than tangentially, dynamics-based approaches are not poised to maximally benefit from this trend, which holds the promise of enabling generalization across tasks and embodiments, much as data was the cornerstone of advances in vision [@alayracFlamingoVisualLanguage2022] and natural-language understanding [@brownLanguageModelsAre2020].
- -Taken together, these limitations -(Figure [9](#fig:classical-limitations){reference-type="ref" -reference="fig:classical-limitations"}) motivate the exploration of -learning-based approaches that can (1) integrate perception and control -more tightly, (2) adapt across tasks and embodiments with reduced expert -modeling interventions and (3) scale gracefully in performance as more -robotics data becomes available. - -[^1]: In here, we refer to both *kinematics* and *dynamics*-based - control. diff --git a/app/scripts/latex-to-mdx/mdx-converter.mjs b/app/scripts/latex-to-mdx/mdx-converter.mjs index abf25355187f3c4251374af30b4373e48534a5cf..5a0deaf79026bc7e6cdf6af59c7c1b61cbea03fb 100644 --- a/app/scripts/latex-to-mdx/mdx-converter.mjs +++ b/app/scripts/latex-to-mdx/mdx-converter.mjs @@ -124,12 +124,12 @@ function addComponentImports(content) { /** - * Convert grouped figures (subfigures) to MultiImage components + * Convert grouped figures (subfigures) to MultiFigure components * @param {string} content - MDX content - * @returns {string} - Content with MultiImage components for grouped figures + * @returns {string} - Content with MultiFigure components for grouped figures */ -function convertSubfiguresToMultiImage(content) { - console.log(' 🖼️✨ Converting subfigures to MultiImage components...'); +function convertSubfiguresToMultiFigure(content) { + console.log(' 🖼️✨ Converting subfigures to MultiFigure components...'); let convertedCount = 0; @@ -187,8 +187,8 @@ function convertSubfiguresToMultiImage(content) { .replace(/'/g, "\\'") .trim(); - // Mark MultiImage component as used - usedComponents.add('MultiImage'); + // Mark MultiFigure component as used + usedComponents.add('MultiFigure'); // Determine layout based on number of images let layout = 'auto'; @@ -196,12 +196,12 @@ function convertSubfiguresToMultiImage(content) { else if (images.length === 3) layout = '3-column'; else if (images.length === 4) layout = '4-column'; - // Generate MultiImage component + // Generate MultiFigure component const imagesJson = images.map(img => ` {\n src: ${img.src},\n alt: "${img.alt}",\n caption: "${img.caption}",\n id: "${img.id}"\n }` ).join(',\n'); - return ` 0) { - console.log(` ✅ Converted ${convertedCount} subfigure group(s) to MultiImage component(s)`); + console.log(` ✅ Converted ${convertedCount} subfigure group(s) to MultiFigure component(s)`); } else { console.log(' ℹ️ No subfigure groups found'); } @@ -222,23 +222,23 @@ ${imagesJson} } /** - * Transform images to ResponsiveImage components + * Transform images to Figure components * @param {string} content - MDX content - * @returns {string} - Content with ResponsiveImage components + * @returns {string} - Content with Figure components */ /** - * Create ResponsiveImage component with import + * Create Figure component with import * @param {string} src - Clean image source * @param {string} alt - Alt text * @param {string} id - Element ID * @param {string} caption - Figure caption * @param {string} width - Optional width - * @returns {string} - ResponsiveImage component markup + * @returns {string} - Figure component markup */ -function createResponsiveImageComponent(src, alt = '', id = '', caption = '', width = '') { +function createFigureComponent(src, alt = '', id = '', caption = '', width = '') { const varName = generateImageVarName(src); imageImports.set(src, varName); - usedComponents.add('ResponsiveImage'); + usedComponents.add('Figure'); const props = []; props.push(`src={${varName}}`); @@ -249,11 +249,11 @@ function 
createResponsiveImageComponent(src, alt = '', id = '', caption = '', wi if (alt) props.push(`alt="${alt}"`); if (caption) props.push(`caption={'${caption}'}`); - return ``; + return ``; } function transformImages(content) { - console.log(' 🖼️ Transforming images to ResponsiveImage components with imports...'); + console.log(' 🖼️ Transforming images to Figure components with imports...'); let hasImages = false; @@ -297,7 +297,7 @@ function transformImages(content) { const altText = cleanAltText(cleanCap); hasImages = true; - return createResponsiveImageComponent(cleanSrc, altText, id, cleanCap); + return createFigureComponent(cleanSrc, altText, id, cleanCap); } ); @@ -309,7 +309,7 @@ function transformImages(content) { const cleanAlt = cleanAltText(alt || 'Figure'); hasImages = true; - return createResponsiveImageComponent(cleanSrc, cleanAlt); + return createFigureComponent(cleanSrc, cleanAlt); } ); @@ -320,7 +320,7 @@ function transformImages(content) { const cleanSrc = cleanSrcPath(src); hasImages = true; - return createResponsiveImageComponent(cleanSrc, 'Figure'); + return createFigureComponent(cleanSrc, 'Figure'); } ); @@ -333,7 +333,7 @@ function transformImages(content) { const altText = cleanAltText(cleanCap); hasImages = true; - return createResponsiveImageComponent(cleanSrc, altText, id, cleanCap); + return createFigureComponent(cleanSrc, altText, id, cleanCap); } ); @@ -346,7 +346,7 @@ function transformImages(content) { const altText = cleanAltText(cleanCap); hasImages = true; - return createResponsiveImageComponent(cleanSrc, altText, id, cleanCap); + return createFigureComponent(cleanSrc, altText, id, cleanCap); } ); @@ -364,12 +364,12 @@ function transformImages(content) { if (idMatch) id = idMatch[1]; } - return createResponsiveImageComponent(cleanSrc, cleanAlt, id); + return createFigureComponent(cleanSrc, cleanAlt, id); } ); if (hasImages) { - console.log(' ✅ ResponsiveImage components with imports will be created'); + console.log(' ✅ Figure components with imports will be created'); } return content; @@ -822,7 +822,7 @@ function processMdxContent(content, latexContent = '') { processedContent = formatDisplayMathBlocks(processedContent); processedContent = removeHtmlComments(processedContent); processedContent = cleanMdxSyntax(processedContent); - processedContent = convertSubfiguresToMultiImage(processedContent); + processedContent = convertSubfiguresToMultiFigure(processedContent); processedContent = transformImages(processedContent); processedContent = transformStyledSpans(processedContent); processedContent = transformReferenceLinks(processedContent); diff --git a/app/scripts/latex-to-mdx/post-processor.mjs b/app/scripts/latex-to-mdx/post-processor.mjs index 390014cc994624c899207e3ba0ddebbcb78d75de..c108c173957c93412672add2199f978ad73ab73f 100644 --- a/app/scripts/latex-to-mdx/post-processor.mjs +++ b/app/scripts/latex-to-mdx/post-processor.mjs @@ -263,7 +263,7 @@ function fixAllAttributes(content) { return `data-reference="${before}-${after}"`; }); - // Fix id attributes containing colons (like in ResponsiveImage components) + // Fix id attributes containing colons (like in Figure components) content = content.replace(/id="([^"]*):([^"]*)"/g, (match, before, after) => { fixedCount++; return `id="${before}-${after}"`; diff --git a/app/scripts/sync-template.mjs b/app/scripts/sync-template.mjs index 71197b3ea881be51f96f9fd7d94a4d05d1a089f1..949702ed5f27f5b1c673641bfb2bbde0e3bb0457 100644 --- a/app/scripts/sync-template.mjs +++ b/app/scripts/sync-template.mjs @@ -28,7 +28,7 
@@ const PRESERVE_PATHS = [ // Project-specific content 'app/src/content', - // Public data (symlink to our data) + // Public data (symlink to our data) - CRITICAL: preserve this symlink 'app/public/data', // Local configuration @@ -75,7 +75,7 @@ console.log(''); async function executeCommand(command, options = {}) { try { if (isDryRun && !options.allowInDryRun) { - console.log(`[DRY-RUN] Commande: ${command}`); + console.log(`[DRY-RUN] Command: ${command}`); return ''; } console.log(`$ ${command}`); @@ -141,7 +141,20 @@ async function syncFile(sourcePath, targetPath) { } } - // Créer un backup si le fichier existe déjà (et que ce n'est pas un lien symbolique) + // Check if target file is a symbolic link to preserve + if (await pathExists(targetPath)) { + try { + const targetStats = await fs.lstat(targetPath); + if (targetStats.isSymbolicLink()) { + console.log(`🔗 SYMLINK TARGET (preserved): ${relativeTarget}`); + return; + } + } catch (error) { + console.warn(`⚠️ Impossible de vérifier ${targetPath}: ${error.message}`); + } + } + + // Create backup if file already exists (and is not a symbolic link) if (await pathExists(targetPath)) { try { const stats = await fs.lstat(targetPath); @@ -161,11 +174,11 @@ async function syncFile(sourcePath, targetPath) { // Assurer que le répertoire parent existe await fs.mkdir(path.dirname(targetPath), { recursive: true }); - // Vérifier si la source est un lien symbolique + // Check if source is a symbolic link try { const sourceStats = await fs.lstat(sourcePath); if (sourceStats.isSymbolicLink()) { - console.log(`🔗 SYMLINK (ignored): ${relativeTarget}`); + console.log(`🔗 SYMLINK SOURCE (ignored): ${relativeTarget}`); return; } } catch (error) { @@ -173,7 +186,7 @@ async function syncFile(sourcePath, targetPath) { return; } - // Supprimer le fichier cible s'il existe (pour gérer les liens symboliques) + // Remove target file if it exists (to handle symbolic links) if (await pathExists(targetPath)) { await fs.rm(targetPath, { recursive: true, force: true }); } @@ -212,19 +225,55 @@ async function cloneOrUpdateTemplate() { // Nettoyer le dossier temporaire s'il existe if (await pathExists(TEMP_DIR)) { - if (!isDryRun) { - await fs.rm(TEMP_DIR, { recursive: true, force: true }); - } else { + await fs.rm(TEMP_DIR, { recursive: true, force: true }); + if (isDryRun) { console.log(`[DRY-RUN] Suppression: ${TEMP_DIR}`); } } - // Cloner le repo template (même en dry-run pour pouvoir comparer) + // Clone template repo (even in dry-run to be able to compare) await executeCommand(`git clone ${TEMPLATE_REPO} "${TEMP_DIR}"`, { allowInDryRun: true }); return TEMP_DIR; } +async function ensureDataSymlink() { + const dataSymlinkPath = path.join(APP_ROOT, 'public', 'data'); + const dataSourcePath = path.join(APP_ROOT, 'src', 'content', 'assets', 'data'); + + // Check if symlink exists and is correct + if (await pathExists(dataSymlinkPath)) { + try { + const stats = await fs.lstat(dataSymlinkPath); + if (stats.isSymbolicLink()) { + const target = await fs.readlink(dataSymlinkPath); + const expectedTarget = path.relative(path.dirname(dataSymlinkPath), dataSourcePath); + if (target === expectedTarget) { + console.log('🔗 Data symlink is correct'); + return; + } else { + console.log(`⚠️ Data symlink points to wrong target: ${target} (expected: ${expectedTarget})`); + } + } else { + console.log('⚠️ app/public/data exists but is not a symlink'); + } + } catch (error) { + console.log(`⚠️ Error checking symlink: ${error.message}`); + } + } + + // Recreate symlink + if (!isDryRun) { 
+ if (await pathExists(dataSymlinkPath)) { + await fs.rm(dataSymlinkPath, { recursive: true, force: true }); + } + await fs.symlink(path.relative(path.dirname(dataSymlinkPath), dataSourcePath), dataSymlinkPath); + console.log('✅ Data symlink recreated'); + } else { + console.log('[DRY-RUN] Would recreate data symlink'); + } +} + async function showSummary(templateDir) { console.log('\n📊 SYNCHRONIZATION SUMMARY'); console.log('================================'); @@ -265,19 +314,23 @@ async function cleanup() { async function main() { try { - // Vérifier qu'on est dans le bon répertoire + // Verify we're in the correct directory const packageJsonPath = path.join(APP_ROOT, 'package.json'); if (!(await pathExists(packageJsonPath))) { - throw new Error(`Package.json non trouvé dans ${APP_ROOT}. Êtes-vous dans le bon répertoire ?`); + throw new Error(`Package.json not found in ${APP_ROOT}. Are you in the correct directory?`); } - // Cloner le template + // Clone the template const templateDir = await cloneOrUpdateTemplate(); // Synchroniser console.log('\n🔄 Synchronisation en cours...'); await syncDirectory(templateDir, PROJECT_ROOT); + // S'assurer que le lien symbolique des données est correct + console.log('\n🔗 Vérification du lien symbolique des données...'); + await ensureDataSymlink(); + // Afficher le résumé await showSummary(templateDir); @@ -292,7 +345,7 @@ async function main() { } } -// Gestion des signaux pour nettoyer en cas d'interruption +// Signal handling to clean up on interruption process.on('SIGINT', async () => { console.log('\n\n⚠️ Interruption detected, cleaning up...'); await cleanup(); diff --git a/app/src/components/ColorPicker.astro b/app/src/components/ColorPicker.astro deleted file mode 100644 index b728b8c0b940d556d642561e65c7a084333b7ab9..0000000000000000000000000000000000000000 --- a/app/src/components/ColorPicker.astro +++ /dev/null @@ -1,118 +0,0 @@ ---- ---- -
- -
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Hue
-
-
-
-
337°
-
-
-
-
- - - diff --git a/app/src/components/ResponsiveImage.astro b/app/src/components/Figure.astro similarity index 92% rename from app/src/components/ResponsiveImage.astro rename to app/src/components/Figure.astro index 1632761635bebb300936902ca7ac09a30ad51eb2..1535848ace4d3d7b5c3b9d3bf72b19836e6d7ebd 100644 --- a/app/src/components/ResponsiveImage.astro +++ b/app/src/components/Figure.astro @@ -175,13 +175,10 @@ const resolvedRel = hasLink ? linkRel || "noopener noreferrer" : undefined; - - - diff --git a/app/src/components/Sidenote.astro b/app/src/components/Sidenote.astro index 1a7805fe030b6ed97975667c003c8674b8204e85..ea3cc86be616bdf37aa4f55ad6690af512a830b4 100644 --- a/app/src/components/Sidenote.astro +++ b/app/src/components/Sidenote.astro @@ -13,27 +13,27 @@ const containers = document.querySelectorAll(".sidenote-container"); containers.forEach((container) => { - // Trouve l'élément précédent (frère juste avant) + // Find the previous element (sibling just before) const previousElement = container.previousElementSibling; if (previousElement) { - // Rend le conteneur de la sidenote relatif à l'élément précédent + // Make the sidenote container relative to the previous element previousElement.style.position = "relative"; - // Déplace le conteneur de la sidenote comme enfant de l'élément précédent + // Move the sidenote container as child of the previous element previousElement.appendChild(container); - // Style le conteneur pour qu'il se positionne correctement + // Style the container so it positions correctly container.style.position = "absolute"; container.style.top = "0"; container.style.right = "-292px"; // 260px width + 32px gap container.style.width = "260px"; - // Affiche le container avec un fade-in + // Display the container with a fade-in container.style.display = "block"; container.style.opacity = "0"; - // Fade-in avec transition + // Fade-in with transition setTimeout(() => { container.style.opacity = "1"; }, 10); @@ -46,8 +46,8 @@ .sidenote-container { /* Caché par défaut, sera affiché par JS */ display: none; - margin: 12px 0; - /* Transition pour le fade-in */ + margin: 0 ; + /* Transition for fade-in */ transition: opacity 0.3s ease-in-out; } diff --git a/app/src/components/demo/ColorPicker.astro b/app/src/components/demo/ColorPicker.astro new file mode 100644 index 0000000000000000000000000000000000000000..c26e355ff005d3453246acb1db96cbc234e6c065 --- /dev/null +++ b/app/src/components/demo/ColorPicker.astro @@ -0,0 +1,633 @@ +--- + +--- + +
+ +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Hue
+
+
+
+
214°
+
+
+
+
+ diff --git a/app/src/components/demo/Palettes.astro b/app/src/components/demo/Palettes.astro new file mode 100644 index 0000000000000000000000000000000000000000..2aabeda6ffa7b0af45799e105b04e4da5eb60060 --- /dev/null +++ b/app/src/components/demo/Palettes.astro @@ -0,0 +1,596 @@ +--- +const rootId = `palettes-${Math.random().toString(36).slice(2)}`; +--- + +
+ +
+
+ + +
+
+
+ + 8 +
+
+ +
+
+
+
+
+ +
+
+ diff --git a/app/src/components/trackio/Trackio.svelte b/app/src/components/trackio/Trackio.svelte index 63805331926bfff1ab7bc59b140d2c276cad02be..309e945f002482503aaed8c39e43337dbd0b07d1 100644 --- a/app/src/components/trackio/Trackio.svelte +++ b/app/src/components/trackio/Trackio.svelte @@ -1,14 +1,20 @@
- { const run = e?.detail?.name; if (!run) return; ghostRun(run); }} on:legend-leave={() => { clearGhost(); }} /> + { + const run = e?.detail?.name; + if (!run) return; + ghostRun(run); + }} + on:legend-leave={() => { + clearGhost(); + }} + />
{#each cellsDef as c, i} - colorsByRun[name] || '#999'} + colorsByRun[name] || "#999"} {hostEl} currentIndex={i} onOpenModal={openModal} @@ -467,9 +638,17 @@
@@ -477,7 +656,7 @@ - - diff --git a/app/src/components/TrackioWrapper.astro b/app/src/components/trackio/TrackioWrapper.astro similarity index 57% rename from app/src/components/TrackioWrapper.astro rename to app/src/components/trackio/TrackioWrapper.astro index fc5eae5d0113a30ad81dc6047a68c9a81b710f81..f8ac691cf261f0aa5fae6a3df2e5f8193853191a 100644 --- a/app/src/components/TrackioWrapper.astro +++ b/app/src/components/trackio/TrackioWrapper.astro @@ -1,12 +1,15 @@ --- // TrackioWrapper.astro -import Trackio from './trackio/Trackio.svelte'; +import Trackio from "./Trackio.svelte"; --- - - - + + +
@@ -20,11 +23,11 @@ import Trackio from './trackio/Trackio.svelte';
@@ -33,15 +36,24 @@ import Trackio from './trackio/Trackio.svelte'; - -
- +
@@ -49,245 +61,284 @@ import Trackio from './trackio/Trackio.svelte'; \ No newline at end of file diff --git a/app/src/content/embeds/arxiv/fetch_arxiv_api.py b/app/src/content/embeds/arxiv/fetch_arxiv_api.py new file mode 100644 index 0000000000000000000000000000000000000000..18b3a9f8a651ae9e5313a02b2356e0c87db75d90 --- /dev/null +++ b/app/src/content/embeds/arxiv/fetch_arxiv_api.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python3 +""" +Script to retrieve papers from the arXiv API +Optimized for natural representation of scientific domains +""" + +import requests +import xml.etree.ElementTree as ET +import json +import time +import os +from urllib.parse import quote +from datetime import datetime, timedelta +from collections import Counter +import random + +class ArxivFetcher: + def __init__(self): + self.base_url = "http://export.arxiv.org/api/query" + self.delay = 3 # Delay between requests (respecting API limits) + + def fetch_by_category(self, categories, max_per_category=500, total_max=15000): + """Retrieve papers by category with global limit""" + print(f"🔍 Retrieval by category (max {max_per_category} per cat, {total_max} total)") + + all_papers = [] + + for i, category in enumerate(categories): + if len(all_papers) >= total_max: + break + + print(f" [{i+1}/{len(categories)}] {category}...") + + # Dynamic calculation of number to retrieve + remaining = total_max - len(all_papers) + fetch_count = min(max_per_category, remaining) + + papers = self._fetch_category(category, fetch_count) + all_papers.extend(papers) + + print(f" ✅ {len(papers)} papers retrieved (total: {len(all_papers)})") + + # Delay between categories + if i < len(categories) - 1: + time.sleep(self.delay) + + return all_papers[:total_max] + + def fetch_recent_papers(self, days_back=30, max_results=15000): + """Retrieve recent papers from the last days""" + print(f"📅 Retrieving papers from the last {days_back} days") + + # End date: today + end_date = datetime.now() + # Start date: X days ago + start_date = end_date - timedelta(days=days_back) + + # Format arXiv: YYYYMMDDHHMM + date_query = f"submittedDate:[{start_date.strftime('%Y%m%d%H%M')} TO {end_date.strftime('%Y%m%d%H%M')}]" + + return self._fetch_with_query(date_query, max_results) + + def _fetch_category(self, category, max_results): + """Retrieve papers from a specific category""" + query = f"cat:{category}" + return self._fetch_with_query(query, max_results) + + def _fetch_with_query(self, query, max_results): + """Generic method to retrieve with a query""" + papers = [] + start = 0 + batch_size = min(1000, max_results) # arXiv limits to 1000 per request + + while len(papers) < max_results: + remaining = max_results - len(papers) + current_batch = min(batch_size, remaining) + + params = { + 'search_query': query, + 'start': start, + 'max_results': current_batch, + 'sortBy': 'submittedDate', + 'sortOrder': 'descending' + } + + try: + response = requests.get(self.base_url, params=params, timeout=30) + response.raise_for_status() + + batch_papers = self._parse_response(response.text) + if not batch_papers: + print(f" ⚠️ No results for start={start}") + break + + papers.extend(batch_papers) + start += len(batch_papers) + + print(f" 📄 Batch {len(batch_papers)} papers (total: {len(papers)})") + + # Delay between requests + time.sleep(self.delay) + + except Exception as e: + print(f" ❌ Error: {e}") + break + + return papers[:max_results] + + def _parse_response(self, xml_content): + """Parse arXiv XML response""" + papers = [] + + try: + root = ET.fromstring(xml_content) + 
+ # arXiv Namespace + ns = {'atom': 'http://www.w3.org/2005/Atom', + 'arxiv': 'http://arxiv.org/schemas/atom'} + + entries = root.findall('atom:entry', ns) + + for entry in entries: + try: + # ID arXiv + arxiv_id = entry.find('atom:id', ns).text.split('/')[-1] + + # Titre + title = entry.find('atom:title', ns).text.strip() + title = ' '.join(title.split()) # Clean spaces + + # Résumé + summary = entry.find('atom:summary', ns).text.strip() + summary = ' '.join(summary.split())[:500] # Limit size + + # Auteurs + authors = [] + for author in entry.findall('atom:author', ns): + name = author.find('atom:name', ns) + if name is not None: + authors.append(name.text.strip()) + + # Catégories + categories = [] + primary_category = None + + for category in entry.findall('atom:category', ns): + term = category.get('term') + if term: + categories.append(term) + + # Primary category + primary_cat = entry.find('arxiv:primary_category', ns) + if primary_cat is not None: + primary_category = primary_cat.get('term') + elif categories: + primary_category = categories[0] + + # Publication date + published = entry.find('atom:published', ns) + published_date = published.text if published is not None else None + + paper = { + 'id': arxiv_id, + 'title': title, + 'summary': summary, + 'authors': authors, + 'categories': categories, + 'primary_category': primary_category, + 'published': published_date + } + + papers.append(paper) + + except Exception as e: + print(f" ⚠️ Error parsing entry: {e}") + continue + + except ET.ParseError as e: + print(f"❌ XML parsing error: {e}") + + return papers + +def save_papers(papers, filename): + """Save papers to JSON""" + with open(filename, 'w', encoding='utf-8') as f: + json.dump(papers, f, indent=2, ensure_ascii=False) + + size_mb = os.path.getsize(filename) / 1024 / 1024 + print(f"💾 Saved: {filename} ({len(papers)} papers, {size_mb:.1f} MB)") + +def main(): + """Main arXiv data retrieval""" + print("🚀 ArXiv Data Fetcher - Version Optimisée") + print("=" * 50) + + fetcher = ArxivFetcher() + + # Simple approach: 1 month of recent data + print("\n📅 SIMPLE APPROACH: 1 month of recent data") + print("🎯 Objective: retrieve everything available from the last month") + print("⚡ Without representativeness constraint - just natural data") + + # Try with different periods to find data + monthly_papers = None + for days in [30, 60, 90, 120]: # 1, 2, 3, 4 months + print(f"\n🔍 Attempt: {days} days...") + monthly_papers = fetcher.fetch_recent_papers(days_back=days, max_results=15000) + if monthly_papers and len(monthly_papers) > 1000: + print(f"✅ {len(monthly_papers)} papers found over {days} days") + break + elif monthly_papers: + print(f"⚠️ Only {len(monthly_papers)} papers over {days} days") + else: + print(f"❌ No papers found over {days} days") + + if not monthly_papers: + print("\n🔄 Fallback: retrieval by popular categories") + # If no recent data, just take popular categories + popular_categories = [ + 'cs.LG', 'cs.AI', 'cs.CV', 'cs.CL', 'cs.CR', 'cs.RO', 'cs.HC', + 'physics.comp-ph', 'physics.data-an', 'physics.optics', + 'math.ST', 'math.NA', 'math.OC', 'math.PR', + 'stat.ML', 'stat.ME', 'stat.AP', + 'eess.AS', 'eess.IV', 'eess.SP', + 'q-bio.QM', 'q-bio.BM', 'astro-ph.CO' + ] + + monthly_papers = fetcher.fetch_by_category( + categories=popular_categories, + max_per_category=500, + total_max=15000 + ) + + if monthly_papers: + save_papers(monthly_papers, "arxiv_monthly_papers.json") + + # Statistiques finales + from collections import Counter + + # Check paper structure + 
sample_keys = list(monthly_papers[0].keys()) if monthly_papers else [] + category_key = 'primary_category' if 'primary_category' in sample_keys else 'categories' + + domains = [] + for paper in monthly_papers: + if category_key in paper: + cat = paper[category_key] + if isinstance(cat, list) and cat: + domains.append(cat[0].split('.')[0]) + elif isinstance(cat, str): + domains.append(cat.split('.')[0]) + + domain_counts = Counter(domains) + + print(f"\n📊 Natural distribution ({len(monthly_papers)} papers):") + for domain, count in domain_counts.most_common(): + percentage = count / len(monthly_papers) * 100 + print(f" {domain}: {count} papers ({percentage:.1f}%)") + else: + print("❌ Complete retrieval failure") + + print("\n🎉 Retrieval completed!") + print("📁 Files created:") + for filename in ["arxiv_monthly_papers.json"]: + if os.path.exists(filename): + size = os.path.getsize(filename) / 1024 / 1024 # MB + print(f" - {filename} ({size:.1f} MB)") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/app/src/content/embeds/arxiv/generate_umap.py b/app/src/content/embeds/arxiv/generate_umap.py new file mode 100644 index 0000000000000000000000000000000000000000..23e43a76394a43d62abaac029f8d59776b59fd67 --- /dev/null +++ b/app/src/content/embeds/arxiv/generate_umap.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python3 +""" +UMAP Generator for arXiv papers +Creates 2D and 3D projections with density-weighted centroids +""" + +import json +import numpy as np +import pandas as pd +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.decomposition import TruncatedSVD +import umap +import os +import shutil +from datetime import datetime +from collections import Counter + +def load_papers(filename="arxiv_monthly_papers.json"): + """Load papers from JSON file""" + if not os.path.exists(filename): + print(f"❌ File {filename} not found!") + print("💡 Run fetch_arxiv_api.py first") + return None + + with open(filename, 'r', encoding='utf-8') as f: + papers = json.load(f) + + print(f"📚 {len(papers)} papers loaded from {filename}") + return papers + +def preprocess_papers(papers, sample_rate=5): + """Preprocess papers and sample if necessary""" + print(f"🔄 Preprocessing papers...") + + # Filter papers with missing data + valid_papers = [] + for paper in papers: + if (paper.get('title') and + paper.get('summary') and + paper.get('primary_category')): + valid_papers.append(paper) + + print(f"✅ {len(valid_papers)} valid papers after filtering") + + # Sampling for performance (1 out of N) + if sample_rate > 1: + sampled_papers = valid_papers[::sample_rate] + print(f"📊 Sampling 1/{sample_rate}: {len(sampled_papers)} papers retained") + return sampled_papers + + return valid_papers + +def create_embeddings(papers, max_features=5000, n_components=50): + """Create TF-IDF + SVD embeddings of papers""" + print(f"🔢 Creating embeddings (max_features={max_features}, n_components={n_components})") + + # Combine title and summary + texts = [] + for paper in papers: + title = paper.get('title', '').strip() + summary = paper.get('summary', '').strip() + combined = f"{title} {summary}" + texts.append(combined) + + # TF-IDF + print(" 📝 TF-IDF vectorization...") + tfidf = TfidfVectorizer( + max_features=max_features, + stop_words='english', + ngram_range=(1, 2), + min_df=2, + max_df=0.95 + ) + + tfidf_matrix = tfidf.fit_transform(texts) + print(f" ✅ TF-IDF: {tfidf_matrix.shape}") + + # Dimensionality reduction with SVD + print(f" 🔄 SVD reduction to {n_components} dimensions...") + svd = 
TruncatedSVD(n_components=n_components, random_state=42) + embeddings = svd.fit_transform(tfidf_matrix) + + print(f" ✅ Final embeddings: {embeddings.shape}") + print(f" 📊 Explained variance: {svd.explained_variance_ratio_.sum():.3f}") + + return embeddings + +def map_to_families(papers): + """Map categories to 9 main scientific families""" + + # Mapping to 9 scientific families + domain_to_family = { + 'cs': 'Computer Science', + 'math': 'Mathematics', + 'physics': 'Physics', + 'stat': 'Statistics', + 'q-bio': 'Biology', + 'eess': 'Engineering', + 'astro-ph': 'Astrophysics', + 'cond-mat': 'Condensed Matter', + 'nucl': 'Nuclear Physics' + } + + families = [] + for paper in papers: + primary_cat = paper.get('primary_category', '') + if primary_cat: + domain = primary_cat.split('.')[0] + family = domain_to_family.get(domain, 'Other') + else: + family = 'Other' + families.append(family) + + family_counts = Counter(families) + print(f"📊 Distribution by family:") + for family, count in family_counts.most_common(): + print(f" {family}: {count} papers") + + return families + +def generate_umap_projection(embeddings, families, n_neighbors=50, min_dist=0.1, spread=0.5, n_components=2): + """Generate UMAP projection""" + print(f"🎯 UMAP projection (n_neighbors={n_neighbors}, min_dist={min_dist}, spread={spread}, n_components={n_components})") + + # Configuration UMAP + reducer = umap.UMAP( + n_neighbors=n_neighbors, + min_dist=min_dist, + spread=spread, + n_components=n_components, + random_state=42, + metric='cosine' + ) + + # Projection + projection = reducer.fit_transform(embeddings) + print(f"✅ Projection UMAP: {projection.shape}") + + return projection + +def calculate_density_weighted_centroids(projection, families, families_list): + """Calculate density-weighted centroids""" + print("🎯 Calculating density-weighted centroids...") + + centroids = {} + + for family in families_list: + # Points of this family + family_mask = np.array(families) == family + family_points = projection[family_mask] + + if len(family_points) < 30: # Filter families too small + continue + + if projection.shape[1] == 2: # 2D + # Calculate 2D density + densities = [] + for point in family_points: + distances = np.linalg.norm(family_points - point, axis=1) + density = np.sum(distances < np.percentile(distances, 20)) # Local density + densities.append(density) + + densities = np.array(densities) + weights = densities / densities.sum() + + # Weighted centroid + centroid_x = np.sum(family_points[:, 0] * weights) + centroid_y = np.sum(family_points[:, 1] * weights) + + centroids[family] = { + 'x': float(centroid_x), + 'y': float(centroid_y), + 'count': len(family_points) + } + + else: # 3D + # Calculate 3D density + densities = [] + for point in family_points: + distances = np.linalg.norm(family_points - point, axis=1) + density = np.sum(distances < np.percentile(distances, 20)) + densities.append(density) + + densities = np.array(densities) + weights = densities / densities.sum() + + # Weighted centroid + centroid_x = np.sum(family_points[:, 0] * weights) + centroid_y = np.sum(family_points[:, 1] * weights) + centroid_z = np.sum(family_points[:, 2] * weights) + + centroids[family] = { + 'x': float(centroid_x), + 'y': float(centroid_y), + 'z': float(centroid_z), + 'count': len(family_points) + } + + print(f"✅ {len(centroids)} centroids calculated") + return centroids + +def save_visualization_data(papers, projection, families, centroids, output_prefix): + """Save visualization data""" + + # Prepare data + viz_data = [] + for i, 
paper in enumerate(papers): + if projection.shape[1] == 2: # 2D + point = { + 'id': paper.get('id', f'paper_{i}'), + 'title': paper.get('title', ''), + 'summary': paper.get('summary', '')[:200] + '...', + 'authors': ', '.join(paper.get('authors', [])[:3]), # Max 3 authors + 'category': paper.get('primary_category', ''), + 'family': families[i], + 'x': float(projection[i, 0]), + 'y': float(projection[i, 1]) + } + else: # 3D + point = { + 'id': paper.get('id', f'paper_{i}'), + 'title': paper.get('title', ''), + 'summary': paper.get('summary', '')[:200] + '...', + 'authors': ', '.join(paper.get('authors', [])[:3]), + 'category': paper.get('primary_category', ''), + 'family': families[i], + 'x': float(projection[i, 0]), + 'y': float(projection[i, 1]), + 'z': float(projection[i, 2]) + } + viz_data.append(point) + + # Add centroids + viz_data_with_centroids = { + 'points': viz_data, + 'centroids': centroids, + 'metadata': { + 'total_papers': len(papers), + 'dimensions': projection.shape[1], + 'families': list(set(families)), + 'generated': datetime.now().isoformat() + } + } + + # Save + output_file = f"{output_prefix}.json" + with open(output_file, 'w', encoding='utf-8') as f: + json.dump(viz_data_with_centroids, f, indent=2, ensure_ascii=False) + + size_mb = os.path.getsize(output_file) / 1024 / 1024 + print(f"💾 Data saved: {output_file} ({size_mb:.1f} MB)") + + return output_file + +def main(): + """Main UMAP generation pipeline""" + print("🚀 ArXiv UMAP Generator") + print("=" * 40) + + # 1. Data loading + papers = load_papers() + if not papers: + return + + # 2. Preprocessing + papers = preprocess_papers(papers, sample_rate=5) # 1 point out of 5 + + # 3. Mapping to families + families = map_to_families(papers) + families_list = list(set(families)) + + # 4. Embedding creation + embeddings = create_embeddings(papers, max_features=3000, n_components=50) + + # 5. 
UMAP projection generation + + # UMAP 2D + print("\n🎯 Generating 2D UMAP...") + projection_2d = generate_umap_projection( + embeddings, families, + n_neighbors=50, min_dist=0.8, spread=1.0, n_components=2 + ) + + centroids_2d = calculate_density_weighted_centroids(projection_2d, families, families_list) + + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + output_2d = save_visualization_data( + papers, projection_2d, families, centroids_2d, + f"arxiv_umap_viz_2d_{timestamp}" + ) + + # UMAP 3D + print("\n🎯 Generating 3D UMAP...") + projection_3d = generate_umap_projection( + embeddings, families, + n_neighbors=50, min_dist=0.8, spread=1.0, n_components=3 + ) + + centroids_3d = calculate_density_weighted_centroids(projection_3d, families, families_list) + + output_3d = save_visualization_data( + papers, projection_3d, families, centroids_3d, + f"arxiv_umap_viz_3d_{timestamp}" + ) + + # Automatic copy to content/assets/data + import shutil + source_file = output_2d # Use 2D by default + target_dir = "../../assets/data" + target_file = os.path.join(target_dir, "data.json") + + try: + # Create directory if necessary + os.makedirs(target_dir, exist_ok=True) + shutil.copy2(source_file, target_file) + print(f"\n✅ AUTOMATIC COPY SUCCESSFUL!") + print(f"📁 {source_file} → {target_file}") + except Exception as e: + print(f"\n⚠️ Automatic copy failed: {e}") + + print(f"\n🎉 Generation completed!") + print(f"📁 Files created:") + for f in [output_2d, output_3d]: + if os.path.exists(f): + size = os.path.getsize(f) / 1024 / 1024 + print(f" - {f} ({size:.1f} MB)") + +if __name__ == "__main__": + main() diff --git a/app/src/content/embeds/banner.html b/app/src/content/embeds/banner.html index 1ad415ed788791dcb7eec078aea3fda6d4d8907c..37f6caa91500ddb8b43cf937b24d572f3affeb6b 100644 --- a/app/src/content/embeds/banner.html +++ b/app/src/content/embeds/banner.html @@ -1,267 +1,258 @@
+ // First render + resize + if (window.ResizeObserver) { + const ro = new ResizeObserver(() => render()); + ro.observe(container); + } else { + window.addEventListener('resize', render); + } + render(); + }; + if (document.readyState === 'loading') { + document.addEventListener('DOMContentLoaded', () => ensureD3(bootstrap), { once: true }); + } else { ensureD3(bootstrap); } + })(); + \ No newline at end of file diff --git a/app/src/content/embeds/d3-bar.html b/app/src/content/embeds/d3-bar.html index 15a6766e601399392e2192b14c7f21f751e76d35..b5d311bc2a6aa58cf80996ffd27f6e1e694d3243 100644 --- a/app/src/content/embeds/d3-bar.html +++ b/app/src/content/embeds/d3-bar.html @@ -1,51 +1,195 @@ -
+ \ No newline at end of file diff --git a/app/src/content/embeds/d3-line-quad.html b/app/src/content/embeds/d3-line-quad.html index 7e1c77eb7923636d60c7263c83a0a703d14ac733..b7e275ca7e6f83b15f7d6b7c6dd83a3bdcd2bd2e 100644 --- a/app/src/content/embeds/d3-line-quad.html +++ b/app/src/content/embeds/d3-line-quad.html @@ -1,74 +1,190 @@
- - - + \ No newline at end of file diff --git a/app/src/content/embeds/d3-matrix.html b/app/src/content/embeds/d3-matrix.html index c72ef203e5c43f0c878463d2d6b2e2da382db2cf..0d76ac4dbde93deb809cec26e1d65ebccc460227 100644 --- a/app/src/content/embeds/d3-matrix.html +++ b/app/src/content/embeds/d3-matrix.html @@ -1,18 +1,21 @@ -
- - - + \ No newline at end of file diff --git a/app/src/content/embeds/d3-neural-network.html b/app/src/content/embeds/d3-neural-network.html index c64f407c4432b2fe6c75590f18e6110ba9a11797..6ee10b2e538592185ff863262c279155c372bbc2 100644 --- a/app/src/content/embeds/d3-neural-network.html +++ b/app/src/content/embeds/d3-neural-network.html @@ -1,33 +1,172 @@
- - - - + \ No newline at end of file diff --git a/app/src/content/embeds/d3-pie-quad.html b/app/src/content/embeds/d3-pie-quad.html index a596e86e399191f3066b0ba5d4f8576a9b64d0fa..314d16e9870f8a4a4845e2e0257221d5fa643175 100644 --- a/app/src/content/embeds/d3-pie-quad.html +++ b/app/src/content/embeds/d3-pie-quad.html @@ -21,7 +21,7 @@ .d3-pie-quad.hovering .slice.ghost { opacity: .25; } - /* Layout HTML (pas JS) pour la grille et les cellules */ + /* HTML layout (not JS) for grid and cells */ .d3-pie-quad .plots-grid { display: flex; flex-wrap: wrap; @@ -33,21 +33,21 @@ margin-right: auto; width: 100%; } - /* Par défaut (flux ~1280): 2 colonnes centrées */ + /* Default (flow ~1280): 2 centered columns */ .content-grid .d3-pie-quad .plots-grid { width: 100%; } .content-grid .d3-pie-quad .pie-cell { flex: 0 0 calc((100% - 20px)/2); } - /* En wrappers larges: viser 4 colonnes si l'espace le permet */ + /* In wide wrappers: aim for 4 columns if space allows */ .wide .d3-pie-quad .plots-grid, .full-width .d3-pie-quad .plots-grid { width: 100%; } .wide .d3-pie-quad .pie-cell, .full-width .d3-pie-quad .pie-cell { flex: 0 0 calc((100% - 60px)/4); } - /* Forcer 2 colonnes dans le flux lorsque le parent ~1280px */ + /* Force 2 columns in flow when parent ~1280px */ .content-grid .d3-pie-quad .plots-grid { width: min(740px, 100%); } .d3-pie-quad .pie-cell { display: flex; flex-direction: column; align-items: center; - flex: 0 0 360px; /* 2 colonnes fixes dans le flux à 1280px */ + flex: 0 0 360px; /* 2 fixed columns in flow at 1280px */ } /* 4/2/1 colonnes en fonction de la largeur du parent */ /* @container (min-width: 740px) { @@ -179,10 +179,10 @@ const CAPTION_GAP = 36; // espace entre titre et donut const GAP_X = 20; // espace entre colonnes const GAP_Y = 12; // espace entre lignes - const TOP_OFFSET = 4; // décalage vertical supplémentaire pour aérer le haut + const TOP_OFFSET = 4; // additional vertical offset to air out the top const DONUT_INNER_RATIO = 0.58; // ratio du trou central (0 = pie plein, 0.5 = moitié) // LEGEND_GAP supprimé: l'espacement est désormais géré en CSS via .d3-pie-quad .legend { margin-bottom } - const SVG_VPAD = 16; // padding vertical supplémentaire à l'intérieur des SVG pour éviter la coupe + const SVG_VPAD = 16; // additional vertical padding inside SVGs to avoid cropping const updateSize = () => { width = container.clientWidth || 800; @@ -199,7 +199,7 @@ function drawPies(rows){ const { innerWidth } = updateSize(); - // Catégories (triées) + échelle de couleurs harmonisée avec banner.html + // Categories (sorted) + color scale harmonized with banner.html const categories = Array.from(new Set(rows.map(r => r.eagle_cathegory || 'Unknown'))).sort(); const getCatColors = (n) => { try { if (window.ColorPalettes && typeof window.ColorPalettes.getColors === 'function') return window.ColorPalettes.getColors('categorical', n); } catch(_) {} diff --git a/app/src/content/embeds/d3-scatter.html b/app/src/content/embeds/d3-scatter.html index 895cd77adbc6e04dbdf25f1128aa6b6098ab3819..610baf1e52e9e65ab5e15e103f8bfe97128c97c9 100644 --- a/app/src/content/embeds/d3-scatter.html +++ b/app/src/content/embeds/d3-scatter.html @@ -80,7 +80,7 @@ function getDotStrokeColor(fillColor = null){ if (!fillColor) return 'var(--muted-color)'; - // Résoudre les variables CSS en couleurs réelles + // Resolve CSS variables to actual colors let resolvedColor = fillColor; if (fillColor.startsWith('var(')) { const tempEl = document.createElement('div'); diff --git 
a/app/src/content/embeds/d3-umap-typography.html b/app/src/content/embeds/d3-umap-typography.html new file mode 100644 index 0000000000000000000000000000000000000000..47fbd89dfc09dee38527a9254cf134ec5438315f --- /dev/null +++ b/app/src/content/embeds/d3-umap-typography.html @@ -0,0 +1,804 @@ +
+ + + \ No newline at end of file diff --git a/app/src/content/embeds/demo/color-picker.html b/app/src/content/embeds/demo/color-picker.html deleted file mode 100644 index 3cd1eaf960868ff23534f5c6daa62484ad647947..0000000000000000000000000000000000000000 --- a/app/src/content/embeds/demo/color-picker.html +++ /dev/null @@ -1,226 +0,0 @@ -
- - - diff --git a/app/src/content/embeds/demo/palettes.html b/app/src/content/embeds/demo/palettes.html deleted file mode 100644 index 31850e2706bdb187409061c917a80858d0117277..0000000000000000000000000000000000000000 --- a/app/src/content/embeds/demo/palettes.html +++ /dev/null @@ -1,219 +0,0 @@ -
- - diff --git a/app/src/content/embeds/original_embeds/plotly/banner.py b/app/src/content/embeds/original_embeds/plotly/banner.py deleted file mode 100644 index 73841e91b8ae9a0e1d4dcca0e6534b507b0baabe..0000000000000000000000000000000000000000 --- a/app/src/content/embeds/original_embeds/plotly/banner.py +++ /dev/null @@ -1,134 +0,0 @@ -import plotly.graph_objects as go -import numpy as np -import pandas as pd - -# Scene parameters (same ranges as the Astro integration) -cx, cy = 1.5, 0.5 # center -a, b = 1.3, 0.45 # max extent in x/y (ellipse for anisotropy) - -# Spiral galaxy parameters -num_points = 3000 # more dots -num_arms = 3 # number of spiral arms -num_turns = 2.1 # number of turns per arm -angle_jitter = 0.12 # angular jitter to fan out the arms -pos_noise = 0.015 # global position noise - -# Generate points along spiral arms (Archimedean spiral) -t = np.random.rand(num_points) * (2 * np.pi * num_turns) # progression along the arm -arm_indices = np.random.randint(0, num_arms, size=num_points) -arm_offsets = arm_indices * (2 * np.pi / num_arms) - -theta = t + arm_offsets + np.random.randn(num_points) * angle_jitter - -# Normalized radius (0->center, 1->edge). Power <1 to densify the core -r_norm = (t / (2 * np.pi * num_turns)) ** 0.9 - -# Radial/lateral noise that slightly increases with radius -noise_x = pos_noise * (0.8 + 0.6 * r_norm) * np.random.randn(num_points) -noise_y = pos_noise * (0.8 + 0.6 * r_norm) * np.random.randn(num_points) - -# Elliptic projection -x_spiral = cx + a * r_norm * np.cos(theta) + noise_x -y_spiral = cy + b * r_norm * np.sin(theta) + noise_y - -# Central bulge (additional points very close to the core) -bulge_points = int(0.18 * num_points) -phi_b = 2 * np.pi * np.random.rand(bulge_points) -r_b = (np.random.rand(bulge_points) ** 2.2) * 0.22 # compact bulge -noise_x_b = (pos_noise * 0.6) * np.random.randn(bulge_points) -noise_y_b = (pos_noise * 0.6) * np.random.randn(bulge_points) -x_bulge = cx + a * r_b * np.cos(phi_b) + noise_x_b -y_bulge = cy + b * r_b * np.sin(phi_b) + noise_y_b - -# Concatenation -x = np.concatenate([x_spiral, x_bulge]) -y = np.concatenate([y_spiral, y_bulge]) - -# Central intensity (for sizes/colors). 
1 at center, ~0 at edge -z_spiral = 1 - r_norm -z_bulge = 1 - (r_b / max(r_b.max(), 1e-6)) # very bright bulge -z_raw = np.concatenate([z_spiral, z_bulge]) - -# Sizes: keep the 5..10 scale for consistency -sizes = (z_raw + 1) * 5 - -# Remove intermediate filtering: keep all placed points, filter at the very end - -df = pd.DataFrame({ - "x": x, - "y": y, - "z": sizes, # reused for size+color as before -}) - -def get_label(z): - if z < 0.25: - return "smol dot" - if z < 0.5: - return "ok-ish dot" - if z < 0.75: - return "a dot" - else: - return "biiig dot" - -# Labels based on central intensity -df["label"] = pd.Series(z_raw).apply(get_label) - -# Rendering order: small points first, big ones after (on top) -df = df.sort_values(by="z", ascending=True).reset_index(drop=True) - -fig = go.Figure() - -fig.add_trace(go.Scattergl( - x=df['x'], - y=df['y'], - mode='markers', - marker=dict( - size=df['z'], - color=df['z'], - colorscale=[ - [0, 'rgb(78, 165, 183)'], - [0.5, 'rgb(206, 192, 250)'], - [1, 'rgb(232, 137, 171)'] - ], - opacity=0.9, - ), - customdata=df[["label"]], - hovertemplate="Dot category: %{customdata[0]}", - hoverlabel=dict(namelength=0), - showlegend=False -)) - -fig.update_layout( - autosize=True, - paper_bgcolor='rgba(0,0,0,0)', - plot_bgcolor='rgba(0,0,0,0)', - showlegend=False, - margin=dict(l=0, r=0, t=0, b=0), - xaxis=dict( - showgrid=False, - zeroline=False, - showticklabels=False, - range=[0, 3] - ), - yaxis=dict( - showgrid=False, - zeroline=False, - showticklabels=False, - scaleanchor="x", - scaleratio=1, - range=[0, 1] - ) -) - -# fig.show() - -fig.write_html( - "../app/src/content/fragments/banner.html", - include_plotlyjs=False, - full_html=False, - config={ - 'displayModeBar': False, - 'responsive': True, - 'scrollZoom': False, - } -) \ No newline at end of file diff --git a/app/src/content/embeds/original_embeds/plotly/bar.py b/app/src/content/embeds/original_embeds/plotly/bar.py deleted file mode 100644 index 4137fb1a2f90da2f7cf2ad1aec4bc15ca9c5720a..0000000000000000000000000000000000000000 --- a/app/src/content/embeds/original_embeds/plotly/bar.py +++ /dev/null @@ -1,173 +0,0 @@ -import plotly.graph_objects as go -import plotly.io as pio -import numpy as np - -""" -Stacked bar chart: GPU memory breakdown vs sequence length, with menus for Model Size and Recomputation. -Responsive, no zoom/pan, clean hover; styled to match the minimal theme. 
-""" - -# Axes -seq_labels = ["1024", "2048", "4096", "8192"] -seq_scale = np.array([1, 2, 4, 8], dtype=float) - -# Components and colors (aligned with the provided example) -components = [ - ("parameters", "rgb(78, 165, 183)"), - ("gradients", "rgb(227, 138, 66)"), - ("optimizer", "rgb(232, 137, 171)"), - ("activations", "rgb(206, 192, 250)"), -] - -# Model sizes and base memory (GB) for params/grad/opt (constant vs seq), by size -model_sizes = ["1B", "3B", "8B", "70B", "405B"] -params_mem = { - "1B": 4.0, - "3B": 13.3, - "8B": 26.0, - "70B": 244.0, - "405B": 1520.0, -} -# Optimizer ~= 2x params; gradients ~= params (illustrative) - -# Activations base coefficient per size (growth ~ coeff * (seq/1024)^2) -act_coeff = { - "1B": 3.6, - "3B": 9.3, - "8B": 46.2, - "70B": 145.7, - "405B": 1519.9, -} - -def activations_curve(size_key: str, recompute: str) -> np.ndarray: - base = act_coeff[size_key] * (seq_scale ** 2) - if recompute == "selective": - return base * 0.25 - if recompute == "full": - return base * (1.0/16.0) - return base - -def stack_for(size_key: str, recompute: str): - p = np.full_like(seq_scale, params_mem[size_key], dtype=float) - g = np.full_like(seq_scale, params_mem[size_key], dtype=float) - o = np.full_like(seq_scale, 2.0 * params_mem[size_key], dtype=float) - a = activations_curve(size_key, recompute) - return { - "parameters": p, - "gradients": g, - "optimizer": o, - "activations": a, - } - -# Precompute all combinations -recomp_modes = ["none", "selective", "full"] -Y = {mode: {size: stack_for(size, mode) for size in model_sizes} for mode in recomp_modes} - -# Build traces: 4 traces per size (20 total). Start with size index 0 visible -fig = go.Figure() -for size in model_sizes: - for comp_name, color in components: - fig.add_bar( - x=seq_labels, - y=Y["none"][size][comp_name], - name=comp_name, - marker=dict(color=color), - hovertemplate="Seq len=%{x}
Mem=%{y:.1f}GB
%{data.name}", - showlegend=True, - visible=(size == model_sizes[0]), - ) - -# Compute y-axis ranges per size and recomputation -def max_total(size: str, mode: str) -> float: - stacks = Y[mode][size] - totals = stacks["parameters"] + stacks["gradients"] + stacks["optimizer"] + stacks["activations"] - return float(np.max(totals)) - -layout_y_ranges = {mode: {size: 1.05 * max_total(size, mode) for size in model_sizes} for mode in recomp_modes} - -# Layout -fig.update_layout( - barmode="stack", - autosize=True, - paper_bgcolor="rgba(0,0,0,0)", - plot_bgcolor="rgba(0,0,0,0)", - margin=dict(l=40, r=28, t=20, b=40), - hovermode="x unified", - legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="left", x=0), - xaxis=dict(title=dict(text="Sequence Length"), fixedrange=True), - yaxis=dict(title=dict(text="Memory (GB)"), fixedrange=True), -) - -# Updatemenus: Model Size (toggle visibility) -buttons_sizes = [] -for i, size in enumerate(model_sizes): - visible = [False] * (len(model_sizes) * len(components)) - start = i * len(components) - for j in range(len(components)): - visible[start + j] = True - buttons_sizes.append(dict( - label=size, - method="update", - args=[ - {"visible": visible}, - {"yaxis": {"range": [0, layout_y_ranges["none"][size]]}}, - ], - )) - -# Updatemenus: Recomputation (restyle y across all traces) -def y_for_mode(mode: str): - ys = [] - for size in model_sizes: - stacks = Y[mode][size] - for comp_name, _ in components: - ys.append(stacks[comp_name]) - return ys - -buttons_recomp = [] -for mode, label in [("none", "None"), ("selective", "selective"), ("full", "full")]: - ys = y_for_mode(mode) - # Flatten into the format expected by Plotly for multiple traces - buttons_recomp.append(dict( - label=label, - method="update", - args=[ - {"y": ys}, - {"yaxis": {"range": [0, max(layout_y_ranges[mode].values())]}}, - ], - )) - -fig.update_layout( - updatemenus=[ - dict( - type="dropdown", - x=1.03, xanchor="left", - y=0.60, yanchor="top", - showactive=True, - active=0, - buttons=buttons_sizes, - ), - dict( - type="dropdown", - x=1.03, xanchor="left", - y=0.40, yanchor="top", - showactive=True, - active=0, - buttons=buttons_recomp, - ), - ], - annotations=[ - dict(text="Model Size:", x=1.03, xanchor="left", xref="paper", y=0.60, yanchor="bottom", yref="paper", showarrow=False), - dict(text="Recomputation:", x=1.03, xanchor="left", xref="paper", y=0.40, yanchor="bottom", yref="paper", showarrow=False), - ], -) - -# Write fragment -fig.write_html("../../app/src/content/fragments/bar.html", - include_plotlyjs=False, - full_html=False, - config={ - 'displayModeBar': False, - 'responsive': True, - 'scrollZoom': False, - }) - diff --git a/app/src/content/embeds/original_embeds/plotly/heatmap.py b/app/src/content/embeds/original_embeds/plotly/heatmap.py deleted file mode 100644 index 3ca90d9eb42502e593dad2545993e6195e9d1cff..0000000000000000000000000000000000000000 --- a/app/src/content/embeds/original_embeds/plotly/heatmap.py +++ /dev/null @@ -1,125 +0,0 @@ -import plotly.graph_objects as go -import plotly.io as pio -import numpy as np -import datetime as dt -import os - -""" -Calendar-like heatmap (GitHub-style) over the last 52 weeks. -Minimal, responsive, transparent background; suitable for Distill. 
-""" - -# Parameters -NUM_WEEKS = 52 -DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] - -# Build dates matrix (7 rows x NUM_WEEKS columns) -today = dt.date.today() -# Align to start of current week (Monday) -start = today - dt.timedelta(days=(today.weekday())) # Monday of current week -weeks = [start - dt.timedelta(weeks=w) for w in range(NUM_WEEKS-1, -1, -1)] -dates = [[weeks[c] + dt.timedelta(days=r) for c in range(NUM_WEEKS)] for r in range(7)] - -# Generate values (synthetic) — smooth seasonal pattern + noise -def gen_value(d: dt.date) -> float: - day_of_year = d.timetuple().tm_yday - base = 0.5 + 0.45 * np.sin(2 * np.pi * (day_of_year / 365.0)) - noise = np.random.default_rng(hash(d) % 2**32).uniform(-0.15, 0.15) - return max(0.0, min(1.0, base + noise)) - -z = [[gen_value(d) for d in row] for row in dates] -custom = [[d.isoformat() for d in row] for row in dates] - -# Colors aligned with other charts (slate / blue / gray) -colorscale = [ - [0.00, "#e5e7eb"], # light gray background for low - [0.40, "#64748b"], # slate-500 - [0.75, "#2563eb"], # blue-600 - [1.00, "#4b5563"], # gray-600 (high end accent) -] - -fig = go.Figure( - data=go.Heatmap( - z=z, - x=[w.isoformat() for w in weeks], - y=DAYS, - colorscale=colorscale, - showscale=False, - hovertemplate="Date: %{customdata}
Value: %{z:.2f}", - customdata=custom, - xgap=2, - ygap=2, - ) -) - -fig.update_layout( - autosize=True, - paper_bgcolor="rgba(0,0,0,0)", - plot_bgcolor="rgba(0,0,0,0)", - margin=dict(l=28, r=12, t=8, b=28), - xaxis=dict( - showgrid=False, - zeroline=False, - showline=False, - ticks="", - showticklabels=False, - fixedrange=True, - ), - yaxis=dict( - showgrid=False, - zeroline=False, - showline=False, - ticks="", - tickfont=dict(size=12, color="rgba(0,0,0,0.65)"), - fixedrange=True, - ), -) - -post_script = """ -(function(){ - var plots = document.querySelectorAll('.js-plotly-plot'); - plots.forEach(function(gd){ - function round(){ - try { - var root = gd && gd.parentNode ? gd.parentNode : document; - var rects = root.querySelectorAll('.hoverlayer .hovertext rect'); - rects.forEach(function(r){ r.setAttribute('rx', 8); r.setAttribute('ry', 8); }); - } catch(e) {} - } - if (gd && gd.on){ - gd.on('plotly_hover', round); - gd.on('plotly_unhover', round); - gd.on('plotly_relayout', round); - } - setTimeout(round, 0); - }); -})(); -""" - -html = pio.to_html( - fig, - include_plotlyjs=False, - full_html=False, - post_script=post_script, - config={ - "displayModeBar": False, - "responsive": True, - "scrollZoom": False, - "doubleClick": False, - "modeBarButtonsToRemove": [ - "zoom2d", "pan2d", "select2d", "lasso2d", - "zoomIn2d", "zoomOut2d", "autoScale2d", "resetScale2d", - "toggleSpikelines" - ], - }, -) - -fig.write_html("../app/src/content/fragments/heatmap.html", - include_plotlyjs=False, - full_html=False, - config={ - 'displayModeBar': False, - 'responsive': True, - 'scrollZoom': False, - }) - diff --git a/app/src/content/embeds/original_embeds/plotly/line.py b/app/src/content/embeds/original_embeds/plotly/line.py deleted file mode 100644 index 1fc71f26a0f3385b8c5455c6668abb2d26333e0f..0000000000000000000000000000000000000000 --- a/app/src/content/embeds/original_embeds/plotly/line.py +++ /dev/null @@ -1,276 +0,0 @@ -import plotly.graph_objects as go -import plotly.io as pio -import numpy as np -import os -import uuid - -""" -Interactive line chart example (Baseline / Improved / Target) with a live slider. - -Context: research-style training curves for multiple datasets (CIFAR-10, CIFAR-100, ImageNet-1K). -The slider "Augmentation α" blends the Improved curve between the Baseline (α=0) -and an augmented counterpart (α=1) via a simple mixing equation. -Export remains responsive, with no zoom and no mode bar. 
-""" - -# Grid (x) and parameterization -N = 240 -x = np.linspace(0, 1, N) - -# Logistic helper for smooth learning curves -def logistic(xv: np.ndarray, ymin: float, ymax: float, k: float, x0: float) -> np.ndarray: - return ymin + (ymax - ymin) / (1.0 + np.exp(-k * (xv - x0))) - -# Plausible dataset params (baseline vs augmented) + a constant target line -datasets_params = [ - { - "name": "CIFAR-10", - "base": {"ymin": 0.10, "ymax": 0.90, "k": 10.0, "x0": 0.55}, - "aug": {"ymin": 0.15, "ymax": 0.96, "k": 12.0, "x0": 0.40}, - "target": 0.97, - }, - { - "name": "CIFAR-100", - "base": {"ymin": 0.05, "ymax": 0.70, "k": 9.5, "x0": 0.60}, - "aug": {"ymin": 0.08, "ymax": 0.80, "k": 11.0, "x0": 0.45}, - "target": 0.85, - }, - { - "name": "ImageNet-1K", - "base": {"ymin": 0.02, "ymax": 0.68, "k": 8.5, "x0": 0.65}, - "aug": {"ymin": 0.04, "ymax": 0.75, "k": 9.5, "x0": 0.50}, - "target": 0.82, - }, -] - -# Initial dataset index and alpha -alpha0 = 0.7 -ds0 = datasets_params[0] -base0 = logistic(x, **ds0["base"]) -aug0 = logistic(x, **ds0["aug"]) -target0 = np.full_like(x, ds0["target"], dtype=float) - -# Traces: Baseline (fixed), Improved (blended by α), Target (constant goal) -blend = lambda l, e, a: (1 - a) * l + a * e -y1 = base0 -y2 = blend(base0, aug0, alpha0) -y3 = target0 - -color_base = "#64748b" # slate-500 -color_improved = "#F981D4" # pink -color_target = "#4b5563" # gray-600 (dash) - -fig = go.Figure() -fig.add_trace( - go.Scatter( - x=x, - y=y1, - name="Baseline", - mode="lines", - line=dict(color=color_base, width=2, shape="spline", smoothing=0.6), - hovertemplate="%{fullData.name}
x=%{x:.2f}
y=%{y:.3f}", - showlegend=True, - ) -) -fig.add_trace( - go.Scatter( - x=x, - y=y2, - name="Improved", - mode="lines", - line=dict(color=color_improved, width=2, shape="spline", smoothing=0.6), - hovertemplate="%{fullData.name}
x=%{x:.2f}
y=%{y:.3f}", - showlegend=True, - ) -) -fig.add_trace( - go.Scatter( - x=x, - y=y3, - name="Target", - mode="lines", - line=dict(color=color_target, width=2, dash="dash"), - hovertemplate="%{fullData.name}
x=%{x:.2f}
y=%{y:.3f}", - showlegend=True, - ) -) - -fig.update_layout( - autosize=True, - paper_bgcolor="rgba(0,0,0,0)", - plot_bgcolor="rgba(0,0,0,0)", - margin=dict(l=40, r=28, t=20, b=40), - hovermode="x unified", - legend=dict( - orientation="v", - x=1, - y=0, - xanchor="right", - yanchor="bottom", - bgcolor="rgba(255,255,255,0)", - borderwidth=0, - ), - hoverlabel=dict( - bgcolor="white", - font=dict(color="#111827", size=12), - bordercolor="rgba(0,0,0,0.15)", - align="left", - namelength=-1, - ), - xaxis=dict( - showgrid=False, - zeroline=False, - showline=True, - linecolor="rgba(0,0,0,0.25)", - linewidth=1, - ticks="outside", - ticklen=6, - tickcolor="rgba(0,0,0,0.25)", - tickfont=dict(size=12, color="rgba(0,0,0,0.55)"), - title=None, - automargin=True, - fixedrange=True, - ), - yaxis=dict( - showgrid=False, - zeroline=False, - showline=True, - linecolor="rgba(0,0,0,0.25)", - linewidth=1, - ticks="outside", - ticklen=6, - tickcolor="rgba(0,0,0,0.25)", - tickfont=dict(size=12, color="rgba(0,0,0,0.55)"), - title=None, - tickformat=".2f", - rangemode="tozero", - automargin=True, - fixedrange=True, - ), -) - -# Write the fragment next to this file into src/fragments/line.html (robust path) -output_path = os.path.join(os.path.dirname(__file__), "fragments", "line.html") -os.makedirs(os.path.dirname(output_path), exist_ok=True) - -# Inject a small post-render script to round the hover box corners -post_script = """ -(function(){ - function attach(gd){ - function round(){ - try { - var root = gd && gd.parentNode ? gd.parentNode : document; - var rects = root.querySelectorAll('.hoverlayer .hovertext rect'); - rects.forEach(function(r){ r.setAttribute('rx', 8); r.setAttribute('ry', 8); }); - } catch(e) {} - } - if (gd && gd.on) { - gd.on('plotly_hover', round); - gd.on('plotly_unhover', round); - gd.on('plotly_relayout', round); - } - setTimeout(round, 0); - } - var plots = document.querySelectorAll('.js-plotly-plot'); - plots.forEach(attach); -})(); -""" - -html_plot = pio.to_html( - fig, - include_plotlyjs=False, - full_html=False, - post_script=post_script, - config={ - "displayModeBar": False, - "responsive": True, - "scrollZoom": False, - "doubleClick": False, - "modeBarButtonsToRemove": [ - "zoom2d", "pan2d", "select2d", "lasso2d", - "zoomIn2d", "zoomOut2d", "autoScale2d", "resetScale2d", - "toggleSpikelines" - ], - }, -) - -# Build a self-contained fragment with a live slider (no mouseup required) -uid = uuid.uuid4().hex[:8] -slider_id = f"line-ex-alpha-{uid}" -container_id = f"line-ex-container-{uid}" - -slider_tpl = ''' -
- __PLOT__ -
- - -
-
- -''' - -slider_html = (slider_tpl - .replace('__CID__', container_id) - .replace('__SID__', slider_id) - .replace('__A0__', f"{alpha0:.2f}") - .replace('__N__', str(N)) - .replace('__PLOT__', html_plot) -) - -with open("../../app/src/content/fragments/line.html", "w", encoding="utf-8") as f: - f.write(slider_html) - diff --git a/app/src/content/embeds/original_embeds/plotly/poetry.lock b/app/src/content/embeds/original_embeds/plotly/poetry.lock deleted file mode 100644 index 55dc5b270a044b51d1def9be28f25d940a66265f..0000000000000000000000000000000000000000 --- a/app/src/content/embeds/original_embeds/plotly/poetry.lock +++ /dev/null @@ -1,511 +0,0 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. - -[[package]] -name = "certifi" -version = "2025.8.3" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5"}, - {file = "certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.4.3" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f"}, - {file = "charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b"}, - {file = 
"charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849"}, - {file = "charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37"}, - {file = "charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = 
"sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce"}, - {file = "charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = 
"sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce"}, - {file = "charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0f2be7e0cf7754b9a30eb01f4295cc3d4358a479843b31f328afd210e2c7598c"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c60e092517a73c632ec38e290eba714e9627abe9d301c8c8a12ec32c314a2a4b"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:252098c8c7a873e17dd696ed98bbe91dbacd571da4b87df3736768efa7a792e4"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3653fad4fe3ed447a596ae8638b437f827234f01a8cd801842e43f3d0a6b281b"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8999f965f922ae054125286faf9f11bc6932184b93011d138925a1773830bbe9"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d95bfb53c211b57198bb91c46dd5a2d8018b3af446583aab40074bf7988401cb"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:5b413b0b1bfd94dbf4023ad6945889f374cd24e3f62de58d6bb102c4d9ae534a"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:b5e3b2d152e74e100a9e9573837aba24aab611d39428ded46f4e4022ea7d1942"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a2d08ac246bb48479170408d6c19f6385fa743e7157d716e144cad849b2dd94b"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-win32.whl", hash = "sha256:ec557499516fc90fd374bf2e32349a2887a876fbf162c160e3c01b6849eaf557"}, - {file = "charset_normalizer-3.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:5d8d01eac18c423815ed4f4a2ec3b439d654e55ee4ad610e153cf02faf67ea40"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312"}, - {file = 
"charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-win32.whl", hash = "sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432"}, - {file = "charset_normalizer-3.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca"}, - {file = "charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a"}, - {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"}, -] - -[[package]] -name = "idna" -version = "3.10" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, - {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, -] - -[package.extras] -all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] - -[[package]] -name = "markdown" -version = "3.8.2" -description = "Python implementation of John Gruber's Markdown." -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "markdown-3.8.2-py3-none-any.whl", hash = "sha256:5c83764dbd4e00bdd94d85a19b8d55ccca20fe35b2e678a1422b380324dd5f24"}, - {file = "markdown-3.8.2.tar.gz", hash = "sha256:247b9a70dd12e27f67431ce62523e675b866d254f900c4fe75ce3dda62237c45"}, -] - -[package.extras] -docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] -testing = ["coverage", "pyyaml"] - -[[package]] -name = "numpy" -version = "2.2.6" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.10" -groups = ["main"] -markers = "python_version == \"3.10\"" -files = [ - {file = "numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb"}, - {file = "numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90"}, - {file = "numpy-2.2.6-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:37e990a01ae6ec7fe7fa1c26c55ecb672dd98b19c3d0e1d1f326fa13cb38d163"}, - {file = "numpy-2.2.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:5a6429d4be8ca66d889b7cf70f536a397dc45ba6faeb5f8c5427935d9592e9cf"}, - {file = "numpy-2.2.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efd28d4e9cd7d7a8d39074a4d44c63eda73401580c5c76acda2ce969e0a38e83"}, - {file = "numpy-2.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc7b73d02efb0e18c000e9ad8b83480dfcd5dfd11065997ed4c6747470ae8915"}, - {file = "numpy-2.2.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74d4531beb257d2c3f4b261bfb0fc09e0f9ebb8842d82a7b4209415896adc680"}, - {file = "numpy-2.2.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8fc377d995680230e83241d8a96def29f204b5782f371c532579b4f20607a289"}, - {file = "numpy-2.2.6-cp310-cp310-win32.whl", hash = 
"sha256:b093dd74e50a8cba3e873868d9e93a85b78e0daf2e98c6797566ad8044e8363d"}, - {file = "numpy-2.2.6-cp310-cp310-win_amd64.whl", hash = "sha256:f0fd6321b839904e15c46e0d257fdd101dd7f530fe03fd6359c1ea63738703f3"}, - {file = "numpy-2.2.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae"}, - {file = "numpy-2.2.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a"}, - {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42"}, - {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491"}, - {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a"}, - {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf"}, - {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1"}, - {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab"}, - {file = "numpy-2.2.6-cp311-cp311-win32.whl", hash = "sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47"}, - {file = "numpy-2.2.6-cp311-cp311-win_amd64.whl", hash = "sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303"}, - {file = "numpy-2.2.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41c5a21f4a04fa86436124d388f6ed60a9343a6f767fced1a8a71c3fbca038ff"}, - {file = "numpy-2.2.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de749064336d37e340f640b05f24e9e3dd678c57318c7289d222a8a2f543e90c"}, - {file = "numpy-2.2.6-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:894b3a42502226a1cac872f840030665f33326fc3dac8e57c607905773cdcde3"}, - {file = "numpy-2.2.6-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:71594f7c51a18e728451bb50cc60a3ce4e6538822731b2933209a1f3614e9282"}, - {file = "numpy-2.2.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2618db89be1b4e05f7a1a847a9c1c0abd63e63a1607d892dd54668dd92faf87"}, - {file = "numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249"}, - {file = "numpy-2.2.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:37c0ca431f82cd5fa716eca9506aefcabc247fb27ba69c5062a6d3ade8cf8f49"}, - {file = "numpy-2.2.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe27749d33bb772c80dcd84ae7e8df2adc920ae8297400dabec45f0dedb3f6de"}, - {file = "numpy-2.2.6-cp312-cp312-win32.whl", hash = "sha256:4eeaae00d789f66c7a25ac5f34b71a7035bb474e679f410e5e1a94deb24cf2d4"}, - {file = "numpy-2.2.6-cp312-cp312-win_amd64.whl", hash = "sha256:c1f9540be57940698ed329904db803cf7a402f3fc200bfe599334c9bd84a40b2"}, - {file = "numpy-2.2.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0811bb762109d9708cca4d0b13c4f67146e3c3b7cf8d34018c722adb2d957c84"}, - {file = "numpy-2.2.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:287cc3162b6f01463ccd86be154f284d0893d2b3ed7292439ea97eafa8170e0b"}, - {file = "numpy-2.2.6-cp313-cp313-macosx_14_0_arm64.whl", hash = 
"sha256:f1372f041402e37e5e633e586f62aa53de2eac8d98cbfb822806ce4bbefcb74d"}, - {file = "numpy-2.2.6-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:55a4d33fa519660d69614a9fad433be87e5252f4b03850642f88993f7b2ca566"}, - {file = "numpy-2.2.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f92729c95468a2f4f15e9bb94c432a9229d0d50de67304399627a943201baa2f"}, - {file = "numpy-2.2.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bc23a79bfabc5d056d106f9befb8d50c31ced2fbc70eedb8155aec74a45798f"}, - {file = "numpy-2.2.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e3143e4451880bed956e706a3220b4e5cf6172ef05fcc397f6f36a550b1dd868"}, - {file = "numpy-2.2.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4f13750ce79751586ae2eb824ba7e1e8dba64784086c98cdbbcc6a42112ce0d"}, - {file = "numpy-2.2.6-cp313-cp313-win32.whl", hash = "sha256:5beb72339d9d4fa36522fc63802f469b13cdbe4fdab4a288f0c441b74272ebfd"}, - {file = "numpy-2.2.6-cp313-cp313-win_amd64.whl", hash = "sha256:b0544343a702fa80c95ad5d3d608ea3599dd54d4632df855e4c8d24eb6ecfa1c"}, - {file = "numpy-2.2.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0bca768cd85ae743b2affdc762d617eddf3bcf8724435498a1e80132d04879e6"}, - {file = "numpy-2.2.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fc0c5673685c508a142ca65209b4e79ed6740a4ed6b2267dbba90f34b0b3cfda"}, - {file = "numpy-2.2.6-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:5bd4fc3ac8926b3819797a7c0e2631eb889b4118a9898c84f585a54d475b7e40"}, - {file = "numpy-2.2.6-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:fee4236c876c4e8369388054d02d0e9bb84821feb1a64dd59e137e6511a551f8"}, - {file = "numpy-2.2.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dda9c7e08dc141e0247a5b8f49cf05984955246a327d4c48bda16821947b2f"}, - {file = "numpy-2.2.6-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f447e6acb680fd307f40d3da4852208af94afdfab89cf850986c3ca00562f4fa"}, - {file = "numpy-2.2.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:389d771b1623ec92636b0786bc4ae56abafad4a4c513d36a55dce14bd9ce8571"}, - {file = "numpy-2.2.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8e9ace4a37db23421249ed236fdcdd457d671e25146786dfc96835cd951aa7c1"}, - {file = "numpy-2.2.6-cp313-cp313t-win32.whl", hash = "sha256:038613e9fb8c72b0a41f025a7e4c3f0b7a1b5d768ece4796b674c8f3fe13efff"}, - {file = "numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06"}, - {file = "numpy-2.2.6-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0b605b275d7bd0c640cad4e5d30fa701a8d59302e127e5f79138ad62762c3e3d"}, - {file = "numpy-2.2.6-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:7befc596a7dc9da8a337f79802ee8adb30a552a94f792b9c9d18c840055907db"}, - {file = "numpy-2.2.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce47521a4754c8f4593837384bd3424880629f718d87c5d44f8ed763edd63543"}, - {file = "numpy-2.2.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d042d24c90c41b54fd506da306759e06e568864df8ec17ccc17e9e884634fd00"}, - {file = "numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd"}, -] - -[[package]] -name = "numpy" -version = "2.3.2" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.11" -groups = ["main"] -markers = "python_version >= \"3.11\"" -files = [ - 
{file = "numpy-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:852ae5bed3478b92f093e30f785c98e0cb62fa0a939ed057c31716e18a7a22b9"}, - {file = "numpy-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a0e27186e781a69959d0230dd9909b5e26024f8da10683bd6344baea1885168"}, - {file = "numpy-2.3.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:f0a1a8476ad77a228e41619af2fa9505cf69df928e9aaa165746584ea17fed2b"}, - {file = "numpy-2.3.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:cbc95b3813920145032412f7e33d12080f11dc776262df1712e1638207dde9e8"}, - {file = "numpy-2.3.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f75018be4980a7324edc5930fe39aa391d5734531b1926968605416ff58c332d"}, - {file = "numpy-2.3.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20b8200721840f5621b7bd03f8dcd78de33ec522fc40dc2641aa09537df010c3"}, - {file = "numpy-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f91e5c028504660d606340a084db4b216567ded1056ea2b4be4f9d10b67197f"}, - {file = "numpy-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fb1752a3bb9a3ad2d6b090b88a9a0ae1cd6f004ef95f75825e2f382c183b2097"}, - {file = "numpy-2.3.2-cp311-cp311-win32.whl", hash = "sha256:4ae6863868aaee2f57503c7a5052b3a2807cf7a3914475e637a0ecd366ced220"}, - {file = "numpy-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:240259d6564f1c65424bcd10f435145a7644a65a6811cfc3201c4a429ba79170"}, - {file = "numpy-2.3.2-cp311-cp311-win_arm64.whl", hash = "sha256:4209f874d45f921bde2cff1ffcd8a3695f545ad2ffbef6d3d3c6768162efab89"}, - {file = "numpy-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bc3186bea41fae9d8e90c2b4fb5f0a1f5a690682da79b92574d63f56b529080b"}, - {file = "numpy-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f4f0215edb189048a3c03bd5b19345bdfa7b45a7a6f72ae5945d2a28272727f"}, - {file = "numpy-2.3.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:8b1224a734cd509f70816455c3cffe13a4f599b1bf7130f913ba0e2c0b2006c0"}, - {file = "numpy-2.3.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3dcf02866b977a38ba3ec10215220609ab9667378a9e2150615673f3ffd6c73b"}, - {file = "numpy-2.3.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:572d5512df5470f50ada8d1972c5f1082d9a0b7aa5944db8084077570cf98370"}, - {file = "numpy-2.3.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8145dd6d10df13c559d1e4314df29695613575183fa2e2d11fac4c208c8a1f73"}, - {file = "numpy-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:103ea7063fa624af04a791c39f97070bf93b96d7af7eb23530cd087dc8dbe9dc"}, - {file = "numpy-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc927d7f289d14f5e037be917539620603294454130b6de200091e23d27dc9be"}, - {file = "numpy-2.3.2-cp312-cp312-win32.whl", hash = "sha256:d95f59afe7f808c103be692175008bab926b59309ade3e6d25009e9a171f7036"}, - {file = "numpy-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:9e196ade2400c0c737d93465327d1ae7c06c7cb8a1756121ebf54b06ca183c7f"}, - {file = "numpy-2.3.2-cp312-cp312-win_arm64.whl", hash = "sha256:ee807923782faaf60d0d7331f5e86da7d5e3079e28b291973c545476c2b00d07"}, - {file = "numpy-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c8d9727f5316a256425892b043736d63e89ed15bbfe6556c5ff4d9d4448ff3b3"}, - {file = "numpy-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:efc81393f25f14d11c9d161e46e6ee348637c0a1e8a54bf9dedc472a3fae993b"}, - {file = "numpy-2.3.2-cp313-cp313-macosx_14_0_arm64.whl", hash = 
"sha256:dd937f088a2df683cbb79dda9a772b62a3e5a8a7e76690612c2737f38c6ef1b6"}, - {file = "numpy-2.3.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:11e58218c0c46c80509186e460d79fbdc9ca1eb8d8aee39d8f2dc768eb781089"}, - {file = "numpy-2.3.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5ad4ebcb683a1f99f4f392cc522ee20a18b2bb12a2c1c42c3d48d5a1adc9d3d2"}, - {file = "numpy-2.3.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:938065908d1d869c7d75d8ec45f735a034771c6ea07088867f713d1cd3bbbe4f"}, - {file = "numpy-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:66459dccc65d8ec98cc7df61307b64bf9e08101f9598755d42d8ae65d9a7a6ee"}, - {file = "numpy-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7af9ed2aa9ec5950daf05bb11abc4076a108bd3c7db9aa7251d5f107079b6a6"}, - {file = "numpy-2.3.2-cp313-cp313-win32.whl", hash = "sha256:906a30249315f9c8e17b085cc5f87d3f369b35fedd0051d4a84686967bdbbd0b"}, - {file = "numpy-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:c63d95dc9d67b676e9108fe0d2182987ccb0f11933c1e8959f42fa0da8d4fa56"}, - {file = "numpy-2.3.2-cp313-cp313-win_arm64.whl", hash = "sha256:b05a89f2fb84d21235f93de47129dd4f11c16f64c87c33f5e284e6a3a54e43f2"}, - {file = "numpy-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4e6ecfeddfa83b02318f4d84acf15fbdbf9ded18e46989a15a8b6995dfbf85ab"}, - {file = "numpy-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:508b0eada3eded10a3b55725b40806a4b855961040180028f52580c4729916a2"}, - {file = "numpy-2.3.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:754d6755d9a7588bdc6ac47dc4ee97867271b17cee39cb87aef079574366db0a"}, - {file = "numpy-2.3.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f66e7d2b2d7712410d3bc5684149040ef5f19856f20277cd17ea83e5006286"}, - {file = "numpy-2.3.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de6ea4e5a65d5a90c7d286ddff2b87f3f4ad61faa3db8dabe936b34c2275b6f8"}, - {file = "numpy-2.3.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3ef07ec8cbc8fc9e369c8dcd52019510c12da4de81367d8b20bc692aa07573a"}, - {file = "numpy-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:27c9f90e7481275c7800dc9c24b7cc40ace3fdb970ae4d21eaff983a32f70c91"}, - {file = "numpy-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:07b62978075b67eee4065b166d000d457c82a1efe726cce608b9db9dd66a73a5"}, - {file = "numpy-2.3.2-cp313-cp313t-win32.whl", hash = "sha256:c771cfac34a4f2c0de8e8c97312d07d64fd8f8ed45bc9f5726a7e947270152b5"}, - {file = "numpy-2.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:72dbebb2dcc8305c431b2836bcc66af967df91be793d63a24e3d9b741374c450"}, - {file = "numpy-2.3.2-cp313-cp313t-win_arm64.whl", hash = "sha256:72c6df2267e926a6d5286b0a6d556ebe49eae261062059317837fda12ddf0c1a"}, - {file = "numpy-2.3.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:448a66d052d0cf14ce9865d159bfc403282c9bc7bb2a31b03cc18b651eca8b1a"}, - {file = "numpy-2.3.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:546aaf78e81b4081b2eba1d105c3b34064783027a06b3ab20b6eba21fb64132b"}, - {file = "numpy-2.3.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:87c930d52f45df092f7578889711a0768094debf73cfcde105e2d66954358125"}, - {file = "numpy-2.3.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:8dc082ea901a62edb8f59713c6a7e28a85daddcb67454c839de57656478f5b19"}, - {file = "numpy-2.3.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:af58de8745f7fa9ca1c0c7c943616c6fe28e75d0c81f5c295810e3c83b5be92f"}, - {file = "numpy-2.3.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed5527c4cf10f16c6d0b6bee1f89958bccb0ad2522c8cadc2efd318bcd545f5"}, - {file = "numpy-2.3.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:095737ed986e00393ec18ec0b21b47c22889ae4b0cd2d5e88342e08b01141f58"}, - {file = "numpy-2.3.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5e40e80299607f597e1a8a247ff8d71d79c5b52baa11cc1cce30aa92d2da6e0"}, - {file = "numpy-2.3.2-cp314-cp314-win32.whl", hash = "sha256:7d6e390423cc1f76e1b8108c9b6889d20a7a1f59d9a60cac4a050fa734d6c1e2"}, - {file = "numpy-2.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:b9d0878b21e3918d76d2209c924ebb272340da1fb51abc00f986c258cd5e957b"}, - {file = "numpy-2.3.2-cp314-cp314-win_arm64.whl", hash = "sha256:2738534837c6a1d0c39340a190177d7d66fdf432894f469728da901f8f6dc910"}, - {file = "numpy-2.3.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:4d002ecf7c9b53240be3bb69d80f86ddbd34078bae04d87be81c1f58466f264e"}, - {file = "numpy-2.3.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:293b2192c6bcce487dbc6326de5853787f870aeb6c43f8f9c6496db5b1781e45"}, - {file = "numpy-2.3.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0a4f2021a6da53a0d580d6ef5db29947025ae8b35b3250141805ea9a32bbe86b"}, - {file = "numpy-2.3.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:9c144440db4bf3bb6372d2c3e49834cc0ff7bb4c24975ab33e01199e645416f2"}, - {file = "numpy-2.3.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f92d6c2a8535dc4fe4419562294ff957f83a16ebdec66df0805e473ffaad8bd0"}, - {file = "numpy-2.3.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cefc2219baa48e468e3db7e706305fcd0c095534a192a08f31e98d83a7d45fb0"}, - {file = "numpy-2.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:76c3e9501ceb50b2ff3824c3589d5d1ab4ac857b0ee3f8f49629d0de55ecf7c2"}, - {file = "numpy-2.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:122bf5ed9a0221b3419672493878ba4967121514b1d7d4656a7580cd11dddcbf"}, - {file = "numpy-2.3.2-cp314-cp314t-win32.whl", hash = "sha256:6f1ae3dcb840edccc45af496f312528c15b1f79ac318169d094e85e4bb35fdf1"}, - {file = "numpy-2.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:087ffc25890d89a43536f75c5fe8770922008758e8eeeef61733957041ed2f9b"}, - {file = "numpy-2.3.2-cp314-cp314t-win_arm64.whl", hash = "sha256:092aeb3449833ea9c0bf0089d70c29ae480685dd2377ec9cdbbb620257f84631"}, - {file = "numpy-2.3.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:14a91ebac98813a49bc6aa1a0dfc09513dcec1d97eaf31ca21a87221a1cdcb15"}, - {file = "numpy-2.3.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:71669b5daae692189540cffc4c439468d35a3f84f0c88b078ecd94337f6cb0ec"}, - {file = "numpy-2.3.2-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:69779198d9caee6e547adb933941ed7520f896fd9656834c300bdf4dd8642712"}, - {file = "numpy-2.3.2-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2c3271cc4097beb5a60f010bcc1cc204b300bb3eafb4399376418a83a1c6373c"}, - {file = "numpy-2.3.2-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8446acd11fe3dc1830568c941d44449fd5cb83068e5c70bd5a470d323d448296"}, - {file = "numpy-2.3.2-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa098a5ab53fa407fded5870865c6275a5cd4101cfdef8d6fafc48286a96e981"}, - {file = "numpy-2.3.2-pp311-pypy311_pp73-win_amd64.whl", 
hash = "sha256:6936aff90dda378c09bea075af0d9c675fe3a977a9d2402f95a87f440f59f619"}, - {file = "numpy-2.3.2.tar.gz", hash = "sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48"}, -] - -[[package]] -name = "packaging" -version = "25.0" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, - {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, -] - -[[package]] -name = "pandas" -version = "2.3.2" -description = "Powerful data structures for data analysis, time series, and statistics" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "pandas-2.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52bc29a946304c360561974c6542d1dd628ddafa69134a7131fdfd6a5d7a1a35"}, - {file = "pandas-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:220cc5c35ffaa764dd5bb17cf42df283b5cb7fdf49e10a7b053a06c9cb48ee2b"}, - {file = "pandas-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c05e15111221384019897df20c6fe893b2f697d03c811ee67ec9e0bb5a3424"}, - {file = "pandas-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc03acc273c5515ab69f898df99d9d4f12c4d70dbfc24c3acc6203751d0804cf"}, - {file = "pandas-2.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d25c20a03e8870f6339bcf67281b946bd20b86f1a544ebbebb87e66a8d642cba"}, - {file = "pandas-2.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21bb612d148bb5860b7eb2c10faacf1a810799245afd342cf297d7551513fbb6"}, - {file = "pandas-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:b62d586eb25cb8cb70a5746a378fc3194cb7f11ea77170d59f889f5dfe3cec7a"}, - {file = "pandas-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1333e9c299adcbb68ee89a9bb568fc3f20f9cbb419f1dd5225071e6cddb2a743"}, - {file = "pandas-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:76972bcbd7de8e91ad5f0ca884a9f2c477a2125354af624e022c49e5bd0dfff4"}, - {file = "pandas-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b98bdd7c456a05eef7cd21fd6b29e3ca243591fe531c62be94a2cc987efb5ac2"}, - {file = "pandas-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d81573b3f7db40d020983f78721e9bfc425f411e616ef019a10ebf597aedb2e"}, - {file = "pandas-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e190b738675a73b581736cc8ec71ae113d6c3768d0bd18bffa5b9a0927b0b6ea"}, - {file = "pandas-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c253828cb08f47488d60f43c5fc95114c771bbfff085da54bfc79cb4f9e3a372"}, - {file = "pandas-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:9467697b8083f9667b212633ad6aa4ab32436dcbaf4cd57325debb0ddef2012f"}, - {file = "pandas-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fbb977f802156e7a3f829e9d1d5398f6192375a3e2d1a9ee0803e35fe70a2b9"}, - {file = "pandas-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b9b52693123dd234b7c985c68b709b0b009f4521000d0525f2b95c22f15944b"}, - {file = "pandas-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bd281310d4f412733f319a5bc552f86d62cddc5f51d2e392c8787335c994175"}, - {file = "pandas-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:96d31a6b4354e3b9b8a2c848af75d31da390657e3ac6f30c05c82068b9ed79b9"}, - {file = "pandas-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:df4df0b9d02bb873a106971bb85d448378ef14b86ba96f035f50bbd3688456b4"}, - {file = "pandas-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:213a5adf93d020b74327cb2c1b842884dbdd37f895f42dcc2f09d451d949f811"}, - {file = "pandas-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c13b81a9347eb8c7548f53fd9a4f08d4dfe996836543f805c987bafa03317ae"}, - {file = "pandas-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0c6ecbac99a354a051ef21c5307601093cb9e0f4b1855984a084bfec9302699e"}, - {file = "pandas-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6f048aa0fd080d6a06cc7e7537c09b53be6642d330ac6f54a600c3ace857ee9"}, - {file = "pandas-2.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0064187b80a5be6f2f9c9d6bdde29372468751dfa89f4211a3c5871854cfbf7a"}, - {file = "pandas-2.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ac8c320bded4718b298281339c1a50fb00a6ba78cb2a63521c39bec95b0209b"}, - {file = "pandas-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:114c2fe4f4328cf98ce5716d1532f3ab79c5919f95a9cfee81d9140064a2e4d6"}, - {file = "pandas-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:48fa91c4dfb3b2b9bfdb5c24cd3567575f4e13f9636810462ffed8925352be5a"}, - {file = "pandas-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:12d039facec710f7ba305786837d0225a3444af7bbd9c15c32ca2d40d157ed8b"}, - {file = "pandas-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c624b615ce97864eb588779ed4046186f967374185c047070545253a52ab2d57"}, - {file = "pandas-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0cee69d583b9b128823d9514171cabb6861e09409af805b54459bd0c821a35c2"}, - {file = "pandas-2.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2319656ed81124982900b4c37f0e0c58c015af9a7bbc62342ba5ad07ace82ba9"}, - {file = "pandas-2.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b37205ad6f00d52f16b6d09f406434ba928c1a1966e2771006a9033c736d30d2"}, - {file = "pandas-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:837248b4fc3a9b83b9c6214699a13f069dc13510a6a6d7f9ba33145d2841a012"}, - {file = "pandas-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d2c3554bd31b731cd6490d94a28f3abb8dd770634a9e06eb6d2911b9827db370"}, - {file = "pandas-2.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88080a0ff8a55eac9c84e3ff3c7665b3b5476c6fbc484775ca1910ce1c3e0b87"}, - {file = "pandas-2.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d4a558c7620340a0931828d8065688b3cc5b4c8eb674bcaf33d18ff4a6870b4a"}, - {file = "pandas-2.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45178cf09d1858a1509dc73ec261bf5b25a625a389b65be2e47b559905f0ab6a"}, - {file = "pandas-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77cefe00e1b210f9c76c697fedd8fdb8d3dd86563e9c8adc9fa72b90f5e9e4c2"}, - {file = "pandas-2.3.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:13bd629c653856f00c53dc495191baa59bcafbbf54860a46ecc50d3a88421a96"}, - {file = "pandas-2.3.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:36d627906fd44b5fd63c943264e11e96e923f8de77d6016dc2f667b9ad193438"}, - {file = "pandas-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a9d7ec92d71a420185dec44909c32e9a362248c4ae2238234b76d5be37f208cc"}, - {file = "pandas-2.3.2.tar.gz", 
hash = "sha256:ab7b58f8f82706890924ccdfb5f48002b83d2b5a3845976a9fb705d36c34dcdb"}, -] - -[package.dependencies] -numpy = [ - {version = ">=1.22.4", markers = "python_version < \"3.11\""}, - {version = ">=1.23.2", markers = "python_version == \"3.11\""}, - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, -] -python-dateutil = ">=2.8.2" -pytz = ">=2020.1" -tzdata = ">=2022.7" - -[package.extras] -all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] -aws = ["s3fs (>=2022.11.0)"] -clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] -compression = ["zstandard (>=0.19.0)"] -computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] -consortium-standard = ["dataframe-api-compat (>=0.1.7)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] -feather = ["pyarrow (>=10.0.1)"] -fss = ["fsspec (>=2022.11.0)"] -gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] -hdf5 = ["tables (>=3.8.0)"] -html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] -mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] -parquet = ["pyarrow (>=10.0.1)"] -performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] -plot = ["matplotlib (>=3.6.3)"] -postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] -pyarrow = ["pyarrow (>=10.0.1)"] -spss = ["pyreadstat (>=1.2.0)"] -sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] -test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.9.2)"] - -[[package]] -name = "plotly" -version = "5.24.1" -description = "An open-source, interactive data visualization library for Python" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "plotly-5.24.1-py3-none-any.whl", hash = "sha256:f67073a1e637eb0dc3e46324d9d51e2fe76e9727c892dde64ddf1e1b51f29089"}, - {file = "plotly-5.24.1.tar.gz", hash = "sha256:dbc8ac8339d248a4bcc36e08a5659bacfe1b079390b8953533f4eb22169b4bae"}, -] - -[package.dependencies] -packaging = "*" -tenacity = ">=6.2.0" - -[[package]] -name = "python-dateutil" -version = "2.9.0.post0" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] -files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = 
"sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "pytz" -version = "2025.2" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, - {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, -] - -[[package]] -name = "requests" -version = "2.32.5" -description = "Python HTTP for Humans." -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, - {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset_normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "six" -version = "1.17.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] -files = [ - {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, - {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, -] - -[[package]] -name = "tenacity" -version = "9.1.2" -description = "Retry code until it succeeds" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138"}, - {file = "tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb"}, -] - -[package.extras] -doc = ["reno", "sphinx"] -test = ["pytest", "tornado (>=4.5)", "typeguard"] - -[[package]] -name = "tzdata" -version = "2025.2" -description = "Provider of IANA time zone data" -optional = false -python-versions = ">=2" -groups = ["main"] -files = [ - {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, - {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, -] - -[[package]] -name = "urllib3" -version = "2.5.0" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, - {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[metadata] -lock-version = "2.1" -python-versions = ">=3.10,<3.13" -content-hash = "a7f64c43efcba78952701498a72a8fe503e995841717b2d5de4c9aa20c9a996a" diff --git a/app/src/content/embeds/original_embeds/plotly/pyproject.toml b/app/src/content/embeds/original_embeds/plotly/pyproject.toml deleted file mode 100644 index 2c020f06dbed2c5d70596f65de09d943de4b100b..0000000000000000000000000000000000000000 --- a/app/src/content/embeds/original_embeds/plotly/pyproject.toml +++ /dev/null @@ -1,20 +0,0 @@ -[tool.poetry] -name = "blogpost-fine-tasks-python" -version = "0.1.0" -description = "Plotly fragment generation scripts and HTML/Markdown conversions for the blogpost." -package-mode = false - -[tool.poetry.dependencies] -python = ">=3.10,<3.13" -Markdown = "^3.6" -requests = "^2.32.3" -numpy = "^2.0.0" -pandas = "^2.2.2" -plotly = "^5.24.0" - -[tool.poetry.scripts] -html-to-md = "convert_to_md:main" - -[build-system] -requires = ["poetry-core>=1.5.0"] -build-backend = "poetry.core.masonry.api" diff --git a/app/src/content/embeds/typography/1-download-fonts.mjs b/app/src/content/embeds/typography/1-download-fonts.mjs new file mode 100755 index 0000000000000000000000000000000000000000..b0d2378db4c87ec2fb39b0830af512f01a61a28d --- /dev/null +++ b/app/src/content/embeds/typography/1-download-fonts.mjs @@ -0,0 +1,531 @@ +#!/usr/bin/env node + +import fs from 'fs/promises'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import opentype from 'opentype.js'; +import fonteditor from 'fonteditor-core'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +// Configuration +const GOOGLE_FONTS_API_KEY = process.env.GOOGLE_FONTS_API_KEY; +const GOOGLE_FONTS_API_URL = 'https://www.googleapis.com/webfonts/v1/webfonts'; +const TYPOGRAPHY_BASE = __dirname; +const GENERATED_DIR = path.join(TYPOGRAPHY_BASE, 'generated'); +const FONTS_DIR = path.join(GENERATED_DIR, 'fonts'); +const SVGS_DIR = path.join(GENERATED_DIR, 'svgs'); +const FONT_MANIFEST_PATH = path.join(GENERATED_DIR, 'data', 'font_manifest.json'); +const TYPOGRAPHY_DATA_PATH = path.join(GENERATED_DIR, 'data', 'typography_data.json'); + +/** + * Downloads the Google Fonts list + */ +async function fetchGoogleFontsList() { + // 1. 
Try Google Fonts API with key if available + if (GOOGLE_FONTS_API_KEY && GOOGLE_FONTS_API_KEY !== 'YOUR_API_KEY_HERE') { + try { + console.log('🔍 Fetching from Google Fonts API (with key)...'); + const url = `${GOOGLE_FONTS_API_URL}?key=${GOOGLE_FONTS_API_KEY}&sort=popularity`; + + const response = await fetch(url); + if (!response.ok) { + throw new Error(`HTTP ${response.status}`); + } + + const data = await response.json(); + const fonts = data.items || []; + + console.log(`✅ ${fonts.length} fonts retrieved from official API`); + + return fonts.map(font => ({ + family: font.family, + category: font.category || 'sans-serif', + files: font.files || {} + })); + + } catch (error) { + console.error('❌ Google Fonts API error:', error.message); + } + } + + // 2. Try fontsource google-font-metadata (without key) + try { + console.log('🔍 Attempting via fontsource google-font-metadata...'); + + const metadataUrl = 'https://raw.githubusercontent.com/fontsource/google-font-metadata/main/data/google-fonts-v1.json'; + + console.log(`📥 Attempting: ${metadataUrl}`); + const response = await fetch(metadataUrl, { timeout: 15000 }); + + if (response.ok) { + const data = await response.json(); + + // Fontsource data structure: { "font-id": { family: "Font Name", category: "sans-serif", ... }, ... } + if (data && typeof data === 'object') { + const fonts = Object.values(data).map(font => ({ + family: font.family, + category: font.category || 'sans-serif', + files: { + regular: `https://fonts.googleapis.com/css2?family=${encodeURIComponent(font.family)}:wght@400&display=swap` + } + })); + + console.log(`✅ ${fonts.length} fonts retrieved from fontsource metadata`); + return fonts; + } + } else { + console.log(`⚠️ Failed ${metadataUrl}: HTTP ${response.status}`); + } + + } catch (error) { + console.log('⚠️ Fontsource metadata not available:', error.message); + } + + // 3. 
Fallback: existing local manifest + + // Fallback: use existing manifest + try { + const manifestData = await fs.readFile(FONT_MANIFEST_PATH, 'utf-8'); + const manifest = JSON.parse(manifestData); + const fontNames = Object.keys(manifest); + + console.log(`📚 ${fontNames.length} fonts found in manifest`); + + return fontNames.map(name => ({ + family: name, + files: { + regular: `https://fonts.googleapis.com/css2?family=${encodeURIComponent(name)}:wght@400&display=swap` + } + })); + } catch (error) { + console.error('❌ Error reading manifest:', error.message); + + // Ultimate fallback: use fallback fonts + console.log('🔄 Using fallback fonts...'); + return getFallbackFonts(); + } +} + +// Add function for fallback fonts +function getFallbackFonts() { + const fallbackFonts = [ + "Roboto", "Open Sans", "Lato", "Montserrat", "Source Sans Pro", + "Playfair Display", "Lora", "Crimson Text", "Merriweather", + "Fira Code", "Source Code Pro", "JetBrains Mono", "Roboto Mono", + "Dancing Script", "Pacifico", "Caveat", "Oswald", "Bebas Neue" + ]; + + return fallbackFonts.map(name => ({ + family: name, + files: { + regular: `https://fonts.googleapis.com/css2?family=${encodeURIComponent(name)}:wght@400&display=swap` + } + })); +} + +/** + * Extracts WOFF2 URL from Google Fonts CSS response + */ +async function extractWOFF2Url(cssUrl, fontFamily) { + try { + console.log(`📥 CSS request for ${fontFamily}...`); + + const response = await fetch(cssUrl, { + headers: { + // Old User-Agent to force TTF/WOFF instead of WOFF2 + 'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', + 'Accept': 'text/css,*/*;q=0.1' + }, + timeout: 10000 + }); + + if (!response.ok) { + throw new Error(`HTTP ${response.status} - ${response.statusText}`); + } + + const css = await response.text(); + + if (!css || css.trim().length === 0) { + throw new Error('Empty CSS response'); + } + + console.log(`📄 CSS received (${css.length} characters)`); + if (css.includes('font-family')) { + console.log(`✅ Valid CSS for ${fontFamily}`); + } else { + console.log(`⚠️ Suspicious CSS for ${fontFamily} - no font-family found`); + } + + // Look for TTF first (most compatible) + const ttfMatch = css.match(/url\((https:\/\/fonts\.gstatic\.com\/[^)]+\.ttf)\)/); + if (ttfMatch) { + return { url: ttfMatch[1], format: 'ttf' }; + } + + // Look for WOFF (compatible with opentype.js) + const woffMatch = css.match(/url\((https:\/\/fonts\.gstatic\.com\/[^)]+\.woff)\)/); + if (woffMatch) { + return { url: woffMatch[1], format: 'woff' }; + } + + // Look for WOFF2 as last resort + const woff2Match = css.match(/url\((https:\/\/fonts\.gstatic\.com\/[^)]+\.woff2)\)/); + if (woff2Match) { + return { url: woff2Match[1], format: 'woff2' }; + } + + throw new Error('No font file found in CSS'); + } catch (error) { + console.error(`❌ Error extracting font URL for ${fontFamily}:`, error.message); + return null; + } +} + +/** + * Downloads and converts a Google Font to TTF + */ +async function downloadAndConvertGoogleFont(fontFamily, outputPath) { + try { + // Clean and properly encode the family name + const cleanFontFamily = fontFamily.trim(); + const encodedFamily = encodeURIComponent(cleanFontFamily); + + // Build Google Fonts CSS URL with special character handling + const cssUrl = `https://fonts.googleapis.com/css2?family=${encodedFamily}:wght@400&display=swap`; + + console.log(`🔍 Extracting font URL from Google Fonts for "${cleanFontFamily}"...`); + console.log(`🔗 CSS URL: ${cssUrl}`); + + const fontInfo = await extractWOFF2Url(cssUrl, 
cleanFontFamily); + + if (!fontInfo) { + throw new Error('Font URL not found'); + } + + console.log(`📥 Downloading ${fontInfo.format.toUpperCase()} from Google Fonts...`); + const response = await fetch(fontInfo.url); + + if (!response.ok) { + throw new Error(`HTTP ${response.status}`); + } + + const fontBuffer = await response.arrayBuffer(); + + if (fontInfo.format === 'ttf') { + // Already TTF, save directly + await fs.writeFile(outputPath, Buffer.from(fontBuffer)); + console.log(`✅ TTF font saved directly`); + } else if (fontInfo.format === 'woff2') { + // Convert WOFF2 to TTF + console.log(`🔄 Converting WOFF2 to TTF...`); + try { + const font = fonteditor.woff2.decode(Buffer.from(fontBuffer)); + const ttfBuffer = fonteditor.ttf.encode(font); + await fs.writeFile(outputPath, ttfBuffer); + console.log(`✅ Font converted and saved as TTF`); + } catch (conversionError) { + throw new Error(`WOFF2 conversion failed: ${conversionError.message}`); + } + } else if (fontInfo.format === 'woff') { + // WOFF version 1 - opentype.js can handle it directly + await fs.writeFile(outputPath, Buffer.from(fontBuffer)); + console.log(`✅ WOFF font saved (opentype.js can read it)`); + } + + return true; + + } catch (error) { + console.error(`❌ Error during download/conversion for ${fontFamily}:`, error.message); + return false; + } +} + + +/** + * Generates an SVG of letter A from a font + */ +async function generateLetterASVG(fontPath, fontFamily) { + try { + const fontBuffer = await fs.readFile(fontPath); + const font = opentype.parse(fontBuffer.buffer); + + // Get the glyph for letter 'A' + const glyph = font.charToGlyph('A'); + + if (!glyph || !glyph.path) { + throw new Error('Glyph A not found or without path'); + } + + // Uniform configuration + const SVG_SIZE = 80; // Fixed size 80x80 + const fontSize = 60; // Reduced font size to leave margins + + // Get glyph dimensions + const tempPath = glyph.getPath(0, 0, fontSize); + const bbox = tempPath.getBoundingBox(); + + // Calculate actual glyph dimensions + const glyphWidth = bbox.x2 - bbox.x1; + const glyphHeight = bbox.y2 - bbox.y1; + + // Center perfectly in 80x80 canvas + const centerX = SVG_SIZE / 2; + const centerY = SVG_SIZE / 2; + + // Position glyph to be centered + const offsetX = centerX - (bbox.x1 + glyphWidth / 2); + const offsetY = centerY - (bbox.y1 + glyphHeight / 2); + + // Generate final centered path + const adjustedPath = glyph.getPath(offsetX, offsetY, fontSize); + + // Generate SVG with fixed dimensions + const svgPathData = adjustedPath.toPathData(2); + const svg = `<svg xmlns="http://www.w3.org/2000/svg" width="${SVG_SIZE}" height="${SVG_SIZE}" viewBox="0 0 ${SVG_SIZE} ${SVG_SIZE}"> + <path d="${svgPathData}" fill="black"/> +</svg>`; + + return { + svg, + width: SVG_SIZE, + height: SVG_SIZE, + fontMetrics: { + unitsPerEm: font.unitsPerEm, + ascender: font.ascender, + descender: font.descender + } + }; + + } catch (error) { + console.error(`❌ Error generating SVG for ${fontFamily}:`, error.message); + return null; + } +} + +/** + * Validates that a font name is compatible with Google Fonts + */ +function validateFontName(fontName) { + if (!fontName || typeof fontName !== 'string') { + return { valid: false, reason: 'Empty or invalid name' }; + } + + const trimmed = fontName.trim(); + if (trimmed.length === 0) { + return { valid: false, reason: 'Empty name after cleanup' }; + } + + if (trimmed.length > 100) { + return { valid: false, reason: 'Name too long' }; + } + + // Problematic characters for Google Fonts URLs + const problematicChars = /[<>'"&]/; + if (problematicChars.test(trimmed)) { + return { valid: false, reason: 'Problematic characters detected' }; + } + + return { valid: true,
cleaned: trimmed }; +} + +/** + * Converts a font name to usable ID + */ +function fontNameToId(fontName) { + return fontName + .toLowerCase() + .replace(/[^a-z0-9]+/g, '_') + .replace(/^_|_$/g, ''); +} + +/** + * Processes a font: download and SVG generation + */ +async function processFont(fontData, index, total) { + const fontFamily = fontData.family; + + console.log(`\n[${index + 1}/${total}] 🔄 Processing "${fontFamily}"...`); + + // Font name validation + const validation = validateFontName(fontFamily); + if (!validation.valid) { + console.error(`❌ Invalid font "${fontFamily}": ${validation.reason}`); + return { + fontFamily, + fontId: fontNameToId(fontFamily), + status: 'error', + error: `Invalid name: ${validation.reason}` + }; + } + + const cleanFontFamily = validation.cleaned; + const fontId = fontNameToId(cleanFontFamily); + + // File paths + const fontPath = path.join(FONTS_DIR, `${fontId}.ttf`); + + try { + // Download font directly + console.log(`⬇️ Downloading ${fontFamily} from Google Fonts...`); + const downloadSuccess = await downloadAndConvertGoogleFont(fontFamily, fontPath); + + if (!downloadSuccess) { + throw new Error('Download/conversion from Google Fonts failed'); + } + + console.log(`✅ Font downloaded and ready: ${fontFamily}`); + + return { + fontFamily, + fontId, + status: 'downloaded', + fontPath: fontPath + }; + + } catch (error) { + console.error(`❌ Error for ${fontFamily}:`, error.message); + return { + fontFamily, + fontId, + status: 'error', + error: error.message + }; + } +} + +/** + * Updates font manifest with new SVGs + */ +async function updateFontManifest(results) { + try { + console.log('\n📝 Updating font manifest...'); + + // Read existing manifest + let manifest = {}; + try { + const manifestData = await fs.readFile(FONT_MANIFEST_PATH, 'utf-8'); + manifest = JSON.parse(manifestData); + } catch { + // Create new manifest if none exists + } + + // Read existing typography data + let typographyData = []; + try { + const typographyDataContent = await fs.readFile(TYPOGRAPHY_DATA_PATH, 'utf-8'); + typographyData = JSON.parse(typographyDataContent); + } catch { + // Use empty array if no data exists + } + + // Update with new results + const successfulResults = results.filter(r => r.status === 'downloaded'); + + for (const result of successfulResults) { + const { fontFamily, fontId, svgPath, dimensions, fontMetrics } = result; + + // Find corresponding typography data + const typographyEntry = typographyData.find(entry => entry.name === fontFamily); + const family = typographyEntry?.family || 'sans-serif'; + + // Update manifest + manifest[fontFamily] = { + id: fontId, + family: family, + images: { + A: svgPath, + a: svgPath // Use same SVG for lowercase and uppercase for now + }, + svg: { + A: { + path: svgPath, + width: dimensions.width, + height: dimensions.height, + viewBox: `0 0 ${dimensions.width} ${dimensions.height}` + } + }, + fontMetrics: fontMetrics + }; + } + + // Save updated manifest + await fs.writeFile(FONT_MANIFEST_PATH, JSON.stringify(manifest, null, 2), 'utf-8'); + + console.log(`✅ Manifest updated with ${successfulResults.length} fonts`); + + } catch (error) { + console.error('❌ Error updating manifest:', error.message); + } +} + +/** + * Main function + */ +async function main() { + console.log('🚀 Generating Google Fonts SVGs\n'); + + try { + // Create necessary directories + await fs.mkdir(FONTS_DIR, { recursive: true }); + await fs.mkdir(SVGS_DIR, { recursive: true }); + await fs.mkdir(path.dirname(FONT_MANIFEST_PATH), { recursive: 
true }); + + // Get fonts list + console.log('📋 Fetching fonts list...'); + const fonts = await fetchGoogleFontsList(); + + if (fonts.length === 0) { + console.error('❌ No fonts found'); + process.exit(1); + } + + console.log(`📊 ${fonts.length} fonts found`); + + // Processing 300 fonts + const limitedFonts = fonts.slice(0, 300); + console.log(`🔬 Processing first ${limitedFonts.length} fonts`); + + // Process each font + const results = []; + for (let i = 0; i < limitedFonts.length; i++) { + const result = await processFont(limitedFonts[i], i, limitedFonts.length); + results.push(result); + + // Pause between requests to avoid rate limiting + if (i < limitedFonts.length - 1) { + await new Promise(resolve => setTimeout(resolve, 100)); + } + } + + // Note: Manifest will be updated in later steps when SVGs are available + + // Display final statistics + const downloaded = results.filter(r => r.status === 'downloaded').length; + const errors = results.filter(r => r.status === 'error').length; + + console.log('\n📊 Final statistics:'); + console.log(`✅ Downloaded fonts: ${downloaded}`); + console.log(`❌ Errors: ${errors}`); + console.log(`📋 Total processed: ${results.length}`); + + if (errors > 0) { + console.log('\n❌ Fonts with errors:'); + results + .filter(r => r.status === 'error') + .forEach(r => console.log(` - ${r.fontFamily}: ${r.error}`)); + } + + } catch (error) { + console.error('💥 Fatal error:', error.message); + process.exit(1); + } +} + +// Execute script if run directly +if (import.meta.url === `file://${process.argv[1]}`) { + main(); +} + +export { main, generateLetterASVG, fontNameToId }; diff --git a/app/src/content/embeds/typography/2-generate-svgs.mjs b/app/src/content/embeds/typography/2-generate-svgs.mjs new file mode 100644 index 0000000000000000000000000000000000000000..f1c9924fbe15249cd9b7a6fa60a6282fbdc940b8 --- /dev/null +++ b/app/src/content/embeds/typography/2-generate-svgs.mjs @@ -0,0 +1,265 @@ +#!/usr/bin/env node + +import fs from 'fs/promises'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import opentype from 'opentype.js'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +// Configuration +const TYPOGRAPHY_BASE = __dirname; +const GENERATED_DIR = path.join(TYPOGRAPHY_BASE, 'generated'); +const FONTS_DIR = path.join(GENERATED_DIR, 'fonts'); +const SVGS_DIR = path.join(GENERATED_DIR, 'svgs'); +const FONT_MANIFEST_PATH = path.join(GENERATED_DIR, 'data', 'font_manifest.json'); +const TYPOGRAPHY_DATA_PATH = path.join(GENERATED_DIR, 'data', 'typography_data.json'); + +/** + * Updates font manifest with new SVGs + */ +async function updateFontManifest(results) { + try { + console.log('\n📝 Updating font manifest...'); + + // Read existing manifest + let manifest = {}; + try { + const manifestData = await fs.readFile(FONT_MANIFEST_PATH, 'utf-8'); + manifest = JSON.parse(manifestData); + } catch { + // Create new manifest if none exists + } + + // Read existing typography data + let typographyData = []; + try { + const typographyDataContent = await fs.readFile(TYPOGRAPHY_DATA_PATH, 'utf-8'); + const data = JSON.parse(typographyDataContent); + typographyData = data.fonts || []; + } catch { + // Use empty array if no data exists + } + + // Update with new results + const successfulResults = results.filter(r => r.status === 'success'); + + for (const result of successfulResults) { + const { fontFamily, fontId, svgPath, dimensions, fontMetrics } = result; + + // Find corresponding typography data + const 
typographyEntry = typographyData.find(entry => entry.name === fontFamily); + const family = typographyEntry?.family || 'sans-serif'; + + // Update manifest + manifest[fontFamily] = { + id: fontId, + family: family, + images: { + A: svgPath, + a: svgPath // Use same SVG for lowercase and uppercase for now + }, + svg: { + A: { + path: svgPath, + width: dimensions.width, + height: dimensions.height, + viewBox: `0 0 ${dimensions.width} ${dimensions.height}` + } + }, + fontMetrics: fontMetrics + }; + } + + // Ensure data directory exists + await fs.mkdir(path.dirname(FONT_MANIFEST_PATH), { recursive: true }); + + // Save updated manifest + await fs.writeFile(FONT_MANIFEST_PATH, JSON.stringify(manifest, null, 2), 'utf-8'); + + console.log(`✅ Manifest updated with ${successfulResults.length} fonts`); + + } catch (error) { + console.error('❌ Error updating manifest:', error.message); + } +} + +/** + * Generates an SVG of letter A from a font + */ +async function generateLetterASVG(fontPath, fontFamily) { + try { + const fontBuffer = await fs.readFile(fontPath); + const font = opentype.parse(fontBuffer.buffer); + + // Get the glyph for letter 'A' + const glyph = font.charToGlyph('A'); + + if (!glyph || !glyph.path) { + throw new Error('Glyph A not found or without path'); + } + + // Uniform configuration + const SVG_SIZE = 80; // Fixed size 80x80 + const fontSize = 60; // Reduced font size to leave margins + + // Get glyph dimensions + const tempPath = glyph.getPath(0, 0, fontSize); + const bbox = tempPath.getBoundingBox(); + + // Calculate actual glyph dimensions + const glyphWidth = bbox.x2 - bbox.x1; + const glyphHeight = bbox.y2 - bbox.y1; + + // Center perfectly in 80x80 canvas + const centerX = SVG_SIZE / 2; + const centerY = SVG_SIZE / 2; + + // Position glyph to be centered + const offsetX = centerX - (bbox.x1 + glyphWidth / 2); + const offsetY = centerY - (bbox.y1 + glyphHeight / 2); + + // Generate final centered path + const adjustedPath = glyph.getPath(offsetX, offsetY, fontSize); + + // Generate SVG with fixed dimensions + const svgPathData = adjustedPath.toPathData(2); + const svg = `<svg xmlns="http://www.w3.org/2000/svg" width="${SVG_SIZE}" height="${SVG_SIZE}" viewBox="0 0 ${SVG_SIZE} ${SVG_SIZE}"> + <path d="${svgPathData}" fill="black"/> +</svg>`; + + return { + svg, + width: SVG_SIZE, + height: SVG_SIZE, + fontMetrics: { + unitsPerEm: font.unitsPerEm, + ascender: font.ascender, + descender: font.descender + } + }; + + } catch (error) { + console.error(`❌ Error generating SVG for ${fontFamily}:`, error.message); + return null; + } +} + +/** + * Converts a font name to usable ID + */ +function fontNameToId(fontName) { + return fontName + .toLowerCase() + .replace(/[^a-z0-9]+/g, '_') + .replace(/^_|_$/g, ''); +} + +/** + * Generates SVGs for all fonts in the folder + */ +async function generateSVGsForAllFonts() { + console.log('🎨 Generating SVGs for all downloaded fonts\n'); + + try { + // Create SVG folder if necessary + await fs.mkdir(SVGS_DIR, { recursive: true }); + + // Read all TTF files + const fontFiles = await fs.readdir(FONTS_DIR); + const ttfFiles = fontFiles.filter(file => file.endsWith('.ttf')); + + if (ttfFiles.length === 0) { + console.error('❌ No TTF files found in', FONTS_DIR); + process.exit(1); + } + + console.log(`📁 Found ${ttfFiles.length} TTF files`); + + const results = []; + + for (let i = 0; i < ttfFiles.length; i++) { + const ttfFile = ttfFiles[i]; + const fontPath = path.join(FONTS_DIR, ttfFile); + + // Extract font name from filename + const fontId = ttfFile.replace('.ttf', ''); + const fontFamily = fontId.replace(/_/g, ' '); + + console.log(`\n[${i + 1}/${ttfFiles.length}] 🔄 Generating SVG for "${fontFamily}"...`);
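+ // Sketch of the value generateLetterASVG (defined above) resolves to for one TTF; the metric
+ // numbers below are illustrative, only the 80x80 size is fixed by the script:
+ // { svg: '<svg width="80" height="80" ...>...</svg>', width: 80, height: 80,
+ //   fontMetrics: { unitsPerEm: 1000, ascender: 800, descender: -200 } }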
+ + try { + // Generate SVG + const svgResult = await generateLetterASVG(fontPath, fontFamily); + + if (!svgResult) { + results.push({ + fontFamily, + fontId, + status: 'error', + error: 'SVG generation failed' + }); + continue; + } + + // Save SVG + const svgPath = path.join(SVGS_DIR, `${fontId}_a.svg`); + await fs.writeFile(svgPath, svgResult.svg, 'utf-8'); + + console.log(`✅ SVG generated: ${fontFamily} (${svgResult.width}x${svgResult.height})`); + + results.push({ + fontFamily, + fontId, + status: 'success', + svgPath: `/content/embeds/typography/font_svgs/${fontId}_a.svg`, + dimensions: { + width: svgResult.width, + height: svgResult.height + }, + fontMetrics: svgResult.fontMetrics + }); + + } catch (error) { + console.error(`❌ Error for ${fontFamily}:`, error.message); + results.push({ + fontFamily, + fontId, + status: 'error', + error: error.message + }); + } + } + + // Update font manifest + await updateFontManifest(results); + + // Display final statistics + const successful = results.filter(r => r.status === 'success').length; + const errors = results.filter(r => r.status === 'error').length; + + console.log('\n📊 Final statistics:'); + console.log(`✅ SVGs generated successfully: ${successful}`); + console.log(`❌ Errors: ${errors}`); + console.log(`📋 Total processed: ${results.length}`); + + if (errors > 0) { + console.log('\n❌ Fonts with errors:'); + results + .filter(r => r.status === 'error') + .forEach(r => console.log(` - ${r.fontFamily}: ${r.error}`)); + } + + } catch (error) { + console.error('💥 Fatal error:', error.message); + process.exit(1); + } +} + +// Execute script if run directly +if (import.meta.url === `file://${process.argv[1]}`) { + generateSVGsForAllFonts(); +} + +export { generateSVGsForAllFonts, generateLetterASVG }; \ No newline at end of file diff --git a/app/src/content/embeds/typography/3-generate-pngs.mjs b/app/src/content/embeds/typography/3-generate-pngs.mjs new file mode 100644 index 0000000000000000000000000000000000000000..7c3de9603f7c18412ed7f700d5d881e3e3c3a3a8 --- /dev/null +++ b/app/src/content/embeds/typography/3-generate-pngs.mjs @@ -0,0 +1,140 @@ +#!/usr/bin/env node + +import fs from 'fs/promises'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import sharp from 'sharp'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +// Configuration +const TYPOGRAPHY_BASE = __dirname; +const GENERATED_DIR = path.join(TYPOGRAPHY_BASE, 'generated'); +const SVGS_DIR = path.join(GENERATED_DIR, 'svgs'); +const PNGS_DIR = path.join(GENERATED_DIR, 'pngs'); +const PNG_SIZE = 40; // Final size 40x40 pixels + +/** + * Converts an SVG to PNG + */ +async function convertSvgToPng(svgPath, pngPath) { + try { + const svgBuffer = await fs.readFile(svgPath); + + await sharp(svgBuffer) + .resize(PNG_SIZE, PNG_SIZE, { + fit: 'contain', + background: { r: 255, g: 255, b: 255, alpha: 1 } // White background + }) + .flatten({ background: { r: 255, g: 255, b: 255 } }) // Force white background + .png() + .toFile(pngPath); + + return true; + } catch (error) { + console.error(`❌ Error during conversion ${svgPath}:`, error.message); + return false; + } +} + +/** + * Generates PNGs for all SVGs + */ +async function generatePNGsForAllSVGs() { + console.log(`🖼️ Generating ${PNG_SIZE}x${PNG_SIZE} PNGs for all SVGs\n`); + + try { + // Create PNG folder if necessary + await fs.mkdir(PNGS_DIR, { recursive: true }); + + // Read all SVG files + const svgFiles = await fs.readdir(SVGS_DIR); + const svgFilesFiltered = 
svgFiles.filter(file => file.endsWith('.svg')); + + if (svgFilesFiltered.length === 0) { + console.error('❌ No SVG files found in', SVGS_DIR); + process.exit(1); + } + + console.log(`📁 Found ${svgFilesFiltered.length} SVG files`); + + const results = []; + + for (let i = 0; i < svgFilesFiltered.length; i++) { + const svgFile = svgFilesFiltered[i]; + const svgPath = path.join(SVGS_DIR, svgFile); + + // Create PNG filename + const pngFile = svgFile.replace('.svg', '.png'); + const pngPath = path.join(PNGS_DIR, pngFile); + + // Extract font name from filename + const fontId = svgFile.replace('_a.svg', ''); + const fontFamily = fontId.replace(/_/g, ' '); + + console.log(`[${i + 1}/${svgFilesFiltered.length}] 🔄 Converting "${fontFamily}"...`); + + try { + const success = await convertSvgToPng(svgPath, pngPath); + + if (success) { + console.log(`✅ PNG generated: ${fontFamily} (${PNG_SIZE}x${PNG_SIZE})`); + results.push({ + fontFamily, + fontId, + status: 'success', + pngPath: `/content/embeds/typography/font_pngs/${pngFile}`, + dimensions: { + width: PNG_SIZE, + height: PNG_SIZE + } + }); + } else { + results.push({ + fontFamily, + fontId, + status: 'error', + error: 'SVG to PNG conversion failed' + }); + } + + } catch (error) { + console.error(`❌ Error for ${fontFamily}:`, error.message); + results.push({ + fontFamily, + fontId, + status: 'error', + error: error.message + }); + } + } + + // Display final statistics + const successful = results.filter(r => r.status === 'success').length; + const errors = results.filter(r => r.status === 'error').length; + + console.log('\n📊 Final statistics:'); + console.log(`✅ PNGs generated successfully: ${successful}`); + console.log(`❌ Errors: ${errors}`); + console.log(`📋 Total processed: ${results.length}`); + + if (errors > 0) { + console.log('\n❌ Fonts with errors:'); + results + .filter(r => r.status === 'error') + .forEach(r => console.log(` - ${r.fontFamily}: ${r.error}`)); + } + + } catch (error) { + console.error('💥 Fatal error:', error.message); + process.exit(1); + } +} + +// Execute script if run directly +if (import.meta.url === `file://${process.argv[1]}`) { + generatePNGsForAllSVGs(); +} + +export { generatePNGsForAllSVGs }; \ No newline at end of file diff --git a/app/src/content/embeds/typography/4-generate-umap.py b/app/src/content/embeds/typography/4-generate-umap.py new file mode 100644 index 0000000000000000000000000000000000000000..d98a6c971cbe667c9b501358baa5c7282fdc9690 --- /dev/null +++ b/app/src/content/embeds/typography/4-generate-umap.py @@ -0,0 +1,262 @@ +#!/usr/bin/env python3 +""" +UMAP generator for typography fonts +Based on pixel matrices from generated PNGs +""" + +import umap +import numpy as np +import pandas as pd +import json +import os +import glob +from PIL import Image +from sklearn.preprocessing import StandardScaler +from datetime import datetime + +# Configuration +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +GENERATED_DIR = os.path.join(SCRIPT_DIR, "generated") +PNGS_DIR = os.path.join(GENERATED_DIR, "pngs") +DATA_DIR = os.path.join(GENERATED_DIR, "data") +OUTPUT_FILENAME = "typography_data.json" +FULL_OUTPUT_PATH = os.path.join(DATA_DIR, OUTPUT_FILENAME) + +# UMAP parameters +UMAP_PARAMS = { + 'n_neighbors': 15, + 'min_dist': 1.0, + 'n_components': 2, + 'metric': 'euclidean', + 'random_state': 42 +} + +def load_png_as_matrix(png_path): + """ + Loads a PNG and converts it to a normalized pixel matrix + + Returns: + numpy.array: 1D vector of 1600 dimensions (40x40 flattened) + """ + try: + # Load image 
in grayscale + img = Image.open(png_path).convert('L') + + # Check dimensions + if img.size != (40, 40): + print(f"⚠️ Unexpected size for {png_path}: {img.size}") + img = img.resize((40, 40)) + + # Convert to numpy array and normalize (0-255 → 0-1) + pixel_matrix = np.array(img, dtype=np.float32) / 255.0 + + # Flatten to 1D vector + pixel_vector = pixel_matrix.flatten() + + return pixel_vector + + except Exception as e: + print(f"❌ Error loading {png_path}: {e}") + return None + +def extract_font_info_from_filename(filename): + """ + Extracts font information from filename + + Args: + filename: filename (e.g., "roboto_a.png") + + Returns: + dict: font information + """ + # Remove extension and "_a" suffix + font_id = filename.replace('.png', '').replace('_a', '') + font_name = font_id.replace('_', ' ').title() + + # Simple classification based on names + category = "sans-serif" # default + + # Classification rules based on names + serif_keywords = ['times', 'garamond', 'georgia', 'serif', 'baskerville', + 'caslon', 'merriweather', 'playfair', 'lora', 'crimson', + 'spectral', 'alegreya', 'cardo', 'vollkorn', 'gentium', + 'eb garamond', 'cormorant', 'libre baskerville'] + + script_keywords = ['script', 'cursive', 'brush', 'hand', 'dancing', + 'pacifico', 'satisfy', 'allura', 'tangerine', 'caveat', + 'sacramento', 'kaushan', 'alex brush', 'marck script'] + + mono_keywords = ['mono', 'code', 'courier', 'consola', 'inconsolata', + 'fira code', 'source code', 'jetbrains', 'roboto mono', + 'space mono', 'ubuntu mono', 'pt mono'] + + display_keywords = ['display', 'black', 'ultra', 'bebas', 'anton', 'oswald', + 'staatliches', 'bangers', 'fredoka', 'righteous', + 'russo one', 'alfa slab'] + + font_lower = font_name.lower() + + if any(keyword in font_lower for keyword in serif_keywords): + category = "serif" + elif any(keyword in font_lower for keyword in script_keywords): + category = "handwriting" + elif any(keyword in font_lower for keyword in mono_keywords): + category = "monospace" + elif any(keyword in font_lower for keyword in display_keywords): + category = "display" + + # Generate the Google Fonts URL (use the capitalized name) + google_fonts_url = f"https://fonts.google.com/specimen/{font_name.replace(' ', '+')}" + + return { + "name": font_name, + "id": font_id, + "family": category, + "google_fonts_url": google_fonts_url + } + +def load_all_font_data(): + """ + Loads all font data from PNGs + + Returns: + tuple: (font_data_list, pixel_matrices) + """ + print("🔄 Loading font data from PNGs...") + + # Create data folder if necessary + os.makedirs(DATA_DIR, exist_ok=True) + + # Find all PNG files + png_pattern = os.path.join(PNGS_DIR, "*_a.png") + png_files = glob.glob(png_pattern) + + if not png_files: + raise FileNotFoundError(f"No PNG files found in {PNGS_DIR}") + + print(f"📁 Found {len(png_files)} PNG files") + + font_data_list = [] + pixel_matrices = [] + + for i, png_path in enumerate(png_files): + filename = os.path.basename(png_path) + + # Extract font info + font_info = extract_font_info_from_filename(filename) + + # Load pixel matrix + pixel_matrix = load_png_as_matrix(png_path) + + if pixel_matrix is not None: + font_data_list.append(font_info) + pixel_matrices.append(pixel_matrix) + + if (i + 1) % 50 == 0: + print(f"⚡ Processed {i + 1}/{len(png_files)} fonts...") + + print(f"✅ Loaded {len(font_data_list)} fonts successfully") + + # Convert to numpy array + pixel_matrices = np.array(pixel_matrices) + print(f"📊 Final matrix: {pixel_matrices.shape} ({pixel_matrices.shape[0]}
fonts × {pixel_matrices.shape[1]} pixels)") + + return font_data_list, pixel_matrices + +def generate_umap_embedding(pixel_matrices): + """ + Generates UMAP embeddings from pixel matrices + + Args: + pixel_matrices: numpy array (n_fonts, 1600) + + Returns: + numpy.array: 2D UMAP coordinates + """ + print("🔄 Generating UMAP embeddings...") + + # Normalize data (important for UMAP) + print("📊 Normalizing data...") + scaler = StandardScaler() + normalized_data = scaler.fit_transform(pixel_matrices) + + # Apply UMAP + print(f"🗺️ Applying UMAP with parameters: {UMAP_PARAMS}") + reducer = umap.UMAP(**UMAP_PARAMS) + embedding = reducer.fit_transform(normalized_data) + + print(f"✅ UMAP completed - Embedding shape: {embedding.shape}") + print(f"📊 X range: [{embedding[:, 0].min():.2f}, {embedding[:, 0].max():.2f}]") + print(f"📊 Y range: [{embedding[:, 1].min():.2f}, {embedding[:, 1].max():.2f}]") + + return embedding + +def save_typography_data(font_data_list, embedding): + """ + Saves final data in JSON format + """ + print("💾 Saving data...") + + # Combine font data and UMAP coordinates + final_data = [] + for i, font_info in enumerate(font_data_list): + font_data = { + **font_info, + "x": float(embedding[i, 0]), + "y": float(embedding[i, 1]) + } + final_data.append(font_data) + + # Metadata + metadata = { + "generated_at": datetime.now().isoformat(), + "method": "umap_from_png_pixels", + "total_fonts": len(final_data), + "umap_params": UMAP_PARAMS, + "data_source": "PNG pixel matrices (40x40)" + } + + # Final structure + output_data = { + "metadata": metadata, + "fonts": final_data + } + + # Save + with open(FULL_OUTPUT_PATH, 'w', encoding='utf-8') as f: + json.dump(output_data, f, indent=2, ensure_ascii=False) + + print(f"✅ Data saved to {FULL_OUTPUT_PATH}") + + # Statistics by category + categories = {} + for font in final_data: + cat = font['family'] + categories[cat] = categories.get(cat, 0) + 1 + + print("\n📊 Distribution by category:") + for cat, count in sorted(categories.items()): + print(f" {cat}: {count} fonts") + +def main(): + """Main function""" + print("🎨 UMAP generation for typography from pixel matrices\n") + + try: + # 1. Load font data + font_data_list, pixel_matrices = load_all_font_data() + + # 2. Generate UMAP embeddings + embedding = generate_umap_embedding(pixel_matrices) + + # 3. 
Save results + save_typography_data(font_data_list, embedding) + + print("\n🎉 UMAP generation completed successfully!") + + except Exception as e: + print(f"💥 Fatal error: {e}") + raise + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/app/src/content/embeds/typography/5-generate-sprite.mjs b/app/src/content/embeds/typography/5-generate-sprite.mjs new file mode 100644 index 0000000000000000000000000000000000000000..fd2426db36a290174a3a570015bc3d027253422c --- /dev/null +++ b/app/src/content/embeds/typography/5-generate-sprite.mjs @@ -0,0 +1,108 @@ +#!/usr/bin/env node + +import fs from 'fs/promises'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); + +// Configuration +const TYPOGRAPHY_BASE = __dirname; +const GENERATED_DIR = path.join(TYPOGRAPHY_BASE, 'generated'); +const SVGS_DIR = path.join(GENERATED_DIR, 'svgs'); +const DATA_DIR = path.join(GENERATED_DIR, 'data'); +const SPRITES_DIR = path.join(GENERATED_DIR, 'sprites'); +const OUTPUT_SPRITE = path.join(SPRITES_DIR, 'font-sprite.svg'); + +async function generateSvgSprite() { + console.log('🎨 Generating SVG sprite...'); + + try { + // Read all SVG files + const files = await fs.readdir(SVGS_DIR); + const svgFiles = files.filter(file => file.endsWith('.svg')); + + console.log(`📁 Found ${svgFiles.length} SVG files`); + + let sprites = []; + let processedCount = 0; + + // Process each SVG + for (const file of svgFiles) { + try { + const filePath = path.join(SVGS_DIR, file); + const content = await fs.readFile(filePath, 'utf-8'); + + // Extract SVG content (without tags) + const match = content.match(/<svg[^>]*>(.*?)<\/svg>/s); + if (!match) continue; + + const innerContent = match[1].trim(); + if (!innerContent) continue; + + // Create symbol ID from filename + const symbolId = file.replace('.svg', ''); + + // Extract viewBox if present + const viewBoxMatch = content.match(/viewBox=["']([^"']+)["']/); + const viewBox = viewBoxMatch ? viewBoxMatch[1] : '0 0 80 80'; + + // Create symbol + sprites.push(`<symbol id="${symbolId}" viewBox="${viewBox}"> + ${innerContent} + </symbol>`); + + processedCount++; + + if (processedCount % 100 === 0) { + console.log(`⚡ Processed ${processedCount}/${svgFiles.length} SVGs...`); + } + + } catch (error) { + console.warn(`⚠️ Error with ${file}:`, error.message); + } + } + + // Create final sprite + const spriteContent = `<?xml version="1.0" encoding="UTF-8"?> +<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> +${sprites.join('\n')} +</svg> +`; + + // Create output folder if necessary + await fs.mkdir(path.dirname(OUTPUT_SPRITE), { recursive: true }); + + // Write sprite file + await fs.writeFile(OUTPUT_SPRITE, spriteContent, 'utf-8'); + + console.log(`✅ SVG sprite generated with ${sprites.length} symbols`); + console.log(`📍 File: ${OUTPUT_SPRITE}`); + console.log(`📊 Size: ${(spriteContent.length / 1024).toFixed(1)} KB`); + + // Also generate mapping file for easier usage + const mapping = {}; + svgFiles.forEach(file => { + const fontName = file.replace('_a.svg', '').replace(/_/g, ' '); + const symbolId = file.replace('.svg', ''); + mapping[fontName] = symbolId; + }); + + const mappingFile = path.join(DATA_DIR, 'font-sprite-mapping.json'); + await fs.writeFile(mappingFile, JSON.stringify(mapping, null, 2)); + + console.log(`🗺️ Mapping generated: ${mappingFile}`); + + } catch (error) { + console.error('❌ Error during generation:', error); + process.exit(1); + } +} + +// Execute script +if (import.meta.url === `file://${process.argv[1]}`) { + generateSvgSprite(); +} + +export { generateSvgSprite };
diff --git a/app/src/content/embeds/typography/auto-continue-pipeline.sh b/app/src/content/embeds/typography/auto-continue-pipeline.sh new file mode 100755 index 0000000000000000000000000000000000000000..15f8c8190925792a89d686d47b9307a9957514d7 --- /dev/null +++ b/app/src/content/embeds/typography/auto-continue-pipeline.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +# Script to monitor and automatically continue the pipeline + +echo "🔍 Pipeline monitoring in progress..." + +# Wait for step 1 to complete (check for presence of 300 fonts) +echo "⏳ Waiting for fonts download to complete..." + +while true; do + if [ -d "generated/fonts" ]; then + font_count=$(ls generated/fonts/*.ttf 2>/dev/null | wc -l) + echo "📈 Fonts downloaded: $font_count/300" + + if [ "$font_count" -ge 295 ]; then # We accept 295+ in case some fail + echo "✅ Download completed! Launching next steps..." + break + fi + else + echo "📁 generated/fonts directory not yet created..." + fi + + sleep 5 +done + +# Step 2: Generate SVGs +echo "" +echo "🎨 Step 2: SVG Generation..." +node 2-generate-svgs.mjs + +if [ $? -eq 0 ]; then + echo "✅ Step 2 completed successfully" +else + echo "❌ Step 2 Error" + exit 1 +fi + +# Step 3: Generate PNGs +echo "" +echo "🖼️ Step 3: Converting to PNGs..." +node 3-generate-pngs.mjs + +if [ $? -eq 0 ]; then + echo "✅ Step 3 completed successfully" +else + echo "❌ Step 3 Error" + exit 1 +fi + +# Step 4: Generate UMAP +echo "" +echo "🗺️ Step 4: UMAP Generation..." +poetry run python 4-generate-umap.py + +if [ $? -eq 0 ]; then + echo "✅ Step 4 completed successfully" +else + echo "❌ Step 4 Error" + exit 1 +fi + +# Step 5: Generate Sprite +echo "" +echo "🎯 Step 5: Sprite Generation..." +node 5-generate-sprite.mjs + +if [ $? -eq 0 ]; then + echo "✅ Step 5 completed successfully" + echo "" + echo "🎉 Complete pipeline finished successfully!"
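+ # Usage note (sketch): generated/sprites/font-sprite.svg exposes one <symbol> per font, keyed by
+ # font id, e.g. "roboto_a", so a page that inlines or fetches the sprite can render a glyph with
+ # <svg><use href="font-sprite.svg#roboto_a"/></svg>; the exact href prefix depends on where the
+ # app serves the sprite, and font-sprite-mapping.json maps display names to these symbol ids.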
+ + # Display final statistics + echo "" + echo "📊 Final Results:" + echo "📁 Fonts TTF: $(ls generated/fonts/*.ttf 2>/dev/null | wc -l)" + echo "🎨 SVGs: $(ls generated/svgs/*.svg 2>/dev/null | wc -l)" + echo "🖼️ PNGs: $(ls generated/pngs/*.png 2>/dev/null | wc -l)" + echo "📄 Data files:" + ls -la generated/data/ 2>/dev/null + + # Check manifest + if [ -f "generated/data/font_manifest.json" ]; then + manifest_count=$(jq 'keys | length' generated/data/font_manifest.json 2>/dev/null) + echo "📝 Fonts in manifest: $manifest_count" + fi + +else + echo "❌ Step 5 Error" + exit 1 +fi \ No newline at end of file diff --git a/app/src/content/embeds/typography/poetry.lock b/app/src/content/embeds/typography/poetry.lock new file mode 100644 index 0000000000000000000000000000000000000000..4e559eb06d969b2ef69fd380e266a1770f6f3c08 --- /dev/null +++ b/app/src/content/embeds/typography/poetry.lock @@ -0,0 +1,654 @@ +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] +markers = "platform_system == \"Windows\"" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "joblib" +version = "1.5.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "joblib-1.5.2-py3-none-any.whl", hash = "sha256:4e1f0bdbb987e6d843c70cf43714cb276623def372df3c22fe5266b2670bc241"}, + {file = "joblib-1.5.2.tar.gz", hash = "sha256:3faa5c39054b2f03ca547da9b2f52fde67c06240c31853f306aea97f13647b55"}, +] + +[[package]] +name = "llvmlite" +version = "0.45.0" +description = "lightweight wrapper around basic LLVM functionality" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "llvmlite-0.45.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:3018e5f8547c8b05e736281d5bd23ff86b88ab94697db2beeaa6f3bce9cfc721"}, + {file = "llvmlite-0.45.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ca7b15dc4422551f1b5fb1dbd734d5e8a9416028890d31d4e23a04fbc8a975c4"}, + {file = "llvmlite-0.45.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a9c7343bec403a79248859df75c7945768de70bf547eac8c1cc8b8840e0336ba"}, + {file = "llvmlite-0.45.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56713a25bf81081fc818aa36cbffb70533b3c23291ce0efc17ac8a3b684b8be3"}, + {file = "llvmlite-0.45.0-cp310-cp310-win_amd64.whl", hash = "sha256:849ba7de7153d8d92bc66577bb951c9baf8d9f67f2521c4f39c78718d471362e"}, + {file = "llvmlite-0.45.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:9b1b37e00b553e9420d9a2e327e84c5ac65a5690dcacf7fc153014780d97532a"}, + {file = "llvmlite-0.45.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cd039b8da5514db2729b7c9ae7526cae8da748a540fa3ab721b50c54651d2362"}, + {file = "llvmlite-0.45.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c6815d0d3f96de34491d3dc192e11e933e3448ceff0b58572a53f39795996e01"}, + {file = "llvmlite-0.45.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:ba79cc2cbdd0f61632ca8e9235fef3657a8aacd636d5775cd13807ceb8265f63"}, + {file = "llvmlite-0.45.0-cp311-cp311-win_amd64.whl", hash = "sha256:6188da8e9e3906b167fb64bc84a05e6bf98095d982f45f323bed5def2ba7db1c"}, + {file = "llvmlite-0.45.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:3928119253849e7c9aad4f881feb3e886370bb7ac6eccbc728b35a1be89064cc"}, + {file = "llvmlite-0.45.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a3e9b5dad694edb9e43904ede037458ee73a18b4e2f227e44fc0f808aceab824"}, + {file = "llvmlite-0.45.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4955635f316e3ffc0271ee7a3da586ae92cd3e70709b6cd59df641e980636d4c"}, + {file = "llvmlite-0.45.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e7497f1b75d741e568bf4a2dfccd5c702d6b5f3d232dd4a59ed851a82e587bd"}, + {file = "llvmlite-0.45.0-cp312-cp312-win_amd64.whl", hash = "sha256:6404f5363986efbe1c7c1afd19da495534e46180466d593ace5a5c042b2f3f94"}, + {file = "llvmlite-0.45.0-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:f719f98e4f3a6292b1a6495500b2cf668d3604907499c483b326da5ce2ff9f01"}, + {file = "llvmlite-0.45.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4ffa899f7584ef48f1037308d92cb19460a0afb834aa1fe9db9d3e52d0e81a79"}, + {file = "llvmlite-0.45.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2c12fde908967e464b265554143c030ba4dcc2b981a815582d7708a30295018e"}, + {file = "llvmlite-0.45.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83567cbbf598eb57f108222dfc3dfee065c20a2aa004391360949f2e8ff2b8b4"}, + {file = "llvmlite-0.45.0-cp313-cp313-win_amd64.whl", hash = "sha256:f68890ceb662e874933103e91e239389ff7275c4befba8e43ccd46ae3231b89e"}, + {file = "llvmlite-0.45.0.tar.gz", hash = "sha256:ceb0bcd20da949178bd7ab78af8de73e9f3c483ac46b5bef39f06a4862aa8336"}, +] + +[[package]] +name = "numba" +version = "0.62.0" +description = "compiling Python code using LLVM" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "numba-0.62.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:3e7eaff7ce35799de4dda09a4cfcf1bb204ad59be5fa29a1efc080c0a72eb6d6"}, + {file = "numba-0.62.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a7694c45ddfe5c9a26d05cd2bf378e214ae2d5332601a3c89c94207eb4661166"}, + {file = "numba-0.62.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c2f07c6e67e8f54dba62a46a3b72294c5f4333ff703eb8966576ef731cc8ecd7"}, + {file = "numba-0.62.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f77fadaa6592d2a6b9c35bcddc710b22dceca0af9a7037dbc61ff209eaddfa8"}, + {file = "numba-0.62.0-cp310-cp310-win_amd64.whl", hash = "sha256:77050a79f6bc19324c2f6f456c074a49d3de35c8124c91668054e9d62243ac99"}, + {file = "numba-0.62.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1370708a54281e1dd3e4b73f423f88d3b34b64cf3f5fa0e460a1fbe6bd4e0f3f"}, + {file = "numba-0.62.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6bd7032d6c1e771967fc1d07a499bb10ce1639662451fc0a86089fa8efc420e7"}, + {file = "numba-0.62.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:87cdc476ea1b2feefb7f893a648be2f1e7a04f671f355ac9bbeb007eaf039f8c"}, + {file = "numba-0.62.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:144a57e504a5423acfc91fcd3be4e6481cb0667ce0bcc6cd3e8bd43a735b58a4"}, + {file = "numba-0.62.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:499b00e0bd95c83fedf1cbf349b7132a432a90292cbe2014eeaf482ce7c3b9f8"}, + {file = "numba-0.62.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:82edb589c9607ec2dbe0b2d34793d8c5104daf766277acc49ad7e179f8634fd2"}, + {file = "numba-0.62.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:469e042750d5a6aa6847dc89d64de5f0bfaf2208b6d442e4634de3318b7043de"}, + {file = "numba-0.62.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2ad2dc2b3583f8f24f35c8ade7e215c44590c9aa757ccba640dd293297cb15bb"}, + {file = "numba-0.62.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0266998a842074fc91bfc406dd91c8ee12c196ea834375af6174f62647ffd9b1"}, + {file = "numba-0.62.0-cp312-cp312-win_amd64.whl", hash = "sha256:cbc84e030548a5aad74971eb1a579f69edc7da961d89ef09a5ee1fe01c207795"}, + {file = "numba-0.62.0-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:07e76ac7bcd47156a758df52e9752fdfb94ff5f80b78c4710cabc568d8d3d6ad"}, + {file = "numba-0.62.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:a972689dad64a7047f555d93ce829fe05ca2519ad0cf7af0071a64145c571039"}, + {file = "numba-0.62.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f789b1f2997fc34b1b88fcc4481886dcd44afcffbd3e28affedce54aec7fdcc1"}, + {file = "numba-0.62.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:516525981f19f36d3a0bada0fb7479cf0bf925b5e389d03aac87f3758c5cfb9e"}, + {file = "numba-0.62.0-cp313-cp313-win_amd64.whl", hash = "sha256:591a9c485904f219a129b0493f89d27de24286fb66dd5a577b11edc62fc78db4"}, + {file = "numba-0.62.0.tar.gz", hash = "sha256:2afcc7899dc93fefecbb274a19c592170bc2dbfae02b00f83e305332a9857a5a"}, +] + +[package.dependencies] +llvmlite = "==0.45.*" +numpy = ">=1.22,<2.4" + +[[package]] +name = "numpy" +version = "2.3.3" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.11" +groups = ["main"] +files = [ + {file = "numpy-2.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ffc4f5caba7dfcbe944ed674b7eef683c7e94874046454bb79ed7ee0236f59d"}, + {file = "numpy-2.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7e946c7170858a0295f79a60214424caac2ffdb0063d4d79cb681f9aa0aa569"}, + {file = "numpy-2.3.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:cd4260f64bc794c3390a63bf0728220dd1a68170c169088a1e0dfa2fde1be12f"}, + {file = "numpy-2.3.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:f0ddb4b96a87b6728df9362135e764eac3cfa674499943ebc44ce96c478ab125"}, + {file = "numpy-2.3.3-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:afd07d377f478344ec6ca2b8d4ca08ae8bd44706763d1efb56397de606393f48"}, + {file = "numpy-2.3.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bc92a5dedcc53857249ca51ef29f5e5f2f8c513e22cfb90faeb20343b8c6f7a6"}, + {file = "numpy-2.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7af05ed4dc19f308e1d9fc759f36f21921eb7bbfc82843eeec6b2a2863a0aefa"}, + {file = "numpy-2.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:433bf137e338677cebdd5beac0199ac84712ad9d630b74eceeb759eaa45ddf30"}, + {file = "numpy-2.3.3-cp311-cp311-win32.whl", hash = "sha256:eb63d443d7b4ffd1e873f8155260d7f58e7e4b095961b01c91062935c2491e57"}, + {file = "numpy-2.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:ec9d249840f6a565f58d8f913bccac2444235025bbb13e9a4681783572ee3caa"}, + {file = "numpy-2.3.3-cp311-cp311-win_arm64.whl", hash = 
"sha256:74c2a948d02f88c11a3c075d9733f1ae67d97c6bdb97f2bb542f980458b257e7"}, + {file = "numpy-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cfdd09f9c84a1a934cde1eec2267f0a43a7cd44b2cca4ff95b7c0d14d144b0bf"}, + {file = "numpy-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb32e3cf0f762aee47ad1ddc6672988f7f27045b0783c887190545baba73aa25"}, + {file = "numpy-2.3.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:396b254daeb0a57b1fe0ecb5e3cff6fa79a380fa97c8f7781a6d08cd429418fe"}, + {file = "numpy-2.3.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:067e3d7159a5d8f8a0b46ee11148fc35ca9b21f61e3c49fbd0a027450e65a33b"}, + {file = "numpy-2.3.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1c02d0629d25d426585fb2e45a66154081b9fa677bc92a881ff1d216bc9919a8"}, + {file = "numpy-2.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9192da52b9745f7f0766531dcfa978b7763916f158bb63bdb8a1eca0068ab20"}, + {file = "numpy-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cd7de500a5b66319db419dc3c345244404a164beae0d0937283b907d8152e6ea"}, + {file = "numpy-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:93d4962d8f82af58f0b2eb85daaf1b3ca23fe0a85d0be8f1f2b7bb46034e56d7"}, + {file = "numpy-2.3.3-cp312-cp312-win32.whl", hash = "sha256:5534ed6b92f9b7dca6c0a19d6df12d41c68b991cef051d108f6dbff3babc4ebf"}, + {file = "numpy-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:497d7cad08e7092dba36e3d296fe4c97708c93daf26643a1ae4b03f6294d30eb"}, + {file = "numpy-2.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:ca0309a18d4dfea6fc6262a66d06c26cfe4640c3926ceec90e57791a82b6eee5"}, + {file = "numpy-2.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f5415fb78995644253370985342cd03572ef8620b934da27d77377a2285955bf"}, + {file = "numpy-2.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d00de139a3324e26ed5b95870ce63be7ec7352171bc69a4cf1f157a48e3eb6b7"}, + {file = "numpy-2.3.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:9dc13c6a5829610cc07422bc74d3ac083bd8323f14e2827d992f9e52e22cd6a6"}, + {file = "numpy-2.3.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:d79715d95f1894771eb4e60fb23f065663b2298f7d22945d66877aadf33d00c7"}, + {file = "numpy-2.3.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:952cfd0748514ea7c3afc729a0fc639e61655ce4c55ab9acfab14bda4f402b4c"}, + {file = "numpy-2.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5b83648633d46f77039c29078751f80da65aa64d5622a3cd62aaef9d835b6c93"}, + {file = "numpy-2.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b001bae8cea1c7dfdb2ae2b017ed0a6f2102d7a70059df1e338e307a4c78a8ae"}, + {file = "numpy-2.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e9aced64054739037d42fb84c54dd38b81ee238816c948c8f3ed134665dcd86"}, + {file = "numpy-2.3.3-cp313-cp313-win32.whl", hash = "sha256:9591e1221db3f37751e6442850429b3aabf7026d3b05542d102944ca7f00c8a8"}, + {file = "numpy-2.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:f0dadeb302887f07431910f67a14d57209ed91130be0adea2f9793f1a4f817cf"}, + {file = "numpy-2.3.3-cp313-cp313-win_arm64.whl", hash = "sha256:3c7cf302ac6e0b76a64c4aecf1a09e51abd9b01fc7feee80f6c43e3ab1b1dbc5"}, + {file = "numpy-2.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:eda59e44957d272846bb407aad19f89dc6f58fecf3504bd144f4c5cf81a7eacc"}, + {file = "numpy-2.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:823d04112bc85ef5c4fda73ba24e6096c8f869931405a80aa8b0e604510a26bc"}, + {file = "numpy-2.3.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:40051003e03db4041aa325da2a0971ba41cf65714e65d296397cc0e32de6018b"}, + {file = "numpy-2.3.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6ee9086235dd6ab7ae75aba5662f582a81ced49f0f1c6de4260a78d8f2d91a19"}, + {file = "numpy-2.3.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94fcaa68757c3e2e668ddadeaa86ab05499a70725811e582b6a9858dd472fb30"}, + {file = "numpy-2.3.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da1a74b90e7483d6ce5244053399a614b1d6b7bc30a60d2f570e5071f8959d3e"}, + {file = "numpy-2.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2990adf06d1ecee3b3dcbb4977dfab6e9f09807598d647f04d385d29e7a3c3d3"}, + {file = "numpy-2.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ed635ff692483b8e3f0fcaa8e7eb8a75ee71aa6d975388224f70821421800cea"}, + {file = "numpy-2.3.3-cp313-cp313t-win32.whl", hash = "sha256:a333b4ed33d8dc2b373cc955ca57babc00cd6f9009991d9edc5ddbc1bac36bcd"}, + {file = "numpy-2.3.3-cp313-cp313t-win_amd64.whl", hash = "sha256:4384a169c4d8f97195980815d6fcad04933a7e1ab3b530921c3fef7a1c63426d"}, + {file = "numpy-2.3.3-cp313-cp313t-win_arm64.whl", hash = "sha256:75370986cc0bc66f4ce5110ad35aae6d182cc4ce6433c40ad151f53690130bf1"}, + {file = "numpy-2.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cd052f1fa6a78dee696b58a914b7229ecfa41f0a6d96dc663c1220a55e137593"}, + {file = "numpy-2.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:414a97499480067d305fcac9716c29cf4d0d76db6ebf0bf3cbce666677f12652"}, + {file = "numpy-2.3.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:50a5fe69f135f88a2be9b6ca0481a68a136f6febe1916e4920e12f1a34e708a7"}, + {file = "numpy-2.3.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:b912f2ed2b67a129e6a601e9d93d4fa37bef67e54cac442a2f588a54afe5c67a"}, + {file = "numpy-2.3.3-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9e318ee0596d76d4cb3d78535dc005fa60e5ea348cd131a51e99d0bdbe0b54fe"}, + {file = "numpy-2.3.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce020080e4a52426202bdb6f7691c65bb55e49f261f31a8f506c9f6bc7450421"}, + {file = "numpy-2.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:e6687dc183aa55dae4a705b35f9c0f8cb178bcaa2f029b241ac5356221d5c021"}, + {file = "numpy-2.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d8f3b1080782469fdc1718c4ed1d22549b5fb12af0d57d35e992158a772a37cf"}, + {file = "numpy-2.3.3-cp314-cp314-win32.whl", hash = "sha256:cb248499b0bc3be66ebd6578b83e5acacf1d6cb2a77f2248ce0e40fbec5a76d0"}, + {file = "numpy-2.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:691808c2b26b0f002a032c73255d0bd89751425f379f7bcd22d140db593a96e8"}, + {file = "numpy-2.3.3-cp314-cp314-win_arm64.whl", hash = "sha256:9ad12e976ca7b10f1774b03615a2a4bab8addce37ecc77394d8e986927dc0dfe"}, + {file = "numpy-2.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9cc48e09feb11e1db00b320e9d30a4151f7369afb96bd0e48d942d09da3a0d00"}, + {file = "numpy-2.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:901bf6123879b7f251d3631967fd574690734236075082078e0571977c6a8e6a"}, + {file = "numpy-2.3.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:7f025652034199c301049296b59fa7d52c7e625017cae4c75d8662e377bf487d"}, + {file = "numpy-2.3.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = 
"sha256:533ca5f6d325c80b6007d4d7fb1984c303553534191024ec6a524a4c92a5935a"}, + {file = "numpy-2.3.3-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0edd58682a399824633b66885d699d7de982800053acf20be1eaa46d92009c54"}, + {file = "numpy-2.3.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:367ad5d8fbec5d9296d18478804a530f1191e24ab4d75ab408346ae88045d25e"}, + {file = "numpy-2.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8f6ac61a217437946a1fa48d24c47c91a0c4f725237871117dea264982128097"}, + {file = "numpy-2.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:179a42101b845a816d464b6fe9a845dfaf308fdfc7925387195570789bb2c970"}, + {file = "numpy-2.3.3-cp314-cp314t-win32.whl", hash = "sha256:1250c5d3d2562ec4174bce2e3a1523041595f9b651065e4a4473f5f48a6bc8a5"}, + {file = "numpy-2.3.3-cp314-cp314t-win_amd64.whl", hash = "sha256:b37a0b2e5935409daebe82c1e42274d30d9dd355852529eab91dab8dcca7419f"}, + {file = "numpy-2.3.3-cp314-cp314t-win_arm64.whl", hash = "sha256:78c9f6560dc7e6b3990e32df7ea1a50bbd0e2a111e05209963f5ddcab7073b0b"}, + {file = "numpy-2.3.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1e02c7159791cd481e1e6d5ddd766b62a4d5acf8df4d4d1afe35ee9c5c33a41e"}, + {file = "numpy-2.3.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:dca2d0fc80b3893ae72197b39f69d55a3cd8b17ea1b50aa4c62de82419936150"}, + {file = "numpy-2.3.3-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:99683cbe0658f8271b333a1b1b4bb3173750ad59c0c61f5bbdc5b318918fffe3"}, + {file = "numpy-2.3.3-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:d9d537a39cc9de668e5cd0e25affb17aec17b577c6b3ae8a3d866b479fbe88d0"}, + {file = "numpy-2.3.3-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8596ba2f8af5f93b01d97563832686d20206d303024777f6dfc2e7c7c3f1850e"}, + {file = "numpy-2.3.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1ec5615b05369925bd1125f27df33f3b6c8bc10d788d5999ecd8769a1fa04db"}, + {file = "numpy-2.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:2e267c7da5bf7309670523896df97f93f6e469fb931161f483cd6882b3b1a5dc"}, + {file = "numpy-2.3.3.tar.gz", hash = "sha256:ddc7c39727ba62b80dfdbedf400d1c10ddfa8eefbd7ec8dcb118be8b56d31029"}, +] + +[[package]] +name = "pandas" +version = "2.3.2" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pandas-2.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52bc29a946304c360561974c6542d1dd628ddafa69134a7131fdfd6a5d7a1a35"}, + {file = "pandas-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:220cc5c35ffaa764dd5bb17cf42df283b5cb7fdf49e10a7b053a06c9cb48ee2b"}, + {file = "pandas-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c05e15111221384019897df20c6fe893b2f697d03c811ee67ec9e0bb5a3424"}, + {file = "pandas-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc03acc273c5515ab69f898df99d9d4f12c4d70dbfc24c3acc6203751d0804cf"}, + {file = "pandas-2.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d25c20a03e8870f6339bcf67281b946bd20b86f1a544ebbebb87e66a8d642cba"}, + {file = "pandas-2.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21bb612d148bb5860b7eb2c10faacf1a810799245afd342cf297d7551513fbb6"}, + {file = "pandas-2.3.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:b62d586eb25cb8cb70a5746a378fc3194cb7f11ea77170d59f889f5dfe3cec7a"}, + {file = "pandas-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1333e9c299adcbb68ee89a9bb568fc3f20f9cbb419f1dd5225071e6cddb2a743"}, + {file = "pandas-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:76972bcbd7de8e91ad5f0ca884a9f2c477a2125354af624e022c49e5bd0dfff4"}, + {file = "pandas-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b98bdd7c456a05eef7cd21fd6b29e3ca243591fe531c62be94a2cc987efb5ac2"}, + {file = "pandas-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d81573b3f7db40d020983f78721e9bfc425f411e616ef019a10ebf597aedb2e"}, + {file = "pandas-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e190b738675a73b581736cc8ec71ae113d6c3768d0bd18bffa5b9a0927b0b6ea"}, + {file = "pandas-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c253828cb08f47488d60f43c5fc95114c771bbfff085da54bfc79cb4f9e3a372"}, + {file = "pandas-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:9467697b8083f9667b212633ad6aa4ab32436dcbaf4cd57325debb0ddef2012f"}, + {file = "pandas-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fbb977f802156e7a3f829e9d1d5398f6192375a3e2d1a9ee0803e35fe70a2b9"}, + {file = "pandas-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b9b52693123dd234b7c985c68b709b0b009f4521000d0525f2b95c22f15944b"}, + {file = "pandas-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bd281310d4f412733f319a5bc552f86d62cddc5f51d2e392c8787335c994175"}, + {file = "pandas-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d31a6b4354e3b9b8a2c848af75d31da390657e3ac6f30c05c82068b9ed79b9"}, + {file = "pandas-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:df4df0b9d02bb873a106971bb85d448378ef14b86ba96f035f50bbd3688456b4"}, + {file = "pandas-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:213a5adf93d020b74327cb2c1b842884dbdd37f895f42dcc2f09d451d949f811"}, + {file = "pandas-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c13b81a9347eb8c7548f53fd9a4f08d4dfe996836543f805c987bafa03317ae"}, + {file = "pandas-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0c6ecbac99a354a051ef21c5307601093cb9e0f4b1855984a084bfec9302699e"}, + {file = "pandas-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6f048aa0fd080d6a06cc7e7537c09b53be6642d330ac6f54a600c3ace857ee9"}, + {file = "pandas-2.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0064187b80a5be6f2f9c9d6bdde29372468751dfa89f4211a3c5871854cfbf7a"}, + {file = "pandas-2.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ac8c320bded4718b298281339c1a50fb00a6ba78cb2a63521c39bec95b0209b"}, + {file = "pandas-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:114c2fe4f4328cf98ce5716d1532f3ab79c5919f95a9cfee81d9140064a2e4d6"}, + {file = "pandas-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:48fa91c4dfb3b2b9bfdb5c24cd3567575f4e13f9636810462ffed8925352be5a"}, + {file = "pandas-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:12d039facec710f7ba305786837d0225a3444af7bbd9c15c32ca2d40d157ed8b"}, + {file = "pandas-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c624b615ce97864eb588779ed4046186f967374185c047070545253a52ab2d57"}, + {file = "pandas-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0cee69d583b9b128823d9514171cabb6861e09409af805b54459bd0c821a35c2"}, + {file = 
"pandas-2.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2319656ed81124982900b4c37f0e0c58c015af9a7bbc62342ba5ad07ace82ba9"}, + {file = "pandas-2.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b37205ad6f00d52f16b6d09f406434ba928c1a1966e2771006a9033c736d30d2"}, + {file = "pandas-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:837248b4fc3a9b83b9c6214699a13f069dc13510a6a6d7f9ba33145d2841a012"}, + {file = "pandas-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d2c3554bd31b731cd6490d94a28f3abb8dd770634a9e06eb6d2911b9827db370"}, + {file = "pandas-2.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88080a0ff8a55eac9c84e3ff3c7665b3b5476c6fbc484775ca1910ce1c3e0b87"}, + {file = "pandas-2.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d4a558c7620340a0931828d8065688b3cc5b4c8eb674bcaf33d18ff4a6870b4a"}, + {file = "pandas-2.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45178cf09d1858a1509dc73ec261bf5b25a625a389b65be2e47b559905f0ab6a"}, + {file = "pandas-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77cefe00e1b210f9c76c697fedd8fdb8d3dd86563e9c8adc9fa72b90f5e9e4c2"}, + {file = "pandas-2.3.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:13bd629c653856f00c53dc495191baa59bcafbbf54860a46ecc50d3a88421a96"}, + {file = "pandas-2.3.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:36d627906fd44b5fd63c943264e11e96e923f8de77d6016dc2f667b9ad193438"}, + {file = "pandas-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a9d7ec92d71a420185dec44909c32e9a362248c4ae2238234b76d5be37f208cc"}, + {file = "pandas-2.3.2.tar.gz", hash = "sha256:ab7b58f8f82706890924ccdfb5f48002b83d2b5a3845976a9fb705d36c34dcdb"}, +] + +[package.dependencies] +numpy = {version = ">=1.26.0", markers = "python_version >= \"3.12\""} +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", 
"tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "pillow" +version = "11.3.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860"}, + {file = "pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae"}, + {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9"}, + {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e"}, + {file = "pillow-11.3.0-cp310-cp310-win32.whl", hash = "sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6"}, + {file = "pillow-11.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f"}, + {file = "pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f"}, + {file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"}, + {file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"}, + {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"}, + {file = 
"pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94"}, + {file = "pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0"}, + {file = "pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac"}, + {file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"}, + {file = "pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4"}, + {file = "pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024"}, + {file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809"}, + {file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d"}, + {file = "pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149"}, + {file = "pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d"}, + {file = "pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542"}, + {file = "pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd"}, + {file = "pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8"}, + {file = "pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f"}, + {file = "pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c"}, + {file = "pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805"}, + {file = 
"pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8"}, + {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2"}, + {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b"}, + {file = "pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3"}, + {file = "pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51"}, + {file = "pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580"}, + {file = "pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e"}, + {file = "pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59"}, + {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe"}, + {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c"}, + {file = "pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788"}, + {file = "pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31"}, + {file = "pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e"}, + {file = "pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12"}, + {file = "pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77"}, + {file = 
"pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874"}, + {file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a"}, + {file = "pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214"}, + {file = "pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635"}, + {file = "pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6"}, + {file = "pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae"}, + {file = "pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477"}, + {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50"}, + {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b"}, + {file = "pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12"}, + {file = "pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db"}, + {file = "pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa"}, + {file = "pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f"}, + {file = "pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a"}, + {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978"}, + {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:97afb3a00b65cc0804d1c7abddbf090a81eaac02768af58cbdcaaa0a931e0b6d"}, + {file = "pillow-11.3.0-cp39-cp39-win32.whl", hash = "sha256:ea944117a7974ae78059fcc1800e5d3295172bb97035c0c1d9345fca1419da71"}, + {file = "pillow-11.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:e5c5858ad8ec655450a7c7df532e9842cf8df7cc349df7225c60d5d348c8aada"}, + {file = "pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"}, + {file = "pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +test-arrow = ["pyarrow"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"] +typing = ["typing-extensions ; python_version < \"3.10\""] +xmp = ["defusedxml"] + +[[package]] +name = "pynndescent" +version = "0.5.13" +description = "Nearest Neighbor Descent" +optional = false +python-versions = "*" 
+groups = ["main"] +files = [ + {file = "pynndescent-0.5.13-py3-none-any.whl", hash = "sha256:69aabb8f394bc631b6ac475a1c7f3994c54adf3f51cd63b2730fefba5771b949"}, + {file = "pynndescent-0.5.13.tar.gz", hash = "sha256:d74254c0ee0a1eeec84597d5fe89fedcf778593eeabe32c2f97412934a9800fb"}, +] + +[package.dependencies] +joblib = ">=0.11" +llvmlite = ">=0.30" +numba = ">=0.51.2" +scikit-learn = ">=0.18" +scipy = ">=1.0" + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2025.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, + {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, +] + +[[package]] +name = "scikit-learn" +version = "1.7.2" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "scikit_learn-1.7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b33579c10a3081d076ab403df4a4190da4f4432d443521674637677dc91e61f"}, + {file = "scikit_learn-1.7.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:36749fb62b3d961b1ce4fedf08fa57a1986cd409eff2d783bca5d4b9b5fce51c"}, + {file = "scikit_learn-1.7.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7a58814265dfc52b3295b1900cfb5701589d30a8bb026c7540f1e9d3499d5ec8"}, + {file = "scikit_learn-1.7.2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a847fea807e278f821a0406ca01e387f97653e284ecbd9750e3ee7c90347f18"}, + {file = "scikit_learn-1.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:ca250e6836d10e6f402436d6463d6c0e4d8e0234cfb6a9a47835bd392b852ce5"}, + {file = "scikit_learn-1.7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7509693451651cd7361d30ce4e86a1347493554f172b1c72a39300fa2aea79e"}, + {file = "scikit_learn-1.7.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:0486c8f827c2e7b64837c731c8feff72c0bd2b998067a8a9cbc10643c31f0fe1"}, + {file = "scikit_learn-1.7.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89877e19a80c7b11a2891a27c21c4894fb18e2c2e077815bcade10d34287b20d"}, + {file = "scikit_learn-1.7.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8da8bf89d4d79aaec192d2bda62f9b56ae4e5b4ef93b6a56b5de4977e375c1f1"}, + {file = "scikit_learn-1.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:9b7ed8d58725030568523e937c43e56bc01cadb478fc43c042a9aca1dacb3ba1"}, + {file = "scikit_learn-1.7.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8d91a97fa2b706943822398ab943cde71858a50245e31bc71dba62aab1d60a96"}, + {file = "scikit_learn-1.7.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:acbc0f5fd2edd3432a22c69bed78e837c70cf896cd7993d71d51ba6708507476"}, + {file = 
"scikit_learn-1.7.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e5bf3d930aee75a65478df91ac1225ff89cd28e9ac7bd1196853a9229b6adb0b"}, + {file = "scikit_learn-1.7.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4d6e9deed1a47aca9fe2f267ab8e8fe82ee20b4526b2c0cd9e135cea10feb44"}, + {file = "scikit_learn-1.7.2-cp312-cp312-win_amd64.whl", hash = "sha256:6088aa475f0785e01bcf8529f55280a3d7d298679f50c0bb70a2364a82d0b290"}, + {file = "scikit_learn-1.7.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0b7dacaa05e5d76759fb071558a8b5130f4845166d88654a0f9bdf3eb57851b7"}, + {file = "scikit_learn-1.7.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:abebbd61ad9e1deed54cca45caea8ad5f79e1b93173dece40bb8e0c658dbe6fe"}, + {file = "scikit_learn-1.7.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:502c18e39849c0ea1a5d681af1dbcf15f6cce601aebb657aabbfe84133c1907f"}, + {file = "scikit_learn-1.7.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a4c328a71785382fe3fe676a9ecf2c86189249beff90bf85e22bdb7efaf9ae0"}, + {file = "scikit_learn-1.7.2-cp313-cp313-win_amd64.whl", hash = "sha256:63a9afd6f7b229aad94618c01c252ce9e6fa97918c5ca19c9a17a087d819440c"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9acb6c5e867447b4e1390930e3944a005e2cb115922e693c08a323421a6966e8"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:2a41e2a0ef45063e654152ec9d8bcfc39f7afce35b08902bfe290c2498a67a6a"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98335fb98509b73385b3ab2bd0639b1f610541d3988ee675c670371d6a87aa7c"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:191e5550980d45449126e23ed1d5e9e24b2c68329ee1f691a3987476e115e09c"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-win_amd64.whl", hash = "sha256:57dc4deb1d3762c75d685507fbd0bc17160144b2f2ba4ccea5dc285ab0d0e973"}, + {file = "scikit_learn-1.7.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fa8f63940e29c82d1e67a45d5297bdebbcb585f5a5a50c4914cc2e852ab77f33"}, + {file = "scikit_learn-1.7.2-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:f95dc55b7902b91331fa4e5845dd5bde0580c9cd9612b1b2791b7e80c3d32615"}, + {file = "scikit_learn-1.7.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9656e4a53e54578ad10a434dc1f993330568cfee176dff07112b8785fb413106"}, + {file = "scikit_learn-1.7.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96dc05a854add0e50d3f47a1ef21a10a595016da5b007c7d9cd9d0bffd1fcc61"}, + {file = "scikit_learn-1.7.2-cp314-cp314-win_amd64.whl", hash = "sha256:bb24510ed3f9f61476181e4db51ce801e2ba37541def12dc9333b946fc7a9cf8"}, + {file = "scikit_learn-1.7.2.tar.gz", hash = "sha256:20e9e49ecd130598f1ca38a1d85090e1a600147b9c02fa6f15d69cb53d968fda"}, +] + +[package.dependencies] +joblib = ">=1.2.0" +numpy = ">=1.22.0" +scipy = ">=1.8.0" +threadpoolctl = ">=3.1.0" + +[package.extras] +benchmark = ["matplotlib (>=3.5.0)", "memory_profiler (>=0.57.0)", "pandas (>=1.4.0)"] +build = ["cython (>=3.0.10)", "meson-python (>=0.17.1)", "numpy (>=1.22.0)", "scipy (>=1.8.0)"] +docs = ["Pillow (>=8.4.0)", "matplotlib (>=3.5.0)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.4.0)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.19.0)", 
"seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.17.1)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)", "towncrier (>=24.8.0)"] +examples = ["matplotlib (>=3.5.0)", "pandas (>=1.4.0)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.19.0)", "seaborn (>=0.9.0)"] +install = ["joblib (>=1.2.0)", "numpy (>=1.22.0)", "scipy (>=1.8.0)", "threadpoolctl (>=3.1.0)"] +maintenance = ["conda-lock (==3.0.1)"] +tests = ["matplotlib (>=3.5.0)", "mypy (>=1.15)", "numpydoc (>=1.2.0)", "pandas (>=1.4.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.2.1)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.11.7)", "scikit-image (>=0.19.0)"] + +[[package]] +name = "scipy" +version = "1.16.2" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.11" +groups = ["main"] +files = [ + {file = "scipy-1.16.2-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:6ab88ea43a57da1af33292ebd04b417e8e2eaf9d5aa05700be8d6e1b6501cd92"}, + {file = "scipy-1.16.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c95e96c7305c96ede73a7389f46ccd6c659c4da5ef1b2789466baeaed3622b6e"}, + {file = "scipy-1.16.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:87eb178db04ece7c698220d523c170125dbffebb7af0345e66c3554f6f60c173"}, + {file = "scipy-1.16.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:4e409eac067dcee96a57fbcf424c13f428037827ec7ee3cb671ff525ca4fc34d"}, + {file = "scipy-1.16.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e574be127bb760f0dad24ff6e217c80213d153058372362ccb9555a10fc5e8d2"}, + {file = "scipy-1.16.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f5db5ba6188d698ba7abab982ad6973265b74bb40a1efe1821b58c87f73892b9"}, + {file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec6e74c4e884104ae006d34110677bfe0098203a3fec2f3faf349f4cb05165e3"}, + {file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:912f46667d2d3834bc3d57361f854226475f695eb08c08a904aadb1c936b6a88"}, + {file = "scipy-1.16.2-cp311-cp311-win_amd64.whl", hash = "sha256:91e9e8a37befa5a69e9cacbe0bcb79ae5afb4a0b130fd6db6ee6cc0d491695fa"}, + {file = "scipy-1.16.2-cp311-cp311-win_arm64.whl", hash = "sha256:f3bf75a6dcecab62afde4d1f973f1692be013110cad5338007927db8da73249c"}, + {file = "scipy-1.16.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:89d6c100fa5c48472047632e06f0876b3c4931aac1f4291afc81a3644316bb0d"}, + {file = "scipy-1.16.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ca748936cd579d3f01928b30a17dc474550b01272d8046e3e1ee593f23620371"}, + {file = "scipy-1.16.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:fac4f8ce2ddb40e2e3d0f7ec36d2a1e7f92559a2471e59aec37bd8d9de01fec0"}, + {file = "scipy-1.16.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:033570f1dcefd79547a88e18bccacff025c8c647a330381064f561d43b821232"}, + {file = "scipy-1.16.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ea3421209bf00c8a5ef2227de496601087d8f638a2363ee09af059bd70976dc1"}, + {file = "scipy-1.16.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f66bd07ba6f84cd4a380b41d1bf3c59ea488b590a2ff96744845163309ee8e2f"}, + {file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:5e9feab931bd2aea4a23388c962df6468af3d808ddf2d40f94a81c5dc38f32ef"}, + {file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03dfc75e52f72cf23ec2ced468645321407faad8f0fe7b1f5b49264adbc29cb1"}, + {file = "scipy-1.16.2-cp312-cp312-win_amd64.whl", hash = "sha256:0ce54e07bbb394b417457409a64fd015be623f36e330ac49306433ffe04bc97e"}, + {file = "scipy-1.16.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a8ffaa4ac0df81a0b94577b18ee079f13fecdb924df3328fc44a7dc5ac46851"}, + {file = "scipy-1.16.2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:84f7bf944b43e20b8a894f5fe593976926744f6c185bacfcbdfbb62736b5cc70"}, + {file = "scipy-1.16.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:5c39026d12edc826a1ef2ad35ad1e6d7f087f934bb868fc43fa3049c8b8508f9"}, + {file = "scipy-1.16.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e52729ffd45b68777c5319560014d6fd251294200625d9d70fd8626516fc49f5"}, + {file = "scipy-1.16.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:024dd4a118cccec09ca3209b7e8e614931a6ffb804b2a601839499cb88bdf925"}, + {file = "scipy-1.16.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7a5dc7ee9c33019973a470556081b0fd3c9f4c44019191039f9769183141a4d9"}, + {file = "scipy-1.16.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c2275ff105e508942f99d4e3bc56b6ef5e4b3c0af970386ca56b777608ce95b7"}, + {file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:af80196eaa84f033e48444d2e0786ec47d328ba00c71e4299b602235ffef9acb"}, + {file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9fb1eb735fe3d6ed1f89918224e3385fbf6f9e23757cacc35f9c78d3b712dd6e"}, + {file = "scipy-1.16.2-cp313-cp313-win_amd64.whl", hash = "sha256:fda714cf45ba43c9d3bae8f2585c777f64e3f89a2e073b668b32ede412d8f52c"}, + {file = "scipy-1.16.2-cp313-cp313-win_arm64.whl", hash = "sha256:2f5350da923ccfd0b00e07c3e5cfb316c1c0d6c1d864c07a72d092e9f20db104"}, + {file = "scipy-1.16.2-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:53d8d2ee29b925344c13bda64ab51785f016b1b9617849dac10897f0701b20c1"}, + {file = "scipy-1.16.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:9e05e33657efb4c6a9d23bd8300101536abd99c85cca82da0bffff8d8764d08a"}, + {file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:7fe65b36036357003b3ef9d37547abeefaa353b237e989c21027b8ed62b12d4f"}, + {file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6406d2ac6d40b861cccf57f49592f9779071655e9f75cd4f977fa0bdd09cb2e4"}, + {file = "scipy-1.16.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ff4dc42bd321991fbf611c23fc35912d690f731c9914bf3af8f417e64aca0f21"}, + {file = "scipy-1.16.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:654324826654d4d9133e10675325708fb954bc84dae6e9ad0a52e75c6b1a01d7"}, + {file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63870a84cd15c44e65220eaed2dac0e8f8b26bbb991456a033c1d9abfe8a94f8"}, + {file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:fa01f0f6a3050fa6a9771a95d5faccc8e2f5a92b4a2e5440a0fa7264a2398472"}, + {file = "scipy-1.16.2-cp313-cp313t-win_amd64.whl", hash = "sha256:116296e89fba96f76353a8579820c2512f6e55835d3fad7780fece04367de351"}, + {file = "scipy-1.16.2-cp313-cp313t-win_arm64.whl", hash = "sha256:98e22834650be81d42982360382b43b17f7ba95e0e6993e2a4f5b9ad9283a94d"}, + {file = "scipy-1.16.2-cp314-cp314-macosx_10_14_x86_64.whl", hash = 
"sha256:567e77755019bb7461513c87f02bb73fb65b11f049aaaa8ca17cfaa5a5c45d77"}, + {file = "scipy-1.16.2-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:17d9bb346194e8967296621208fcdfd39b55498ef7d2f376884d5ac47cec1a70"}, + {file = "scipy-1.16.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:0a17541827a9b78b777d33b623a6dcfe2ef4a25806204d08ead0768f4e529a88"}, + {file = "scipy-1.16.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:d7d4c6ba016ffc0f9568d012f5f1eb77ddd99412aea121e6fa8b4c3b7cbad91f"}, + {file = "scipy-1.16.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9702c4c023227785c779cba2e1d6f7635dbb5b2e0936cdd3a4ecb98d78fd41eb"}, + {file = "scipy-1.16.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d1cdf0ac28948d225decdefcc45ad7dd91716c29ab56ef32f8e0d50657dffcc7"}, + {file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:70327d6aa572a17c2941cdfb20673f82e536e91850a2e4cb0c5b858b690e1548"}, + {file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5221c0b2a4b58aa7c4ed0387d360fd90ee9086d383bb34d9f2789fafddc8a936"}, + {file = "scipy-1.16.2-cp314-cp314-win_amd64.whl", hash = "sha256:f5a85d7b2b708025af08f060a496dd261055b617d776fc05a1a1cc69e09fe9ff"}, + {file = "scipy-1.16.2-cp314-cp314-win_arm64.whl", hash = "sha256:2cc73a33305b4b24556957d5857d6253ce1e2dcd67fa0ff46d87d1670b3e1e1d"}, + {file = "scipy-1.16.2-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:9ea2a3fed83065d77367775d689401a703d0f697420719ee10c0780bcab594d8"}, + {file = "scipy-1.16.2-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:7280d926f11ca945c3ef92ba960fa924e1465f8d07ce3a9923080363390624c4"}, + {file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:8afae1756f6a1fe04636407ef7dbece33d826a5d462b74f3d0eb82deabefd831"}, + {file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:5c66511f29aa8d233388e7416a3f20d5cae7a2744d5cee2ecd38c081f4e861b3"}, + {file = "scipy-1.16.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efe6305aeaa0e96b0ccca5ff647a43737d9a092064a3894e46c414db84bc54ac"}, + {file = "scipy-1.16.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7f3a337d9ae06a1e8d655ee9d8ecb835ea5ddcdcbd8d23012afa055ab014f374"}, + {file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bab3605795d269067d8ce78a910220262711b753de8913d3deeaedb5dded3bb6"}, + {file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b0348d8ddb55be2a844c518cd8cc8deeeb8aeba707cf834db5758fc89b476a2c"}, + {file = "scipy-1.16.2-cp314-cp314t-win_amd64.whl", hash = "sha256:26284797e38b8a75e14ea6631d29bda11e76ceaa6ddb6fdebbfe4c4d90faf2f9"}, + {file = "scipy-1.16.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d2a4472c231328d4de38d5f1f68fdd6d28a615138f842580a8a321b5845cf779"}, + {file = "scipy-1.16.2.tar.gz", hash = "sha256:af029b153d243a80afb6eabe40b0a07f8e35c9adc269c019f364ad747f826a6b"}, +] + +[package.dependencies] +numpy = ">=1.25.2,<2.6" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "linkify-it-py", "matplotlib (>=3.5)", "myst-nb (>=1.2.0)", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.2.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"] 
+test = ["Cython", "array-api-strict (>=2.3.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest (>=8.0.0)", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "threadpoolctl" +version = "3.6.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb"}, + {file = "threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e"}, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "tzdata" +version = "2025.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +groups = ["main"] +files = [ + {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, + {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, +] + +[[package]] +name = "umap-learn" +version = "0.5.9.post2" +description = "Uniform Manifold Approximation and Projection" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "umap_learn-0.5.9.post2-py3-none-any.whl", hash = "sha256:fbe51166561e0e7fab00ef3d516ac2621243b8d15cf4bef9f656d701736b16a0"}, + {file = "umap_learn-0.5.9.post2.tar.gz", hash = "sha256:bdf60462d779bd074ce177a0714ced17e6d161285590fa487f3f9548dd3c31c9"}, +] + +[package.dependencies] +numba = ">=0.51.2" +numpy = ">=1.23" +pynndescent = ">=0.5" +scikit-learn = ">=1.6" +scipy = ">=1.3.1" +tqdm = "*" + +[package.extras] +parametric-umap = ["tensorflow (>=2.1)"] +plot = ["bokeh", "colorcet", "dask", "datashader", "holoviews", "matplotlib", "pandas", "scikit-image", "seaborn"] +tbb = ["tbb (>=2019.0)"] +test = ["pytest"] + +[metadata] +lock-version = "2.1" +python-versions = ">=3.13" +content-hash = "e3ffd60ecb4ec7f7c207eea1fb1b29231aaeddf55d9f7276ea1784d98dd14d15" diff --git a/app/src/content/embeds/typography/pyproject.toml b/app/src/content/embeds/typography/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..943bb593602b0cc77dbb7ff668d033becbaafcc7 --- /dev/null +++ 
b/app/src/content/embeds/typography/pyproject.toml @@ -0,0 +1,21 @@ +[project] +name = "typography-umap" +version = "0.1.0" +description = "" +authors = [ + {name = "Your Name",email = "you@example.com"} +] +readme = "README.md" +requires-python = ">=3.13" +dependencies = [ + "umap-learn (>=0.5.9.post2,<0.6.0)", + "pillow (>=11.3.0,<12.0.0)", + "scikit-learn (>=1.7.2,<2.0.0)", + "numpy (>=2.3.3,<3.0.0)", + "pandas (>=2.3.2,<3.0.0)" +] + + +[build-system] +requires = ["poetry-core>=2.0.0,<3.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/app/src/content/embeds/typography/run-full-pipeline.sh b/app/src/content/embeds/typography/run-full-pipeline.sh new file mode 100644 index 0000000000000000000000000000000000000000..0813c9232b9cddd3612052e7c8ce22b6bcac9196 --- /dev/null +++ b/app/src/content/embeds/typography/run-full-pipeline.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +echo "🚀 Starting full typography pipeline for 300 fonts..." + +# Step 1: Download fonts (already running) +echo "Step 1: Downloading fonts... (in progress)" + +# Wait for step 1 to complete, then run remaining steps +echo "Step 2: Generating SVGs..." +node 2-generate-svgs.mjs + +if [ $? -eq 0 ]; then + echo "✅ Step 2 completed successfully" + + echo "Step 3: Converting to PNGs..." + node 3-generate-pngs.mjs + + if [ $? -eq 0 ]; then + echo "✅ Step 3 completed successfully" + + echo "Step 4: Generating UMAP analysis..." + poetry run python 4-generate-umap.py + + if [ $? -eq 0 ]; then + echo "✅ Step 4 completed successfully" + + echo "Step 5: Generating sprite..." + node 5-generate-sprite.mjs + + if [ $? -eq 0 ]; then + echo "✅ Step 5 completed successfully" + echo "🎉 Full pipeline completed with 300 fonts!" + + # Display final stats + echo "📊 Final results:" + echo "📁 Fonts: $(ls generated/fonts/ | wc -l) TTF files" + echo "🎨 SVGs: $(ls generated/svgs/ | wc -l) SVG files" + echo "🖼️ PNGs: $(ls generated/pngs/ | wc -l) PNG files" + echo "📄 Data files:" + ls -la generated/data/ + else + echo "❌ Step 5 failed" + exit 1 + fi + else + echo "❌ Step 4 failed" + exit 1 + fi + else + echo "❌ Step 3 failed" + exit 1 + fi +else + echo "❌ Step 2 failed" + exit 1 +fi \ No newline at end of file diff --git a/app/src/pages/index.astro b/app/src/pages/index.astro index cc6b3b54f5e5b2081d4101e6d287816a43de2cae..2a764a2fc56e6ffd7d78c052201b651cce1b8a99 100644 --- a/app/src/pages/index.astro +++ b/app/src/pages/index.astro @@ -206,21 +206,16 @@ const licence = src="https://cdn.jsdelivr.net/npm/medium-zoom@1.1.0/dist/medium-zoom.min.js" >