omarkamali committed
Commit d07523d · verified · 1 Parent(s): 0a72378

Upload all models and assets for anp (latest)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +190 -156
  3. models/embeddings/aligned/anp_128d.bin +3 -0
  4. models/embeddings/aligned/anp_128d.meta.json +1 -0
  5. models/embeddings/aligned/anp_128d.projection.npy +3 -0
  6. models/embeddings/aligned/anp_128d_metadata.json +8 -0
  7. models/embeddings/aligned/anp_32d.bin +3 -0
  8. models/embeddings/aligned/anp_32d.meta.json +1 -0
  9. models/embeddings/aligned/anp_32d.projection.npy +3 -0
  10. models/embeddings/aligned/anp_32d_metadata.json +8 -0
  11. models/embeddings/aligned/anp_64d.bin +3 -0
  12. models/embeddings/aligned/anp_64d.meta.json +1 -0
  13. models/embeddings/aligned/anp_64d.projection.npy +3 -0
  14. models/embeddings/aligned/anp_64d_metadata.json +8 -0
  15. models/embeddings/monolingual/anp_128d.bin +2 -2
  16. models/embeddings/monolingual/anp_128d_metadata.json +1 -1
  17. models/embeddings/monolingual/anp_32d.bin +2 -2
  18. models/embeddings/monolingual/anp_32d_metadata.json +1 -1
  19. models/embeddings/monolingual/anp_64d.bin +2 -2
  20. models/embeddings/monolingual/anp_64d_metadata.json +1 -1
  21. models/subword_markov/anp_markov_ctx1_subword.parquet +2 -2
  22. models/subword_markov/anp_markov_ctx1_subword_metadata.json +2 -2
  23. models/subword_markov/anp_markov_ctx2_subword.parquet +2 -2
  24. models/subword_markov/anp_markov_ctx2_subword_metadata.json +2 -2
  25. models/subword_markov/anp_markov_ctx3_subword.parquet +2 -2
  26. models/subword_markov/anp_markov_ctx3_subword_metadata.json +2 -2
  27. models/subword_markov/anp_markov_ctx4_subword.parquet +2 -2
  28. models/subword_markov/anp_markov_ctx4_subword_metadata.json +2 -2
  29. models/subword_ngram/anp_2gram_subword.parquet +2 -2
  30. models/subword_ngram/anp_2gram_subword_metadata.json +2 -2
  31. models/subword_ngram/anp_3gram_subword.parquet +2 -2
  32. models/subword_ngram/anp_3gram_subword_metadata.json +2 -2
  33. models/subword_ngram/anp_4gram_subword.parquet +2 -2
  34. models/subword_ngram/anp_4gram_subword_metadata.json +2 -2
  35. models/subword_ngram/anp_5gram_subword.parquet +3 -0
  36. models/subword_ngram/anp_5gram_subword_metadata.json +7 -0
  37. models/tokenizer/anp_tokenizer_16k.model +2 -2
  38. models/tokenizer/anp_tokenizer_16k.vocab +0 -0
  39. models/tokenizer/anp_tokenizer_32k.model +2 -2
  40. models/tokenizer/anp_tokenizer_32k.vocab +0 -0
  41. models/tokenizer/anp_tokenizer_8k.model +2 -2
  42. models/tokenizer/anp_tokenizer_8k.vocab +0 -0
  43. models/vocabulary/anp_vocabulary.parquet +2 -2
  44. models/vocabulary/anp_vocabulary_metadata.json +9 -9
  45. models/word_markov/anp_markov_ctx1_word.parquet +2 -2
  46. models/word_markov/anp_markov_ctx1_word_metadata.json +2 -2
  47. models/word_markov/anp_markov_ctx2_word.parquet +2 -2
  48. models/word_markov/anp_markov_ctx2_word_metadata.json +2 -2
  49. models/word_markov/anp_markov_ctx3_word.parquet +2 -2
  50. models/word_markov/anp_markov_ctx3_word_metadata.json +2 -2
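For anyone consuming this commit programmatically, each asset above can be fetched individually from the Hub cache. A minimal sketch using `huggingface_hub`; the `repo_id` below is a placeholder, substitute this repository's actual id:

```python
# Minimal sketch: download one asset from this commit via the Hub cache.
# NOTE: repo_id is a placeholder, not confirmed by this page.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="omarkamali/wikilangs-anp",  # hypothetical id
    filename="models/vocabulary/anp_vocabulary.parquet",
)
print(path)  # local path of the cached file
```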
.gitattributes CHANGED
@@ -39,3 +39,4 @@ visualizations/position_encoding_comparison.png filter=lfs diff=lfs merge=lfs -t
 visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
 visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
 visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
+visualizations/embedding_tsne_multilingual.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 language: anp
-language_name: ANP
+language_name: Angika
 language_family: indoaryan_central
 tags:
 - wikilangs
@@ -10,11 +10,21 @@ tags:
 - n-gram
 - markov
 - wikipedia
+- feature-extraction
+- sentence-similarity
+- tokenization
+- n-grams
+- markov-chain
+- text-mining
+- fasttext
+- babelvec
+- vocabulous
+- vocabulary
 - monolingual
 - family-indoaryan_central
 license: mit
 library_name: wikilangs
-pipeline_tag: feature-extraction
+pipeline_tag: text-generation
 datasets:
 - omarkamali/wikipedia-monthly
 dataset_info:
@@ -23,20 +33,20 @@ dataset_info:
 metrics:
 - name: best_compression_ratio
   type: compression
-  value: 3.779
+  value: 3.777
 - name: best_isotropy
   type: isotropy
-  value: 0.8284
+  value: 0.8282
 - name: vocabulary_size
   type: vocab
   value: 0
 generated: 2026-01-03
 ---

-# ANP - Wikilangs Models
+# Angika - Wikilangs Models
 ## Comprehensive Research Report & Full Ablation Study

-This repository contains NLP models trained and evaluated by Wikilangs, specifically on **ANP** Wikipedia data.
+This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Angika** Wikipedia data.
 We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.

 ## 📋 Repository Contents
@@ -60,7 +70,7 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
 - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
 - [4. Vocabulary Analysis](#4-vocabulary-analysis)
 - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
-- [6. Morphological Analysis (Experimental)](#6-morphological-analysis)
+- [6. Morphological Analysis (Experimental)](#6--morphological-analysis-experimental)
 - [7. Summary & Recommendations](#7-summary--recommendations)
 - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
 - [Visualizations Index](#visualizations-index)
@@ -80,43 +90,43 @@

 | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
 |------------|-------------|---------------|----------|--------------|
-| **8k** | 3.293x | 3.29 | 0.1162% | 454,392 |
-| **16k** | 3.578x | 3.58 | 0.1263% | 418,207 |
-| **32k** | 3.779x 🏆 | 3.78 | 0.1334% | 395,905 |
+| **8k** | 3.298x | 3.30 | 0.1077% | 449,296 |
+| **16k** | 3.575x | 3.58 | 0.1168% | 414,503 |
+| **32k** | 3.777x 🏆 | 3.78 | 0.1234% | 392,298 |

 ### Tokenization Examples

 Below are sample sentences tokenized with each vocabulary size:

-**Sample 1:** `मई ग्रेगोरी कैलंडर 5मां महीना छेकै। सात महीना मँ सँ एक छेकै जेकरौ दिन सिनी...`
+**Sample 1:** `ई लेख खाली रंगौ के सूची लेख केरौ सूची अँग्रेजी़ वर्णक्रम मँ रखै लेली बनलौ छै। ...`

 | Vocab | Tokens | Count |
 |-------|--------|-------|
-| 8k | `▁मई ▁ग्रेगोरी ▁कैलंडर ▁क 5 मां ▁महीना ▁छेकै ... (+24 more)` | 34 |
-| 16k | `▁मई ▁ग्रेगोरी ▁कैलंडर ▁क 5 मां ▁महीना ▁छेकै ... (+24 more)` | 34 |
-| 32k | `▁मई ▁ग्रेगोरी ▁कैलंडर ▁क 5 मां ▁महीना ▁छेकै ... (+24 more)` | 34 |
+| 8k | `▁ई ▁लेख ▁खाली ▁रंग ▁के ▁सूची ▁लेख ▁केरौ ▁सूची ... (+15 more)` | 25 |
+| 16k | `▁ई ▁लेख ▁खाली ▁रंग ▁के ▁सूची ▁लेख ▁केरौ ▁सूची ... (+13 more)` | 23 |
+| 32k | `▁ई ▁लेख ▁खाली ▁रंगौ ▁के ▁सूची ▁लेख ▁केरौ ▁सूची ▁क ... (+9 more)` | 19 |

-**Sample 2:** `राजा महेश ठाकुर ई. तक मधुबनी जिला के भउर (भौर) गांव म॑ छेलै, जे मधुबनी स॑ करीब...`
+**Sample 2:** `तत्व छीकै जेकरा भौतिक रासियनिक विधि द्वारा तोड़लो नय जाबे सकै छै। तत्त्व (जै...`

 | Vocab | Tokens | Count |
 |-------|--------|-------|
-| 8k | `▁राजा ▁महेश ▁ठाकुर ▁– ▁ई . ▁तक ▁मधुबनी ▁जिला ▁के ... (+28 more)` | 38 |
-| 16k | `▁राजा ▁महेश ▁ठाकुर ▁– ▁ई . ▁तक ▁मधुबनी ▁जिला ▁के ... (+27 more)` | 37 |
-| 32k | `▁राजा ▁महेश ▁ठाकुर ▁– ▁ई . ▁तक ▁मधुबनी ▁जिला ▁के ... (+24 more)` | 34 |
+| 8k | `▁तत्व ▁उ ▁छीकै ▁जेकरा ▁भौतिक ▁व ▁रा िय निक ... (+30 more)` | 40 |
+| 16k | `▁तत्व ▁उ ▁छीकै ▁जेकरा ▁भौतिक ▁व ▁रास िय निक ▁विधि ... (+27 more)` | 37 |
+| 32k | `▁तत्व ▁उ ▁छीकै ▁जेकरा ▁भौतिक ▁व ▁रासियनिक ▁विधि ▁द्वारा ▁तोड़लो ... (+22 more)` | 32 |

-**Sample 3:** `पति पत्नी नंदा केरऽ ई. मं॑ बनलऽ हिंदी फ़िल्म छेकै.`
+**Sample 3:** `मई ग्रेगोरी कैलंडर 5मां महीना छेकै। सात महीना मँ सँ एक छेकै जेकरौ दिन सिनी...`

 | Vocab | Tokens | Count |
 |-------|--------|-------|
-| 8k | `▁पति ▁पत्नी ▁नंदा ▁केरऽ ▁ई . ▁मं॑ ▁बनलऽ ▁हिंदी ▁फ़िल्म ... (+2 more)` | 12 |
-| 16k | `▁पति ▁पत्नी ▁नंदा ▁केरऽ ▁ई . ▁मं॑ ▁बनलऽ ▁हिंदी ▁फ़िल्म ... (+2 more)` | 12 |
-| 32k | `▁पति ▁पत्नी ▁नंदा ▁केरऽ ▁ई . ▁मं॑ ▁बनलऽ ▁हिंदी ▁फ़िल्म ... (+2 more)` | 12 |
+| 8k | `▁मई ▁ग्रेगोरी ▁कैलंडर ▁क 5 मां ▁महीना ▁छेकै ... (+24 more)` | 34 |
+| 16k | `▁मई ▁ग्रेगोरी ▁कैलंडर ▁क 5 मां ▁महीना ▁छेकै ... (+24 more)` | 34 |
+| 32k | `▁मई ▁ग्रेगोरी ▁कैलंडर ▁क 5 मां ▁महीना ▁छेकै ... (+24 more)` | 34 |


 ### Key Findings

-- **Best Compression:** 32k achieves 3.779x compression
-- **Lowest UNK Rate:** 8k with 0.1162% unknown tokens
+- **Best Compression:** 32k achieves 3.777x compression
+- **Lowest UNK Rate:** 8k with 0.1077% unknown tokens
 - **Trade-off:** Larger vocabularies improve compression but increase model size
 - **Recommendation:** 32k vocabulary provides optimal balance for production use

@@ -133,12 +143,14 @@ Below are sample sentences tokenized with each vocabulary size:

 | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
 |--------|---------|------------|---------|----------------|------------------|-------------------|
-| **2-gram** | Word | 5,052 | 12.30 | 15,168 | 20.6% | 52.3% |
-| **2-gram** | Subword | 1,738 🏆 | 10.76 | 17,876 | 38.2% | 73.8% |
-| **3-gram** | Word | 4,130 | 12.01 | 14,962 | 20.9% | 59.9% |
-| **3-gram** | Subword | 12,170 | 13.57 | 72,480 | 14.9% | 40.7% |
-| **4-gram** | Word | 6,457 | 12.66 | 28,266 | 18.2% | 56.2% |
-| **4-gram** | Subword | 41,486 | 15.34 | 205,918 | 8.4% | 27.1% |
+| **2-gram** | Word | 5,133 | 12.33 | 15,401 | 20.6% | 52.0% |
+| **2-gram** | Subword | 1,763 🏆 | 10.78 | 18,130 | 37.8% | 73.7% |
+| **3-gram** | Word | 4,136 | 12.01 | 14,976 | 21.1% | 59.7% |
+| **3-gram** | Subword | 12,510 | 13.61 | 74,071 | 14.6% | 40.2% |
+| **4-gram** | Word | 6,638 | 12.70 | 28,729 | 18.3% | 55.5% |
+| **4-gram** | Subword | 43,295 | 15.40 | 212,245 | 8.3% | 26.6% |
+| **5-gram** | Word | 4,565 | 12.16 | 20,947 | 20.4% | 62.1% |
+| **5-gram** | Subword | 74,529 | 16.19 | 271,380 | 5.9% | 20.8% |

 ### Top 5 N-grams by Size

@@ -146,19 +158,19 @@ Below are sample sentences tokenized with each vocabulary size:

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `के लिए` | 2,018 |
+| 1 | `के लिए` | 1,987 |
 | 2 | `के अनुसार` | 1,711 |
-| 3 | `छै जे` | 1,623 |
-| 4 | `छै जेकरा` | 1,477 |
+| 3 | `छै जे` | 1,664 |
+| 4 | `छै जेकरा` | 1,521 |
 | 5 | `के औसत` | 1,421 |

 **3-grams (Word):**

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `छै जेकरा म` | 1,239 |
+| 1 | `छै जेकरा म` | 1,240 |
 | 2 | `जनगणना के अनुसार` | 1,231 |
-| 3 | `के रूप में` | 808 |
+| 3 | `के रूप में` | 796 |
 | 4 | `परिवार रहै छै` | 789 |
 | 5 | `म स्थित ऐगो` | 690 |

@@ -172,42 +184,62 @@ Below are sample sentences tokenized with each vocabulary size:
 | 4 | `के जनगणना के अनुसार` | 498 |
 | 5 | `गाँव छै जेकरा म` | 479 |

+**5-grams (Word):**
+
+| Rank | N-gram | Count |
+|------|--------|-------|
+| 1 | `गाँव छै जेकरा म कुल` | 476 |
+| 2 | `छै के जनगणना के अनुसार` | 438 |
+| 3 | `0 6 आयु वर्ग के` | 436 |
+| 4 | `6 आयु वर्ग के बच्चा` | 435 |
+| 5 | `आयु वर्ग के बच्चा के` | 432 |
+
 **2-grams (Subword):**

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `र _` | 44,044 |
-| 2 | `_ के` | 42,780 |
-| 3 | `के _` | 39,580 |
-| 4 | `, _` | 27,198 |
-| 5 | `। _` | 27,084 |
+| 1 | `र _` | 44,141 |
+| 2 | `_ के` | 43,544 |
+| 3 | `के _` | 39,889 |
+| 4 | `, _` | 27,806 |
+| 5 | `। _` | 27,568 |

 **3-grams (Subword):**

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `_ के _` | 37,016 |
-| 2 | `_ में _` | 14,280 |
-| 3 | `_ की _` | 9,494 |
-| 4 | `_ औ र` | 9,303 |
-| 5 | `औ र _` | 9,298 |
+| 1 | `_ के _` | 37,379 |
+| 2 | `_ में _` | 14,100 |
+| 3 | `_ की _` | 9,283 |
+| 4 | `_ औ र` | 9,137 |
+| 5 | `औ र _` | 9,133 |

 **4-grams (Subword):**

 | Rank | N-gram | Count |
 |------|--------|-------|
-| 1 | `_ औ र _` | 9,269 |
-| 2 | `_ है । _` | 6,536 |
-| 3 | `_ छै । _` | 5,833 |
-| 4 | `_ ए क _` | 4,768 |
-| 5 | `र _ के _` | 3,598 |
+| 1 | `_ औ र _` | 9,104 |
+| 2 | `_ है । _` | 6,415 |
+| 3 | `_ छै । _` | 6,096 |
+| 4 | `_ ए क _` | 4,687 |
+| 5 | `_ छै , _` | 3,618 |
+
+**5-grams (Subword):**
+
+| Rank | N-gram | Count |
+|------|--------|-------|
+| 1 | `_ छै , _ जे` | 2,233 |
+| 2 | `_ भा र त _` | 2,072 |
+| 3 | `ता _ है । _` | 2,029 |
+| 4 | `_ अ नु सा र` | 2,019 |
+| 5 | `_ के _ लि ए` | 1,986 |


 ### Key Findings

-- **Best Perplexity:** 2-gram (subword) with 1,738
+- **Best Perplexity:** 2-gram (subword) with 1,763
 - **Entropy Trend:** Decreases with larger n-grams (more predictable)
-- **Coverage:** Top-1000 patterns cover ~27% of corpus
+- **Coverage:** Top-1000 patterns cover ~21% of corpus
 - **Recommendation:** 4-gram or 5-gram for best predictive performance

 ---
@@ -223,14 +255,14 @@ Below are sample sentences tokenized with each vocabulary size:

 | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
 |---------|---------|-------------|------------|------------------|-----------------|----------------|
-| **1** | Word | 0.8691 | 1.827 | 5.82 | 57,434 | 13.1% |
-| **1** | Subword | 0.9723 | 1.962 | 11.43 | 4,617 | 2.8% |
-| **2** | Word | 0.2533 | 1.192 | 1.57 | 333,590 | 74.7% |
-| **2** | Subword | 0.5474 | 1.461 | 3.83 | 52,772 | 45.3% |
-| **3** | Word | 0.0719 | 1.051 | 1.12 | 522,356 | 92.8% |
-| **3** | Subword | 0.4946 | 1.409 | 2.66 | 202,187 | 50.5% |
-| **4** | Word | 0.0215 🏆 | 1.015 | 1.03 | 583,736 | 97.9% |
-| **4** | Subword | 0.2981 | 1.230 | 1.71 | 538,126 | 70.2% |
+| **1** | Word | 0.8698 | 1.827 | 5.82 | 59,321 | 13.0% |
+| **1** | Subword | 0.9730 | 1.963 | 11.48 | 4,665 | 2.7% |
+| **2** | Word | 0.2523 | 1.191 | 1.56 | 344,866 | 74.8% |
+| **2** | Subword | 0.5491 | 1.463 | 3.85 | 53,547 | 45.1% |
+| **3** | Word | 0.0707 | 1.050 | 1.12 | 537,872 | 92.9% |
+| **3** | Subword | 0.4976 | 1.412 | 2.68 | 206,241 | 50.2% |
+| **4** | Word | 0.0212 🏆 | 1.015 | 1.03 | 599,865 | 97.9% |
+| **4** | Subword | 0.3012 | 1.232 | 1.72 | 551,827 | 69.9% |

 ### Generated Text Samples (Word-based)

@@ -238,27 +270,27 @@ Below are text samples generated from each word-based Markov chain model:

 **Context Size 1:**

-1. `के बीच शासन और विज्ञापन और २०० फिल्में रिलीज़ हुआ है जिसके कारण ही मैन 2`
-2. `में पाकिस्तान श्रीलंका मे पुणे शहर आरू एकरऽ द्रव्यमान संरक्षण के स्थान पऽ एकाग्र करै लेली`
-3. `है कि विक्की ग्युरेरो के बच्चा के बच्चा के 61 80 मीटर 16 राज्यो मँ संकटग्रस्त`
+1. `के सूची लेख न्यूयॉर्क 5 7 8 839 परिवार रहै के छेलै आरू सॉफ्ट लैंडिंग का`
+2. `में बर्फ़ के कुछ स्थानों पर दृष्टिपात करें पुणे शहर में आविष्कृत इक्वेटोरियम और दक्षिण जॉर्जिया`
+3. `छै जे अध्यक्ष बनान के बाल लिंग अनुपात 750 पुरुष आरु महिला छै जे संस्कृत अभिलेख`

 **Context Size 2:**

-1. `के लिए पौधों को जिन्हें फूलने से पहले उन्होंने डस्टी रोहड्स का भी समर्थन प्राप्त हो सकती`
-2. `के अनुसार मुंजथ गांव के कुल आबादी के साथ दो दुर्भाग्यपूर्ण मामलों के लिए लिख लेते थे`
-3. `छै जे बिहार राज्य मँ स्थित छै जिला पौराणिक काल म॑ विश्व भर में ���० १४`
+1. `के लिए उपलब्ध हैं यह या तो एक दूसरे के बाद वू गुमला वर्तमान झारखंड मँ धर्म`
+2. `के अनुसार उचगांव गांव के कुल आबादी के 4 76 छै रघरिया गाँव के औसत लिंग अनुपात`
+3. `छै जे कुल जनसंख्या के 17 98 छै जे मुख्य भूमि के लिए विला के शिखर का`

 **Context Size 3:**

-1. `छै जेकरा म कुल 22 परिवार रहै छै तुम्बापहाड़ गांव के जनसंख्या 188 छै जेकरा पुरुष आरू`
-2. `जनगणना के अनुसार गोबिंदपुर गाँव के जनसंख्या छै जेकरा पुरुष आरु महिला छै गौरीपुर गांव के औसत लिंग`
-3. `के रूप में अरबी गोंद के साथ मिलाया जा सकता था इसी कारणवश बादशाह मुहम्‍मद बिन तुगलक ने`
+1. `छै जेकरा म कुल 122 परिवार रहै छै जनगणना के अनुसार सरही गांव के आबादी 182 छेलै जेकरा`
+2. `जनगणना के अनुसार दिघी के बाल लिंग अनुपात 695 छै जे बिहार राज्य के औसत 918 स कम`
+3. `के रूप में लाल सेना का नेतृत्व किया और बर्मिंघम अलबामा में के कुछ अहिंसक विरोधों को आयोजित`

 **Context Size 4:**

-1. `छै जेकरा म कुल 545 परिवार रहै छै के जनगणना के अनुसार मुस्तफाबाद गाँव के जनसंख्या 291 छै जे`
-2. `के औसत लिंग अनुपात 782 छै जे बिहार राज्य के औसत 918 स कम छै जनगणना के अनुसार तेतरिया`
-3. `छै जनगणना के अनुसार सहनी खेड़ा के बाल लिंग अनुपात 836 छै जे बिहार राज्य के औसत 918 स`
+1. `छै जेकरा म कुल 64 परिवार रहै छै के जनगणना के अनुसार बरियारपुर के बाल लिंग अनुपात छै जे`
+2. `के औसत लिंग अनुपात 782 छै जे बिहार राज्य के औसत 918 स कम छै जनगणना के अनुसार टकटौली`
+3. `छै जनगणना के अनुसार अमलगरिया गाँव के जनसंख्या 91 छै जेकरा 1 939 पुरुष आरू 1 705`


 ### Generated Text Samples (Subword-based)
@@ -267,34 +299,34 @@ Below are text samples generated from each subword-based Markov chain model:

 **Context Size 1:**

-1. `_ला_का_अनुसाय_उसनता_शि`
-2. `रचरण-हा_जे_के_शिय-_में`
-3. `क_किंगर_मशिक्षा_बांकारलिंग_`
+1. `_बादनसांख्यिकी_प्रभारत_देसदी`
+2. `रख_"_के_इति_बाल_कम्पनी`
+3. `करशान_जन_से_जुड़कर_वि`

 **Context Size 2:**

-1. `र_सम्पादक_छै__नीतिक_न॑_`
-2. `_के_कुल_रूप_पुर_सक्षमता_`
-3. `के_लिमिटेड_श्रेणी:_भास्कराचार_`
+1. `र_अपने_थे।वास्को_आड़े_और_`
+2. `_के_लिए_रहै_कित_दृष्टि)_के`
+3. `के_प्रारम्भिक_है_के_सार_चमत्का`

 **Context Size 3:**

-1. `_के_रूप_से_एक_छै।_जनसंख्या`
-2. `_में_रूचि_रखै_वाला_नहीं_है।_`
-3. `_की_भी_हैं_दाग_डॉक्टर_के_रूप`
+1. `_के_उच्च_पदार्थों_सँ_जुड़ली_गेले`
+2. `_में_तारे_गये_शिवनेरी_किये_जा`
+3. `_की_क्रियाक_सफल_करी_देलोगेल`

 **Context Size 4:**

-1. `_और_श्रुति_साहित्य,_दर्शन_हेतु_`
-2. `_है।_इसके_बजाय_व्याख्यान__ए`
-3. `_छै।_चूना_ऐगो_अंतर_छै,_अपि`
+1. `_और_अंतरिक्ष_में_था।_इसके_म`
+2. `_है।_यह_फिल्म_अभिनेता_से_इन्हों`
+3. `_छै।_नाभिकीय_शक्ति_का_रूप_मँ_`


 ### Key Findings

 - **Best Predictability:** Context-4 (word) with 97.9% predictability
 - **Branching Factor:** Decreases with context size (more deterministic)
-- **Memory Trade-off:** Larger contexts require more storage (538,126 contexts)
+- **Memory Trade-off:** Larger contexts require more storage (551,827 contexts)
 - **Recommendation:** Context-3 or Context-4 for text generation

 ---
@@ -310,64 +342,64 @@ Below are text samples generated from each subword-based Markov chain model:

 | Metric | Value |
 |--------|-------|
-| Vocabulary Size | 26,612 |
-| Total Tokens | 692,487 |
-| Mean Frequency | 26.02 |
+| Vocabulary Size | 27,495 |
+| Total Tokens | 705,736 |
+| Mean Frequency | 25.67 |
 | Median Frequency | 4 |
-| Frequency Std Dev | 316.81 |
+| Frequency Std Dev | 313.78 |

 ### Most Common Words

 | Rank | Word | Frequency |
 |------|------|-----------|
-| 1 | के | 37,114 |
-| 2 | में | 15,064 |
-| 3 | छै | 12,685 |
-| 4 | है | 12,473 |
-| 5 | की | 9,887 |
-| 6 | और | 9,313 |
-| 7 | का | 7,757 |
-| 8 | से | 7,397 |
-| 9 | को | 5,594 |
-| 10 | हैं | 5,335 |
+| 1 | के | 37,476 |
+| 2 | में | 14,866 |
+| 3 | छै | 13,486 |
+| 4 | है | 12,172 |
+| 5 | की | 9,675 |
+| 6 | और | 9,147 |
+| 7 | का | 7,600 |
+| 8 | से | 7,248 |
+| 9 | को | 5,485 |
+| 10 | हैं | 5,201 |

 ### Least Common Words (from vocabulary)

 | Rank | Word | Frequency |
 |------|------|-----------|
-| 1 | pmegp | 2 |
-| 2 | odop | 2 |
-| 3 | naps | 2 |
-| 4 | संवर्द्धन | 2 |
-| 5 | आईज़ | 2 |
-| 6 | रिटेल | 2 |
-| 7 | एक्सीलेंस | 2 |
-| 8 | इंस्टाग्राम | 2 |
-| 9 | कास्टिंग | 2 |
-| 10 | ईयर | 2 |
+| 1 | zeros | 2 |
+| 2 | ignored | 2 |
+| 3 | dmy | 2 |
+| 4 | mdy | 2 |
+| 5 | paren | 2 |
+| 6 | breaking | 2 |
+| 7 | inserted | 2 |
+| 8 | values | 2 |
+| 9 | separator | 2 |
+| 10 | days | 2 |

 ### Zipf's Law Analysis

 | Metric | Value |
 |--------|-------|
-| Zipf Coefficient | 1.1238 |
-| R² (Goodness of Fit) | 0.994960 |
+| Zipf Coefficient | 1.1206 |
+| R² (Goodness of Fit) | 0.994934 |
 | Adherence Quality | **excellent** |

 ### Coverage Analysis

 | Top N Words | Coverage |
 |-------------|----------|
-| Top 100 | 40.3% |
-| Top 1,000 | 69.8% |
-| Top 5,000 | 87.2% |
-| Top 10,000 | 93.2% |
+| Top 100 | 39.9% |
+| Top 1,000 | 69.2% |
+| Top 5,000 | 86.8% |
+| Top 10,000 | 92.8% |

 ### Key Findings

-- **Zipf Compliance:** R²=0.9950 indicates excellent adherence to Zipf's law
-- **High Frequency Dominance:** Top 100 words cover 40.3% of corpus
-- **Long Tail:** 16,612 words needed for remaining 6.8% coverage
+- **Zipf Compliance:** R²=0.9949 indicates excellent adherence to Zipf's law
+- **High Frequency Dominance:** Top 100 words cover 39.9% of corpus
+- **Long Tail:** 17,495 words needed for remaining 7.2% coverage

 ---
 ## 5. Word Embeddings Evaluation
@@ -383,37 +415,40 @@ Below are text samples generated from each subword-based Markov chain model:

 ### 5.1 Cross-Lingual Alignment

-> *Note: Multilingual alignment visualization not available for this language.*
+![Alignment Quality](visualizations/embedding_alignment_quality.png)
+
+![Multilingual t-SNE](visualizations/embedding_tsne_multilingual.png)


 ### 5.2 Model Comparison

 | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
 |-------|-----------|----------|------------------|---------------|----------------|
-| **mono_32d** | 32 | 0.8284 🏆 | 0.3485 | N/A | N/A |
-| **mono_64d** | 64 | 0.6880 | 0.2899 | N/A | N/A |
-| **mono_128d** | 128 | 0.3275 | 0.2699 | N/A | N/A |
+| **mono_32d** | 32 | 0.8282 🏆 | 0.3565 | N/A | N/A |
+| **mono_64d** | 64 | 0.7038 | 0.2985 | N/A | N/A |
+| **mono_128d** | 128 | 0.3364 | 0.2651 | N/A | N/A |
+| **aligned_32d** | 32 | 0.8282 | 0.3484 | 0.0140 | 0.1160 |
+| **aligned_64d** | 64 | 0.7038 | 0.2963 | 0.0320 | 0.1400 |
+| **aligned_128d** | 128 | 0.3364 | 0.2724 | 0.0320 | 0.1640 |

 ### Key Findings

-- **Best Isotropy:** mono_32d with 0.8284 (more uniform distribution)
-- **Semantic Density:** Average pairwise similarity of 0.3027. Lower values indicate better semantic separation.
-- **Alignment Quality:** No aligned models evaluated in this run.
+- **Best Isotropy:** mono_32d with 0.8282 (more uniform distribution)
+- **Semantic Density:** Average pairwise similarity of 0.3062. Lower values indicate better semantic separation.
+- **Alignment Quality:** Aligned models achieve up to 3.2% R@1 in cross-lingual retrieval.
 - **Recommendation:** 128d aligned for best cross-lingual performance

 ---
 ## 6. Morphological Analysis (Experimental)

-> ⚠️ **Warning:** This language shows low morphological productivity. The statistical signals used for this analysis may be noisy or less reliable than for morphologically rich languages.
-
 This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.

 ### 6.1 Productivity & Complexity

 | Metric | Value | Interpretation | Recommendation |
 |--------|-------|----------------|----------------|
-| Productivity Index | **0.000** | Low morphological productivity | ⚠️ Likely unreliable |
-| Idiomaticity Gap | **-1.000** | Low formulaic content | - |
+| Productivity Index | **5.000** | High morphological productivity | Reliable analysis |
+| Idiomaticity Gap | **1.980** | High formulaic/idiomatic content | - |

 ### 6.2 Affix Inventory (Productive Units)
@@ -422,13 +457,11 @@ These are the most productive prefixes and suffixes identified by sampling the v
 #### Productive Prefixes
 | Prefix | Examples |
 |--------|----------|
-| `-प्` | प्रिंत्सीप, प्रतिअंकन, प्रभाग |
-| `-प्र` | प्रिंत्सीप, प्रतिअंकन, प्रभाग |

 #### Productive Suffixes
 | Suffix | Examples |
 |--------|----------|
-| `-ों` | साँपों, अनुभववादियों, रेलों |
+| `-ों` | कबीलों, राजकुमारियों, खातों |

 ### 6.3 Bound Stems (Lexical Roots)
@@ -436,17 +469,16 @@ Bound stems are high-frequency subword units that are semantically cohesive but

 | Stem | Cohesion | Substitutability | Examples |
 |------|----------|------------------|----------|
-| `tion` | 2.57x | 12 contexts | motion, action, section |
-| `atio` | 2.57x | 12 contexts | station, nations, stations |
-| `stat` | 2.59x | 6 contexts | state, states, status |
+| `tion` | 2.62x | 15 contexts | motion, action, nations |
+| `atio` | 2.64x | 12 contexts | nations, station, equation |
+| `stat` | 2.66x | 6 contexts | state, states, statea |

 ### 6.4 Affix Compatibility (Co-occurrence)

 This table shows which prefixes and suffixes most frequently co-occur on the same stems, revealing the 'stacking' rules of the language's morphology.

-| Prefix | Suffix | Frequency | Examples |
-|--------|--------|-----------|----------|
-| `-प्` | `-ों` | 20 words | प्रयासों, प्रकृतिवादियों |
+*No significant affix co-occurrences detected.*
+

 ### 6.5 Recursive Morpheme Segmentation
@@ -454,26 +486,28 @@ Using **Recursive Hierarchical Substitutability**, we decompose complex words in

 | Word | Suggested Split | Confidence | Stem |
 |------|-----------------|------------|------|
-| प्रवृत्ति | **`प्र-वृत्ति`** | 4.5 | `वृत्ति` |
-| अक्षांशों | **`अक्षांश-ों`** | 4.5 | `अक्षांश` |
-| व्यवसायों | **`व्यवसाय-ों`** | 4.5 | `व्यवसाय` |
-| चक्रवातों | **`चक्रवात-ों`** | 4.5 | `चक्रवात` |
-| भागीदारों | **`भागीदार-ों`** | 4.5 | `भागीदार` |
-| तीर्थंकरों | **`तीर्थंकर-ों`** | 4.5 | `तीर्थंकर` |
-| उद्दीपकों | **`उद्दीपक-ों`** | 4.5 | `उद्दीपक` |
-| काव्यतत्वों | **`काव्यतत्व-ों`** | 4.5 | `काव्यतत्व` |
-| संग्रहालयों | **`संग्रहालय-ों`** | 4.5 | `संग्रहालय` |
-| साहित्यकारों | **`साहित्यकार-ों`** | 4.5 | `साहित्यकार` |
-| चिकित्सकों | **`चिकित्सक-ों`** | 4.5 | `चिकित्सक` |
-| उद्देश्यों | **`उद्देश्य-ों`** | 4.5 | `उद्देश्य` |
-| विश्वकोशों | **`विश्वकोश-ों`** | 4.5 | `विश्वकोश` |
-| निष्कर्षों | **`निष्कर्ष-ों`** | 4.5 | `निष्कर्ष` |
-| प्रशंसकों | **`प्र-शंसक-ों`** | 3.0 | `शंसक` |
+| महाविद्यालयों | **`महाविद्यालय-ों`** | 4.5 | `महाविद्यालय` |
+| प्रबंधकों | **`प्रबंधक-ों`** | 4.5 | `प्रबंधक` |
+| चमत्कारों | **`चमत्कार-ों`** | 4.5 | `चमत्कार` |
+| विद्वानों | **`विद्वान-ों`** | 4.5 | `विद्वान` |
+| व्याख्यानों | **`व्याख्यान-ों`** | 4.5 | `व्याख्यान` |
+| कार्टूनों | **`कार्टून-ों`** | 4.5 | `कार्टून` |
+| शास्त्रों | **`शास्त्र-ों`** | 4.5 | `शास्त्र` |
+| कंप्यूटरों | **`कंप्यूटर-ों`** | 4.5 | `कंप्यूटर` |
+| संस्कारों | **`संस्कार-ों`** | 4.5 | `संस्कार` |
+| महासागरों | **`महासागर-ों`** | 4.5 | `महासागर` |
+| पाठ्यक्रमों | **`पाठ्यक्रम-ों`** | 4.5 | `पाठ्यक्रम` |
+| मुसलमानों | **`मुसलमान-ों`** | 4.5 | `मुसलमान` |
+| महाद्वारों | **`महाद्वार-ों`** | 4.5 | `महाद्वार` |
+| चालुक्यों | **`चालुक्य-ों`** | 4.5 | `चालुक्य` |
+| प्रकाशकों | **`प्रकाशक-ों`** | 4.5 | `प्रकाशक` |

 ### 6.6 Linguistic Interpretation

 > **Automated Insight:**
-The language ANP appears to be more isolating or has a highly fixed vocabulary. Word-level models perform nearly as well as subword models, indicating fewer productive morphological processes.
+The language Angika shows high morphological productivity. The subword models are significantly more efficient than word models, suggesting a rich system of affixation or compounding.
+
+> **Note on Idiomaticity:** The high Idiomaticity Gap suggests a large number of frequent multi-word expressions or formulaic sequences that are statistically distinct from their component parts.

 ---
 ## 7. Summary & Recommendations
@@ -485,7 +519,7 @@ The language ANP appears to be more isolating or has a highly fixed vocabulary.
 | Component | Recommended | Rationale |
 |-----------|-------------|-----------|
 | Tokenizer | **32k BPE** | Best compression (3.78x) |
-| N-gram | **2-gram** | Lowest perplexity (1,738) |
+| N-gram | **2-gram** | Lowest perplexity (1,763) |
 | Markov | **Context-4** | Highest predictability (97.9%) |
 | Embeddings | **100d** | Balanced semantic capture and isotropy |
@@ -700,4 +734,4 @@ MIT License - Free for academic and commercial use.
 ---
 *Generated by Wikilangs Models Pipeline*

-*Report Date: 2026-01-03 05:15:16*
+*Report Date: 2026-01-03 14:14:54*
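The tokenizer figures in Section 1 of the updated README are straightforward to check against the shipped models. A minimal sketch, assuming the `.model` files are standard SentencePiece models (the `▁` word-boundary pieces in the report's examples suggest they are), and reading "Compression" as characters per token, which matches the near-identical "Compression" and "Avg Token Len" columns:

```python
# Sketch: tokenize a README sample and compute chars-per-token, the quantity
# the report's "Compression" column appears to track.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="models/tokenizer/anp_tokenizer_32k.model")
text = "मई ग्रेगोरी कैलंडर 5मां महीना छेकै।"  # start of Sample 3 from the README
pieces = sp.encode(text, out_type=str)
print(pieces)
print(f"{len(pieces)} tokens, compression ≈ {len(text) / len(pieces):.2f} chars/token")
```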
models/embeddings/aligned/anp_128d.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7af5575e958475f78e2345f53a960547b317d08c4f564f1e287dc68e616de125
+size 1036402426
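The three-line bodies in this and the following `.bin`/`.npy`/`.parquet` diffs are Git LFS pointer files, not the binaries themselves; only the pointer (spec version, SHA-256 of the object, byte size) lives in the git history. A small sketch parsing one:

```python
# Sketch: parse a git-lfs pointer file into its three fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:7af5575e958475f78e2345f53a960547b317d08c4f564f1e287dc68e616de125
size 1036402426"""
print(parse_lfs_pointer(pointer))  # the ~1.04 GB object itself is stored out-of-band
```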
models/embeddings/aligned/anp_128d.meta.json ADDED
@@ -0,0 +1 @@
+{"lang": "anp", "dim": 128, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/anp_128d.projection.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:211dd9cb35d6746640872c76eb78c86fe575a4d701ef5cf510e8d23359171c14
+size 65664
models/embeddings/aligned/anp_128d_metadata.json ADDED
@@ -0,0 +1,8 @@
+{
+  "language": "anp",
+  "dimension": 128,
+  "version": "aligned",
+  "hub_language": "en",
+  "seed_vocab_size": 1254,
+  "vocab_size": 11815
+}
models/embeddings/aligned/anp_32d.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62587f7000693b582c9132fe07d435b228ee6af1d47b7935f4e2b18424ab4eea
+size 259328506
models/embeddings/aligned/anp_32d.meta.json ADDED
@@ -0,0 +1 @@
+{"lang": "anp", "dim": 32, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/anp_32d.projection.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:648c377b1a1a5d1b4666b6ec914da4647deef248b9e4cdfb29f1dc4a4348720b
+size 4224
models/embeddings/aligned/anp_32d_metadata.json ADDED
@@ -0,0 +1,8 @@
+{
+  "language": "anp",
+  "dimension": 32,
+  "version": "aligned",
+  "hub_language": "en",
+  "seed_vocab_size": 1254,
+  "vocab_size": 11815
+}
models/embeddings/aligned/anp_64d.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d070ab7b4e6d51df3ef5192423b2444c290dcd9623fdc52ba2c66ba353a51a22
+size 518353146
models/embeddings/aligned/anp_64d.meta.json ADDED
@@ -0,0 +1 @@
+{"lang": "anp", "dim": 64, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/anp_64d.projection.npy ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1502bda2f265ab094144f926c666d9545ee36a2e459b061c3e55b39d59ca2b6
+size 16512
models/embeddings/aligned/anp_64d_metadata.json ADDED
@@ -0,0 +1,8 @@
+{
+  "language": "anp",
+  "dimension": 64,
+  "version": "aligned",
+  "hub_language": "en",
+  "seed_vocab_size": 1254,
+  "vocab_size": 11815
+}
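A note on the new `.projection.npy` files: their sizes (4,224, 16,512 and 65,664 bytes) are exactly dim × dim float32 matrices plus the 128-byte `.npy` header, so the cross-lingual alignment appears to be a square linear map over the monolingual vectors. A hedged sketch of applying it; the orientation (`vecs @ W` versus `vecs @ W.T`) is an assumption, and the metadata's `"hub_language": "en"` suggests the target is the English space:

```python
# Sketch: apply the shipped projection to map ANP vectors into the shared space.
import numpy as np

W = np.load("models/embeddings/aligned/anp_128d.projection.npy")
assert W.shape == (128, 128)  # consistent with the 65,664-byte pointer size

vecs = np.random.randn(5, 128).astype(np.float32)  # stand-in for real ANP vectors
aligned = vecs @ W  # hypothesized orientation of the linear map
print(aligned.shape)
```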
models/embeddings/monolingual/anp_128d.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a9caacde775c5d9ed45854d4282f8461327a3fb82d634b04bde4c1a7010bfb01
-size 1036094760
+oid sha256:7af5575e958475f78e2345f53a960547b317d08c4f564f1e287dc68e616de125
+size 1036402426
models/embeddings/monolingual/anp_128d_metadata.json CHANGED
@@ -11,5 +11,5 @@
     "encoding_method": "rope",
     "dim": 128
   },
-  "vocab_size": 11521
+  "vocab_size": 11815
 }
models/embeddings/monolingual/anp_32d.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:550b641eec37665cd8011ee7edf15fb97b5a67b4ea66eddae766e6ab6db5fa8b
-size 259246632
+oid sha256:62587f7000693b582c9132fe07d435b228ee6af1d47b7935f4e2b18424ab4eea
+size 259328506
models/embeddings/monolingual/anp_32d_metadata.json CHANGED
@@ -11,5 +11,5 @@
     "encoding_method": "rope",
     "dim": 32
   },
-  "vocab_size": 11521
+  "vocab_size": 11815
 }
models/embeddings/monolingual/anp_64d.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d2fb6809bbbfd3fdc21a99d5055c629f05387c2aad2b8c7bd73362067d6a1b8
-size 518196008
+oid sha256:d070ab7b4e6d51df3ef5192423b2444c290dcd9623fdc52ba2c66ba353a51a22
+size 518353146
models/embeddings/monolingual/anp_64d_metadata.json CHANGED
@@ -11,5 +11,5 @@
     "encoding_method": "rope",
     "dim": 64
   },
-  "vocab_size": 11521
+  "vocab_size": 11815
 }
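The isotropy and semantic-density figures these embedding updates feed into (README Section 5.2) can be computed from any `(n_words, dim)` matrix. The README describes semantic density as average pairwise similarity; the isotropy formula below (ratio of smallest to largest variance along principal axes) is one common definition and an assumption here:

```python
# Sketch: the two embedding metrics from Section 5.2, on a stand-in matrix.
import numpy as np

def semantic_density(vecs):
    unit = vecs / np.linalg.norm(vecs, axis=1, keepdims=True)
    sims = unit @ unit.T
    n = len(vecs)
    return (sims.sum() - n) / (n * (n - 1))  # mean off-diagonal cosine similarity

def isotropy(vecs):
    eig = np.linalg.eigvalsh(np.cov((vecs - vecs.mean(0)).T))
    return float(eig.min() / eig.max())  # 1.0 = perfectly uniform spread

sample = np.random.randn(1000, 32).astype(np.float32)
print(semantic_density(sample), isotropy(sample))
```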
models/subword_markov/anp_markov_ctx1_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:95c04e6c8a5663b3755575bf1c4f4bd7a74ee1859643e6a15404e0aefd1820ec
-size 364915
+oid sha256:0f8c7d4097372261966c6bc97fb52eb94ecf05aba1b20a249f9d296e00c88066
+size 365191
models/subword_markov/anp_markov_ctx1_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 1,
   "variant": "subword",
   "language": "anp",
-  "unique_contexts": 4617,
-  "total_transitions": 2607475
+  "unique_contexts": 4665,
+  "total_transitions": 2668925
 }
models/subword_markov/anp_markov_ctx2_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:daa9cbe0600238ddf33258710db221432206594aadb8cd97e7c233736df61bc1
-size 1615892
+oid sha256:181129c2fea7a83e1ee86d4443f411e555f84964df22997884ef74fa0443b196
+size 1648766
models/subword_markov/anp_markov_ctx2_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 2,
   "variant": "subword",
   "language": "anp",
-  "unique_contexts": 52772,
-  "total_transitions": 2604446
+  "unique_contexts": 53547,
+  "total_transitions": 2665844
 }
models/subword_markov/anp_markov_ctx3_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c285b9eb6f3174cff5126cc788535f54c2aac763cfabb71374694688d74bb8d7
-size 4795293
+oid sha256:ca8503e1796de5b1b63f4db40515f49a5fd0388d3ee34a09bdfe79795c374ca0
+size 4906034
models/subword_markov/anp_markov_ctx3_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 3,
   "variant": "subword",
   "language": "anp",
-  "unique_contexts": 202187,
-  "total_transitions": 2601417
+  "unique_contexts": 206241,
+  "total_transitions": 2662763
 }
models/subword_markov/anp_markov_ctx4_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cf8bec6d17e4fe549aa7177f537ef96f42090d3142c162b4f6ddcecc9a3ca3f9
-size 10831894
+oid sha256:e36d66386ed99fb8158cd97838a5534c8d7168ccbbec8a30652e2563e832def1
+size 11056053
models/subword_markov/anp_markov_ctx4_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 4,
   "variant": "subword",
   "language": "anp",
-  "unique_contexts": 538126,
-  "total_transitions": 2598388
+  "unique_contexts": 551827,
+  "total_transitions": 2659682
 }
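These transition tables are what the README's generated-text samples are drawn from. A sketch of weighted sampling from the ctx4 subword table above; the column names (`context`, `next`, `count`) and the space-joined context encoding are assumptions about the parquet schema, so inspect `df.columns` before relying on this:

```python
# Sketch: sample a continuation from a Markov transition table.
import random
import pandas as pd

df = pd.read_parquet("models/subword_markov/anp_markov_ctx4_subword.parquet")

def sample_next(context):
    rows = df[df["context"] == " ".join(context)]  # hypothetical encoding
    if rows.empty:  # unseen context: stop generating
        raise KeyError(context)
    return random.choices(rows["next"].tolist(), weights=rows["count"].tolist())[0]

tokens = ["▁के", "▁अनुसार", "▁गाँव", "▁के"]  # a 4-piece seed context
for _ in range(20):
    tokens.append(sample_next(tokens[-4:]))
print("".join(tokens).replace("▁", " "))
```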
models/subword_ngram/anp_2gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:775113617eee04863418a0e3d9184050647852be0f10c589b8fe1da6543c4c55
-size 264208
+oid sha256:f2d4934c85df0e19da88307fe4c1b2c6bd7f7d7584e4e152fff2c1d790e4b622
+size 267872
models/subword_ngram/anp_2gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "n": 2,
   "variant": "subword",
   "language": "anp",
-  "unique_ngrams": 17876,
-  "total_ngrams": 2607475
+  "unique_ngrams": 18130,
+  "total_ngrams": 2668925
 }
models/subword_ngram/anp_3gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d1beda611527797d1c53b1c09dc17f9ce7383569eb890ec2fb07d1f5209c0c22
-size 1044126
+oid sha256:1ee608f9e770b8aaa0c3f06ad058eb1e57096805145b07b1f13bdd793e481aa4
+size 1068374
models/subword_ngram/anp_3gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "n": 3,
   "variant": "subword",
   "language": "anp",
-  "unique_ngrams": 72480,
-  "total_ngrams": 2604446
+  "unique_ngrams": 74071,
+  "total_ngrams": 2665844
 }
models/subword_ngram/anp_4gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1fe6cf4b2975ea16eac745cd73317aa126e7bd7842670a9adabb558d579c736b
-size 3083912
+oid sha256:3bae4cc49c8bd13ed2637bd3e31e241df8714d11482506bf0c1ab0ef70d2bf29
+size 3164693
models/subword_ngram/anp_4gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "n": 4,
   "variant": "subword",
   "language": "anp",
-  "unique_ngrams": 205918,
-  "total_ngrams": 2601417
+  "unique_ngrams": 212245,
+  "total_ngrams": 2662763
 }
models/subword_ngram/anp_5gram_subword.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0c567ba5d3c542b3799178199f32bd1bd08014f13b6b375808a83d18a7d741b
+size 4356539
models/subword_ngram/anp_5gram_subword_metadata.json ADDED
@@ -0,0 +1,7 @@
+{
+  "n": 5,
+  "variant": "subword",
+  "language": "anp",
+  "unique_ngrams": 271380,
+  "total_ngrams": 2659682
+}
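This new 5-gram table is the source of the 5-gram row in the README's n-gram evaluation, whose Entropy and Perplexity columns follow the usual definitions: H in bits over the n-gram distribution and PPL = 2^H (the reported 16.19 bits and 74,529 perplexity are mutually consistent). A sketch, with the `count` column name an assumption about the schema:

```python
# Sketch: distribution entropy (bits) and perplexity from n-gram counts.
import numpy as np
import pandas as pd

counts = pd.read_parquet("models/subword_ngram/anp_5gram_subword.parquet")["count"]
p = counts.to_numpy(dtype=float)
p /= p.sum()
entropy = -(p * np.log2(p)).sum()
print(f"H = {entropy:.2f} bits, perplexity = {2 ** entropy:,.0f}")
```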
models/tokenizer/anp_tokenizer_16k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4dbad16cbb100bec382d11f61c37dc66eb44c51310eda04fcecebe9af7d4c358
-size 626731
+oid sha256:b4c3741b80f17d1eae30558373a32b1e6916523b0ee40e8d07c0d64d3e8f1080
+size 618098
models/tokenizer/anp_tokenizer_16k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/anp_tokenizer_32k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:307cc8ede7a5261bd978c0775a7d475811f92b91caf248ac449e45d8c1a3f03d
-size 1055594
+oid sha256:811d7d1db33e325f5e70f07d4c826bf83fbc30ebff039b047a22c1b821fb235f
+size 1035857
models/tokenizer/anp_tokenizer_32k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/anp_tokenizer_8k.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aa6e8b6fe63bd83b4d41987afef044fc696594969b6cbbbe9dc22aee174de945
-size 430161
+oid sha256:99896dd2de3109457e12c0c017a5ab599dab704efee42adbf365433c86e2b6e2
+size 425391
models/tokenizer/anp_tokenizer_8k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/vocabulary/anp_vocabulary.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:23c24e34a334b98f6f430ae4bfb9547516edad531380c24b4d8922e550ce220d
-size 502668
+oid sha256:8435ca8e03a3c4b629b3f26bb34fa4898fd59714c125734fffc27034f2525995
+size 512958
models/vocabulary/anp_vocabulary_metadata.json CHANGED
@@ -1,17 +1,17 @@
 {
   "language": "anp",
-  "vocabulary_size": 26612,
+  "vocabulary_size": 27495,
   "variant": "full",
   "statistics": {
-    "type_token_ratio": 0.07945542193325766,
+    "type_token_ratio": 0.0804783609588911,
     "coverage": {
-      "top_100": 0.3854363523002036,
-      "top_1000": 0.6679237822959595,
-      "top_5000": 0.8348141768358013,
-      "top_10000": 0.8920299882905762
+      "top_100": 0.38180617731513744,
+      "top_1000": 0.6624765117231244,
+      "top_5000": 0.8305210669168468,
+      "top_10000": 0.8883205305842446
     },
-    "hapax_count": 30862,
-    "hapax_ratio": 0.5369732400737725,
-    "total_documents": 3029
+    "hapax_count": 31866,
+    "hapax_ratio": 0.5368171021377672,
+    "total_documents": 3081
   }
 }
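The Zipf figures in README Section 4 (coefficient 1.1206, R² 0.994934) come from this vocabulary table. A sketch of the standard log-log fit, using frequency ∝ rank^(-s); the `frequency` column name is a guess at the parquet schema:

```python
# Sketch: fit the Zipf coefficient by least squares in log-log space.
import numpy as np
import pandas as pd

freq = pd.read_parquet("models/vocabulary/anp_vocabulary.parquet")["frequency"]
f = np.sort(freq.to_numpy(dtype=float))[::-1]   # frequencies, highest rank first
r = np.arange(1, len(f) + 1)

slope, intercept = np.polyfit(np.log(r), np.log(f), 1)
resid = np.log(f) - (slope * np.log(r) + intercept)
r2 = 1 - (resid ** 2).sum() / ((np.log(f) - np.log(f).mean()) ** 2).sum()
print(f"Zipf coefficient s = {-slope:.4f}, R² = {r2:.6f}")
```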
models/word_markov/anp_markov_ctx1_word.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d37e0230173ff329a06a1b9fde21c583b6b6fb6e94481f23a7a68e72460175c5
-size 2920307
+oid sha256:74c751e08e10f23f5bf6fb2e82f4cd372794298d19dc29e73d0ad818a2cdb098
+size 3049321
models/word_markov/anp_markov_ctx1_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 1,
   "variant": "word",
   "language": "anp",
-  "unique_contexts": 57434,
-  "total_transitions": 720320
+  "unique_contexts": 59321,
+  "total_transitions": 734521
 }
models/word_markov/anp_markov_ctx2_word.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fdf85ce95679a912c641b5ffb10719ad0f16e152363efc3c3922f250107b17b8
-size 8558224
+oid sha256:c2f465a758b1a2d405c79c4ab4f4895682189bc3945752cf5cdfa7a1df55522f
+size 8916451
models/word_markov/anp_markov_ctx2_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 2,
   "variant": "word",
   "language": "anp",
-  "unique_contexts": 333590,
-  "total_transitions": 717291
+  "unique_contexts": 344866,
+  "total_transitions": 731440
 }
models/word_markov/anp_markov_ctx3_word.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d46521c8f50238b1c6c19de18c1d6e31bb2ae9bb5e64373564f718f502ccb21a
-size 12610274
+oid sha256:37b7a565352c35ad4e7733cf1439619b49cd1bcf9a3246a903b5df21368a82f1
+size 13044546
models/word_markov/anp_markov_ctx3_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
   "context_size": 3,
   "variant": "word",
   "language": "anp",
-  "unique_contexts": 522356,
-  "total_transitions": 714262
+  "unique_contexts": 537872,
+  "total_transitions": 728359
 }
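Finally, the Markov columns in README Section 3 (Avg Entropy, Branching Factor, Predictability) are per-context statistics of tables like this one. A sketch under the same schema assumption as above (`context`/`next`/`count` columns), reading "Predictability" as the share of contexts with a single observed successor, which is one plausible interpretation given how closely it tracks the very low context-4 entropy:

```python
# Sketch: per-context entropy, branching factor, and a predictability proxy.
import numpy as np
import pandas as pd

df = pd.read_parquet("models/word_markov/anp_markov_ctx3_word.parquet")

entropies, branchings, deterministic = [], [], 0
for _, g in df.groupby("context"):        # hypothetical schema
    p = g["count"].to_numpy(dtype=float)
    p /= p.sum()
    entropies.append(-(p * np.log2(p)).sum())
    branchings.append(len(p))
    deterministic += len(p) == 1

print("avg entropy:", np.mean(entropies))
print("branching factor:", np.mean(branchings))
print("predictability ≈", deterministic / len(entropies))
```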