jonahdvt commited on
Commit
71da761
·
verified ·
1 Parent(s): 32516d1

End of training

Browse files
README.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ language:
4
+ - multilingual
5
+ license: cc-by-nc-4.0
6
+ base_model: facebook/seamless-m4t-v2-large
7
+ tags:
8
+ - hi,pa,ta,te,ml
9
+ - generated_from_trainer
10
+ datasets:
11
+ - google/fleurs
12
+ model-index:
13
+ - name: "Seamless M4T \u2013 FLEURS African Multilingual Fine\u2011tuning"
14
+ results: []
15
+ ---
16
+
17
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
18
+ should probably proofread and complete it, then remove this comment. -->
19
+
20
+ # Seamless M4T – FLEURS African Multilingual Fine‑tuning
21
+
22
+ This model is a fine-tuned version of [facebook/seamless-m4t-v2-large](https://huggingface.co/facebook/seamless-m4t-v2-large) on the FLEURS dataset.
23
+
24
+ ## Model description
25
+
26
+ More information needed
27
+
28
+ ## Intended uses & limitations
29
+
30
+ More information needed
31
+
32
+ ## Training and evaluation data
33
+
34
+ More information needed
35
+
36
+ ## Training procedure
37
+
38
+ ### Training hyperparameters
39
+
40
+ The following hyperparameters were used during training:
41
+ - learning_rate: 2e-05
42
+ - train_batch_size: 32
43
+ - eval_batch_size: 16
44
+ - seed: 42
45
+ - optimizer: Use adafactor and the args are:
46
+ No additional optimizer arguments
47
+ - lr_scheduler_type: linear
48
+ - lr_scheduler_warmup_steps: 25
49
+ - training_steps: 925
50
+
51
+ ### Training results
52
+
53
+
54
+
55
+ ### Framework versions
56
+
57
+ - Transformers 4.48.3
58
+ - Pytorch 2.6.0+cu124
59
+ - Datasets 3.3.2
60
+ - Tokenizers 0.21.0
config.json ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "facebook/seamless-m4t-v2-large",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "relu",
5
+ "adaptor_dropout": 0.1,
6
+ "adaptor_kernel_size": 8,
7
+ "adaptor_stride": 8,
8
+ "add_adapter": true,
9
+ "architectures": [
10
+ "SeamlessM4Tv2ForSpeechToText"
11
+ ],
12
+ "attention_dropout": 0.1,
13
+ "bos_token_id": 2,
14
+ "char_vocab_size": 10943,
15
+ "conv_depthwise_kernel_size": 31,
16
+ "decoder_attention_heads": 16,
17
+ "decoder_ffn_dim": 8192,
18
+ "decoder_layerdrop": 0.05,
19
+ "decoder_layers": 24,
20
+ "decoder_start_token_id": 3,
21
+ "dropout": 0.1,
22
+ "encoder_attention_heads": 16,
23
+ "encoder_ffn_dim": 8192,
24
+ "encoder_layerdrop": 0.05,
25
+ "encoder_layers": 24,
26
+ "eos_token_id": 3,
27
+ "feature_projection_input_dim": 160,
28
+ "hidden_size": 1024,
29
+ "initializer_range": 0.02,
30
+ "is_encoder_decoder": true,
31
+ "lang_embed_dim": 256,
32
+ "layer_norm_eps": 1e-05,
33
+ "leaky_relu_slope": 0.1,
34
+ "left_max_position_embeddings": 64,
35
+ "max_new_tokens": 256,
36
+ "max_position_embeddings": 4096,
37
+ "model_type": "seamless_m4t_v2",
38
+ "num_adapter_layers": 1,
39
+ "num_attention_heads": 16,
40
+ "num_hidden_layers": 24,
41
+ "pad_token_id": 0,
42
+ "position_embeddings_type": "relative_key",
43
+ "resblock_dilation_sizes": [
44
+ [
45
+ 1,
46
+ 3,
47
+ 5
48
+ ],
49
+ [
50
+ 1,
51
+ 3,
52
+ 5
53
+ ],
54
+ [
55
+ 1,
56
+ 3,
57
+ 5
58
+ ]
59
+ ],
60
+ "resblock_kernel_sizes": [
61
+ 3,
62
+ 7,
63
+ 11
64
+ ],
65
+ "right_max_position_embeddings": 8,
66
+ "sampling_rate": 16000,
67
+ "scale_embedding": true,
68
+ "speech_encoder_attention_heads": 16,
69
+ "speech_encoder_chunk_size": 20000,
70
+ "speech_encoder_dropout": 0.0,
71
+ "speech_encoder_hidden_act": "swish",
72
+ "speech_encoder_intermediate_size": 4096,
73
+ "speech_encoder_layerdrop": 0.1,
74
+ "speech_encoder_layers": 24,
75
+ "speech_encoder_left_chunk_num": 128,
76
+ "spkr_embed_dim": 256,
77
+ "t2u_bos_token_id": 0,
78
+ "t2u_decoder_attention_heads": 16,
79
+ "t2u_decoder_ffn_dim": 8192,
80
+ "t2u_decoder_layers": 6,
81
+ "t2u_encoder_attention_heads": 16,
82
+ "t2u_encoder_ffn_dim": 8192,
83
+ "t2u_encoder_layers": 6,
84
+ "t2u_eos_token_id": 2,
85
+ "t2u_max_position_embeddings": 4096,
86
+ "t2u_pad_token_id": 1,
87
+ "t2u_variance_pred_dropout": 0.5,
88
+ "t2u_variance_predictor_embed_dim": 1024,
89
+ "t2u_variance_predictor_hidden_dim": 256,
90
+ "t2u_variance_predictor_kernel_size": 3,
91
+ "t2u_vocab_size": 10082,
92
+ "torch_dtype": "float32",
93
+ "transformers_version": "4.48.3",
94
+ "unit_embed_dim": 1280,
95
+ "unit_hifi_gan_vocab_size": 10000,
96
+ "upsample_initial_channel": 512,
97
+ "upsample_kernel_sizes": [
98
+ 11,
99
+ 8,
100
+ 8,
101
+ 4,
102
+ 4
103
+ ],
104
+ "upsample_rates": [
105
+ 5,
106
+ 4,
107
+ 4,
108
+ 2,
109
+ 2
110
+ ],
111
+ "use_cache": true,
112
+ "var_pred_dropout": 0.5,
113
+ "variance_predictor_kernel_size": 3,
114
+ "vocab_size": 256102,
115
+ "vocoder_num_langs": 36,
116
+ "vocoder_num_spkrs": 200,
117
+ "vocoder_offset": 4
118
+ }
generation_config.json ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3c1294702ae4e079ef12a331034455f1517fd87f7dd65dd2ec22c50a4081ac1
3
+ size 4999937160
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44eda871b76ab2ca3d3c1f43a1685a172b2b76b03ee93d8f4006348e5a81d9c5
3
+ size 1007614496
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
runs/Apr13_10-02-34_cn-g017.server.mila.quebec/events.out.tfevents.1744552965.cn-g017.server.mila.quebec.3425674.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e285412aefb9d848ac294d71b242934cf6c13ca12f5e51657fa3fe6733bec40
3
+ size 8194
runs/Apr13_12-04-30_cn-g008.server.mila.quebec/events.out.tfevents.1744560294.cn-g008.server.mila.quebec.521158.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a02a3596af34e695f104b843edcc41062236b294bc8344c970500fd100ef5071
3
+ size 8202
runs/Apr13_14-06-56_cn-g008.server.mila.quebec/events.out.tfevents.1744567631.cn-g008.server.mila.quebec.525464.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ff52b3660c12b8d018b9a6545daafb3664e557e328fbcd8bfddede686247be6
3
+ size 8413
runs/Apr13_16-05-52_cn-g008.server.mila.quebec/events.out.tfevents.1744574767.cn-g008.server.mila.quebec.529659.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6e72a69d6d1777494cfd8644f3d4af54e3d4f34ddb39dcebee4a04b7f1eefc4
3
+ size 8768
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9801bc9e9a5d4dc3f458101dfde18a040b1b590cd29c35642e5103a68d5fffd
3
+ size 5496