End of training

Files changed:
- README.md (+24 -2)
- all_results.json (+6 -6)
- eval_results.json (+3 -3)
- runs/Jan01_18-34-16_vorace/events.out.tfevents.1704134182.vorace.75244.1 (+3 -0)
- train_results.json (+3 -3)
- trainer_state.json (+3 -3)
README.md CHANGED

@@ -1,12 +1,31 @@
 ---
+language:
+- af
 base_model: ylacombe/w2v-bert-2.0
 tags:
+- automatic-speech-recognition
+- mozilla-foundation/common_voice_16_0
 - generated_from_trainer
 datasets:
 - common_voice_16_0
+metrics:
+- wer
 model-index:
 - name: wav2vec2-common_voice-af-demo
-  results:
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: MOZILLA-FOUNDATION/COMMON_VOICE_16_0 - AF
+      type: common_voice_16_0
+      config: af
+      split: test
+      args: 'Config: af, Training split: train+validation, Eval split: test'
+    metrics:
+    - name: Wer
+      type: wer
+      value: 1.0
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -14,7 +33,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # wav2vec2-common_voice-af-demo
 
-This model is a fine-tuned version of [ylacombe/w2v-bert-2.0](https://huggingface.co/ylacombe/w2v-bert-2.0) on the
+This model is a fine-tuned version of [ylacombe/w2v-bert-2.0](https://huggingface.co/ylacombe/w2v-bert-2.0) on the MOZILLA-FOUNDATION/COMMON_VOICE_16_0 - AF dataset.
+It achieves the following results on the evaluation set:
+- Loss: inf
+- Wer: 1.0
 
 ## Model description
 
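The updated card declares the task and metric but still has no usage snippet. Below is a minimal inference sketch; the repo id `<username>/wav2vec2-common_voice-af-demo` is a placeholder (the commit does not name the final namespace), and it assumes the checkpoint loads through transformers' generic CTC classes:

```python
# Minimal ASR inference sketch for the fine-tuned checkpoint.
# The repo id below is hypothetical; substitute the published one.
import torch
from transformers import AutoModelForCTC, AutoProcessor

repo_id = "<username>/wav2vec2-common_voice-af-demo"  # hypothetical repo id
processor = AutoProcessor.from_pretrained(repo_id)
model = AutoModelForCTC.from_pretrained(repo_id)
model.eval()

def transcribe(audio):
    # `audio` is a 1-D float array sampled at 16 kHz, the rate Common Voice
    # clips are resampled to during preprocessing.
    inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    pred_ids = torch.argmax(logits, dim=-1)
    return processor.batch_decode(pred_ids)[0]
```

Given the reported Wer of 1.0, transcriptions from this checkpoint are expected to be empty or wrong; the sketch only shows the loading path.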
all_results.json CHANGED

@@ -1,14 +1,14 @@
 {
     "epoch": 15.0,
     "eval_loss": Infinity,
-    "eval_runtime": 1.
+    "eval_runtime": 1.2428,
     "eval_samples": 28,
-    "eval_samples_per_second":
-    "eval_steps_per_second": 3.
+    "eval_samples_per_second": 22.53,
+    "eval_steps_per_second": 3.219,
     "eval_wer": 1.0,
     "train_loss": 0.0,
-    "train_runtime":
+    "train_runtime": 83.0972,
     "train_samples": 58,
-    "train_samples_per_second": 10.
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 10.47,
+    "train_steps_per_second": 0.361
 }
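The throughput values are mutually consistent; a quick arithmetic check (mine, not part of the commit) reproduces them from the runtimes, the sample counts, and the 30 optimizer steps recorded in trainer_state.json:

```python
# Sanity-check the reported throughput figures (arithmetic only).
train_samples, epochs, train_runtime = 58, 15, 83.0972
print(train_samples * epochs / train_runtime)  # ~10.47 = train_samples_per_second
print(30 / train_runtime)                      # ~0.361 = train_steps_per_second (30 steps)

eval_samples, eval_runtime = 28, 1.2428
print(eval_samples / eval_runtime)             # ~22.53 = eval_samples_per_second
```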
eval_results.json CHANGED

@@ -1,9 +1,9 @@
 {
     "epoch": 15.0,
     "eval_loss": Infinity,
-    "eval_runtime": 1.
+    "eval_runtime": 1.2428,
     "eval_samples": 28,
-    "eval_samples_per_second":
-    "eval_steps_per_second": 3.
+    "eval_samples_per_second": 22.53,
+    "eval_steps_per_second": 3.219,
     "eval_wer": 1.0
 }
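An eval_wer of 1.0 means no reference word survived: every word was substituted, inserted, or deleted, consistent with the infinite eval loss. For reference, this is how the metric is computed with the evaluate library (illustrative strings only, not the commit's evaluation code):

```python
# WER via the `evaluate` library; the strings here are illustrative only.
import evaluate

wer_metric = evaluate.load("wer")
wer = wer_metric.compute(
    predictions=[""],                   # e.g. an empty hypothesis
    references=["dit is 'n toetssin"],  # one made-up Afrikaans reference
)
print(wer)  # 1.0: every reference word counts as a deletion
```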
runs/Jan01_18-34-16_vorace/events.out.tfevents.1704134182.vorace.75244.1 ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2ef51ab36cf9dd349ef3de5592691cd80bb20b7915b7d46040d4f81e7b98cc4
+size 352
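Only the Git LFS pointer is committed; the 352-byte payload is the TensorBoard event file holding the logged scalars. Once pulled locally, it can be read with TensorBoard's event accumulator (a sketch; the exact tag names depend on what the Trainer logged):

```python
# Inspect a tfevents file with TensorBoard's EventAccumulator.
# Assumes the LFS object has been pulled so the run directory holds real data.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jan01_18-34-16_vorace")
acc.Reload()
for tag in acc.Tags()["scalars"]:       # tag names are whatever was logged
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
```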
train_results.json CHANGED

@@ -1,8 +1,8 @@
 {
     "epoch": 15.0,
     "train_loss": 0.0,
-    "train_runtime":
+    "train_runtime": 83.0972,
     "train_samples": 58,
-    "train_samples_per_second": 10.
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 10.47,
+    "train_steps_per_second": 0.361
 }
trainer_state.json CHANGED

@@ -13,9 +13,9 @@
       "step": 30,
       "total_flos": 2.076343463106048e+17,
       "train_loss": 0.0,
-      "train_runtime":
-      "train_samples_per_second": 10.
-      "train_steps_per_second": 0.
+      "train_runtime": 83.0972,
+      "train_samples_per_second": 10.47,
+      "train_steps_per_second": 0.361
     }
   ],
   "logging_steps": 500,
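The step counter pins down the training geometry, and a back-of-envelope (my inference; batch sizes are not recorded in this commit) narrows the batch sizes: 30 steps over 15 epochs is 2 optimizer steps per epoch, and roughly 4 eval steps cover the 28 eval samples:

```python
import math

# Back-of-envelope from the logged counters; batch sizes are inferred, not logged.
steps, epochs, train_samples = 30, 15, 58
steps_per_epoch = steps // epochs  # 2 optimizer steps per epoch
# Any effective train batch size b with ceil(58 / b) == 2 is consistent: 29..57.
print([b for b in range(1, train_samples + 1)
       if math.ceil(train_samples / b) == steps_per_epoch])

eval_steps = round(3.219 * 1.2428)  # eval_steps_per_second * eval_runtime ~= 4
# Eval batch sizes consistent with 4 steps over 28 samples: 7..9.
print([b for b in range(1, 29) if math.ceil(28 / b) == eval_steps])
```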