modelId: string
author: string
last_modified: timestamp[us, tz=UTC]
downloads: int64
likes: int64
library_name: string
tags: list
pipeline_tag: string
createdAt: timestamp[us, tz=UTC]
card: string
jonatasgrosman/exp_w2v2t_ar_wavlm_s3
jonatasgrosman
2022-07-10T16:49:12Z
5
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T16:48:27Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_wavlm_s3 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
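These HuggingSound checkpoints are tagged for the `transformers` library, so they can be loaded with its speech-recognition pipeline. A minimal sketch, assuming a local 16 kHz audio file (the path below is a placeholder, not a file from the dataset):

```python
from transformers import pipeline

# Load the checkpoint above for Arabic speech recognition.
asr = pipeline("automatic-speech-recognition", model="jonatasgrosman/exp_w2v2t_ar_wavlm_s3")

# "audio_16khz.wav" is a placeholder; per the card, speech input should be sampled at 16 kHz.
print(asr("audio_16khz.wav")["text"])
```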
jonatasgrosman/exp_w2v2t_ar_wavlm_s95
jonatasgrosman
2022-07-10T16:45:40Z
5
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T16:44:57Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_wavlm_s95 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_ar_no-pretraining_s368
jonatasgrosman
2022-07-10T16:42:21Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T16:41:56Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_no-pretraining_s368 Fine-tuned randomly initialized wav2vec2 model for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_ar_no-pretraining_s808
jonatasgrosman
2022-07-10T16:39:13Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T16:38:27Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_no-pretraining_s808 Fine-tuned randomly initialized wav2vec2 model for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_ar_no-pretraining_s6
jonatasgrosman
2022-07-10T16:35:47Z
5
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T16:35:23Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_no-pretraining_s6 Fine-tuned randomly initialized wav2vec2 model for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_ar_vp-sv_s953
jonatasgrosman
2022-07-10T16:30:00Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T16:29:18Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_vp-sv_s953 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
hirohiroz/wav2vec2-base-timit-demo-google-colab
hirohiroz
2022-07-10T16:28:09Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:15:00Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-base-timit-demo-google-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-timit-demo-google-colab This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5173 - Wer: 0.3399 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 3.5684 | 1.0 | 500 | 2.1662 | 1.0068 | | 0.9143 | 2.01 | 1000 | 0.5820 | 0.5399 | | 0.439 | 3.01 | 1500 | 0.4596 | 0.4586 | | 0.3122 | 4.02 | 2000 | 0.4623 | 0.4181 | | 0.2391 | 5.02 | 2500 | 0.4243 | 0.3938 | | 0.1977 | 6.02 | 3000 | 0.4421 | 0.3964 | | 0.1635 | 7.03 | 3500 | 0.5076 | 0.3977 | | 0.145 | 8.03 | 4000 | 0.4639 | 0.3754 | | 0.1315 | 9.04 | 4500 | 0.5181 | 0.3652 | | 0.1131 | 10.04 | 5000 | 0.4496 | 0.3778 | | 0.1005 | 11.04 | 5500 | 0.4438 | 0.3664 | | 0.0919 | 12.05 | 6000 | 0.4868 | 0.3865 | | 0.0934 | 13.05 | 6500 | 0.5163 | 0.3694 | | 0.076 | 14.06 | 7000 | 0.4543 | 0.3719 | | 0.0727 | 15.06 | 7500 | 0.5296 | 0.3807 | | 0.0657 | 16.06 | 8000 | 0.4715 | 0.3699 | | 0.0578 | 17.07 | 8500 | 0.4927 | 0.3699 | | 0.057 | 18.07 | 9000 | 0.4767 | 0.3660 | | 0.0493 | 19.08 | 9500 | 0.5306 | 0.3623 | | 0.0425 | 20.08 | 10000 | 0.4828 | 0.3561 | | 0.0431 | 21.08 | 10500 | 0.4875 | 0.3620 | | 0.0366 | 22.09 | 11000 | 0.4984 | 0.3482 | | 0.0332 | 23.09 | 11500 | 0.5375 | 0.3477 | | 0.0348 | 24.1 | 12000 | 0.5406 | 0.3361 | | 0.0301 | 25.1 | 12500 | 0.4954 | 0.3381 | | 0.0294 | 26.1 | 13000 | 0.5033 | 0.3424 | | 0.026 | 27.11 | 13500 | 0.5254 | 0.3384 | | 0.0243 | 28.11 | 14000 | 0.5189 | 0.3402 | | 0.0221 | 29.12 | 14500 | 0.5173 | 0.3399 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 1.18.3 - Tokenizers 0.12.1
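The hyperparameters listed in this card map directly onto `transformers` `TrainingArguments`. A sketch of how they might be written out, where `output_dir` is assumed rather than taken from the card and "Native AMP" mixed precision is expressed as `fp16=True`:

```python
from transformers import TrainingArguments

# Sketch of the card's listed hyperparameters; output_dir is an assumed placeholder.
training_args = TrainingArguments(
    output_dir="wav2vec2-base-timit-demo-google-colab",
    learning_rate=1e-4,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=1000,
    num_train_epochs=30,
    fp16=True,  # "Native AMP" mixed-precision training
)
```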
jonatasgrosman/exp_w2v2t_ar_hubert_s290
jonatasgrosman
2022-07-10T16:16:34Z
5
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T16:16:09Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_hubert_s290 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_ar_unispeech_s574
jonatasgrosman
2022-07-10T16:07:15Z
5
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T16:06:49Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_unispeech_s574 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_ar_xlsr-53_s34
jonatasgrosman
2022-07-10T16:00:21Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:59:56Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_xlsr-53_s34 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_ar_vp-100k_s564
jonatasgrosman
2022-07-10T15:53:59Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:53:33Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_vp-100k_s564 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_ar_wav2vec2_s108
jonatasgrosman
2022-07-10T15:36:24Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:35:41Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_wav2vec2_s108 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_ar_wav2vec2_s364
jonatasgrosman
2022-07-10T15:30:16Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:29:50Z
--- language: - ar license: apache-2.0 tags: - automatic-speech-recognition - ar datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_ar_wav2vec2_s364 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (ar)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_vp-it_s557
jonatasgrosman
2022-07-10T15:23:53Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:23:28Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-it_s557 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_vp-it_s579
jonatasgrosman
2022-07-10T15:21:00Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:20:35Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-it_s579 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
infinitejoy/MLAgents-Pyramids
infinitejoy
2022-07-10T15:20:19Z
11
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "unity-ml-agents", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
2022-07-10T06:12:31Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids library_name: ml-agents --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Write your model_id: infinitejoy/MLAgents-Pyramids 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
ShooterRon/mt5-small_summarization
ShooterRon
2022-07-10T15:19:23Z
3
0
transformers
[ "transformers", "pytorch", "mt5", "text2text-generation", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2022-07-10T11:58:45Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: mt5-small_summarization results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small_summarization This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.1774 - Rouge1: 18.2118 - Rouge2: 6.6244 - Rougel: 15.4682 - Rougelsum: 15.3942 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 17.7253 | 1.0 | 50 | 7.6921 | 6.677 | 1.1111 | 6.5586 | 6.6861 | | 9.8457 | 2.0 | 100 | 4.5604 | 12.8991 | 1.9103 | 11.2559 | 10.9036 | | 6.2403 | 3.0 | 150 | 3.9071 | 16.463 | 4.0695 | 14.3098 | 14.4065 | | 5.2032 | 4.0 | 200 | 3.4869 | 17.6601 | 4.0878 | 14.2931 | 14.2743 | | 4.8331 | 5.0 | 250 | 3.3472 | 18.5241 | 5.3312 | 15.8993 | 16.0559 | | 4.526 | 6.0 | 300 | 3.2346 | 19.0264 | 5.7839 | 15.8013 | 16.1208 | | 4.5378 | 7.0 | 350 | 3.1927 | 18.9843 | 6.992 | 16.3787 | 16.3574 | | 4.3278 | 8.0 | 400 | 3.1774 | 18.2118 | 6.6244 | 15.4682 | 15.3942 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0 - Datasets 2.3.2 - Tokenizers 0.12.1
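A minimal sketch of running this checkpoint through the `transformers` summarization pipeline; the input text and generation length limits are illustrative assumptions, not values from the (unknown) training data:

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="ShooterRon/mt5-small_summarization")

# Placeholder input; replace with the article to be summarized.
text = "Replace this with the article you want summarized."
print(summarizer(text, max_length=48, min_length=8)[0]["summary_text"])
```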
jonatasgrosman/exp_w2v2t_uk_r-wav2vec2_s714
jonatasgrosman
2022-07-10T15:18:04Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:17:17Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_r-wav2vec2_s714 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_r-wav2vec2_s626
jonatasgrosman
2022-07-10T15:14:40Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:13:54Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_r-wav2vec2_s626 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_r-wav2vec2_s156
jonatasgrosman
2022-07-10T15:11:20Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:10:40Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_r-wav2vec2_s156 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_xls-r_s246
jonatasgrosman
2022-07-10T15:05:00Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:04:35Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_xls-r_s246 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_xls-r_s784
jonatasgrosman
2022-07-10T15:02:05Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T15:01:41Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_xls-r_s784 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_unispeech-sat_s335
jonatasgrosman
2022-07-10T14:59:02Z
3
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:58:37Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_unispeech-sat_s335 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_unispeech-sat_s27
jonatasgrosman
2022-07-10T14:56:11Z
4
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:55:46Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_unispeech-sat_s27 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonaskoenig/xtremedistil-l6-h384-uncased-future-time-references
jonaskoenig
2022-07-10T14:55:35Z
4
0
transformers
[ "transformers", "tf", "bert", "text-classification", "generated_from_keras_callback", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-07-04T16:16:38Z
--- license: mit tags: - generated_from_keras_callback model-index: - name: xtremedistil-l6-h384-uncased-future-time-references results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # xtremedistil-l6-h384-uncased-future-time-references This model is a fine-tuned version of [microsoft/xtremedistil-l6-h256-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h256-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0279 - Train Binary Crossentropy: 0.4809 - Epoch: 9 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 3e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Binary Crossentropy | Epoch | |:----------:|:-------------------------:|:-----:| | 0.0487 | 0.6401 | 0 | | 0.0348 | 0.5925 | 1 | | 0.0319 | 0.5393 | 2 | | 0.0306 | 0.5168 | 3 | | 0.0298 | 0.5045 | 4 | | 0.0292 | 0.4970 | 5 | | 0.0288 | 0.4916 | 6 | | 0.0284 | 0.4878 | 7 | | 0.0282 | 0.4836 | 8 | | 0.0279 | 0.4809 | 9 | ### Framework versions - Transformers 4.20.1 - TensorFlow 2.9.1 - Datasets 2.3.2 - Tokenizers 0.12.1
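A sketch of loading this TensorFlow checkpoint with `transformers`; the card does not document the label set, so the example only prints raw logits, and the input sentence is an illustrative assumption:

```python
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

model_id = "jonaskoenig/xtremedistil-l6-h384-uncased-future-time-references"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModelForSequenceClassification.from_pretrained(model_id)

# The card does not document the labels, so only the raw logits are shown.
inputs = tokenizer("We will meet next Tuesday.", return_tensors="tf")
print(model(**inputs).logits)
```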
jonatasgrosman/exp_w2v2t_uk_vp-nl_s469
jonatasgrosman
2022-07-10T14:45:36Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:44:42Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-nl_s469 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_vp-nl_s934
jonatasgrosman
2022-07-10T14:42:05Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:41:24Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-nl_s934 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_vp-es_s692
jonatasgrosman
2022-07-10T14:38:57Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:38:31Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-es_s692 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
Varnez/username-model_architecture-end_id
Varnez
2022-07-10T14:38:22Z
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2022-07-09T19:29:36Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 222.70 +/- 27.92 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
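The usage section of this card is left as a TODO; one possible completion with `huggingface_sb3` and `stable_baselines3`, where the checkpoint filename is an assumption not confirmed by the card:

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The filename is an assumption; check the repository's files for the actual archive name.
checkpoint = load_from_hub(
    repo_id="Varnez/username-model_architecture-end_id",
    filename="ppo-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)

# Roll out one episode with the classic gym API (gym < 0.26).
env = gym.make("LunarLander-v2")
obs = env.reset()
done = False
while not done:
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
env.close()
```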
jonatasgrosman/exp_w2v2t_uk_vp-es_s609
jonatasgrosman
2022-07-10T14:35:58Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:35:11Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-es_s609 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_vp-fr_s255
jonatasgrosman
2022-07-10T14:29:35Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:28:50Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-fr_s255 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_unispeech-ml_s226
jonatasgrosman
2022-07-10T14:19:36Z
4
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:19:11Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_unispeech-ml_s226 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_unispeech-ml_s156
jonatasgrosman
2022-07-10T14:16:33Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:16:08Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_unispeech-ml_s156 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_wavlm_s21
jonatasgrosman
2022-07-10T14:10:30Z
4
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:09:55Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_wavlm_s21 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_wavlm_s474
jonatasgrosman
2022-07-10T14:07:30Z
5
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:07:02Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_wavlm_s474 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_no-pretraining_s467
jonatasgrosman
2022-07-10T14:00:49Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T14:00:03Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_no-pretraining_s467 Fine-tuned randomly initialized wav2vec2 model for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_vp-sv_s911
jonatasgrosman
2022-07-10T13:43:28Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:42:47Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-sv_s911 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_hubert_s878
jonatasgrosman
2022-07-10T13:39:58Z
4
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:39:33Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_hubert_s878 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_hubert_s496
jonatasgrosman
2022-07-10T13:37:03Z
4
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:36:38Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_hubert_s496 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_hubert_s33
jonatasgrosman
2022-07-10T13:34:02Z
5
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:33:37Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_hubert_s33 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_unispeech_s558
jonatasgrosman
2022-07-10T13:24:48Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:24:23Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_unispeech_s558 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_xlsr-53_s965
jonatasgrosman
2022-07-10T13:18:51Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:18:27Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_xlsr-53_s965 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_xlsr-53_s411
jonatasgrosman
2022-07-10T13:16:00Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:15:35Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_xlsr-53_s411 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_vp-100k_s1
jonatasgrosman
2022-07-10T13:13:07Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:12:42Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-100k_s1 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_vp-100k_s791
jonatasgrosman
2022-07-10T13:07:14Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T13:06:50Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_vp-100k_s791 Fine-tuned [facebook/wav2vec2-large-100k-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_wav2vec2_s317
jonatasgrosman
2022-07-10T12:56:58Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:56:24Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_wav2vec2_s317 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_uk_wav2vec2_s578
jonatasgrosman
2022-07-10T12:49:32Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "uk", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:49:06Z
--- language: - uk license: apache-2.0 tags: - automatic-speech-recognition - uk datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_uk_wav2vec2_s578 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (uk)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_r-wav2vec2_s466
jonatasgrosman
2022-07-10T12:28:19Z
10
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:27:31Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_r-wav2vec2_s466 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_r-wav2vec2_s460
jonatasgrosman
2022-07-10T12:19:59Z
10
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:19:30Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_r-wav2vec2_s460 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_xls-r_s143
jonatasgrosman
2022-07-10T12:13:39Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:12:57Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_xls-r_s143 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_unispeech-sat_s480
jonatasgrosman
2022-07-10T12:06:37Z
5
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:05:47Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_unispeech-sat_s480 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_unispeech-sat_s75
jonatasgrosman
2022-07-10T12:02:58Z
3
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T12:02:15Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_unispeech-sat_s75 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-nl_s247
jonatasgrosman
2022-07-10T11:51:55Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:51:10Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-nl_s247 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-nl_s8
jonatasgrosman
2022-07-10T11:48:28Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:47:59Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-nl_s8 Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-fr_s830
jonatasgrosman
2022-07-10T11:33:48Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:33:16Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-fr_s830 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_unispeech-ml_s952
jonatasgrosman
2022-07-10T11:17:22Z
5
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:16:50Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_unispeech-ml_s952 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_wavlm_s101
jonatasgrosman
2022-07-10T11:14:10Z
3
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:13:26Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_wavlm_s101 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_wavlm_s295
jonatasgrosman
2022-07-10T11:07:17Z
5
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T11:06:30Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_wavlm_s295 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_no-pretraining_s539
jonatasgrosman
2022-07-10T10:55:36Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:54:46Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_no-pretraining_s539 Fine-tuned randomly initialized wav2vec2 model for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_vp-sv_s347
jonatasgrosman
2022-07-10T10:50:07Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:49:18Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-sv_s347 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
dingusagar/vit-base-avengers-v1
dingusagar
2022-07-10T10:47:03Z
52
0
transformers
[ "transformers", "pytorch", "tensorboard", "vit", "image-classification", "generated_from_trainer", "dataset:imagefolder", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2022-07-09T15:05:01Z
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imagefolder
metrics:
- accuracy
model-index:
- name: vit-base-avengers-v1
  results:
  - task:
      name: Image Classification
      type: image-classification
    dataset:
      name: imagefolder
      type: imagefolder
      args: avengers-dataset
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.8683385579937304
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vit-base-avengers-v1

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5324
- Accuracy: 0.8683

Refer to this [Medium article](https://medium.com/@dingusagar/marvel-character-classification-by-fine-tuning-vision-transformer-45c14a7d8719) for more info on how it was trained.

## Limitations

Training was done on Google Images results for these search terms, each representing a class:
Iron Man, Captain America, Thor, Spider Man, Doctor Strange, Black Panther, Ant Man, Captain Marvel, Hulk, Black Widow, Hawkeye Avengers, Scarlet Witch, Vision Avengers, Bucky Barnes, Falcon Avengers, Loki.
The model has therefore mostly seen images in which these superheroes are in their suits or superhero outfits. For example, an image of the Hulk is detected correctly, but an image of Bruce Banner is not, simply because the model hasn't seen such images. A little data augmentation would help.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.8183        | 1.27  | 100  | 1.0134          | 0.8464   |
| 0.2234        | 2.53  | 200  | 0.6146          | 0.8495   |
| 0.1206        | 3.8   | 300  | 0.5324          | 0.8683   |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
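As a usage illustration that is not part of the original card, the snippet below runs the checkpoint through the image-classification pipeline; the image path is a placeholder, and the printed labels are whatever class names were saved during training.

```python
# Hypothetical inference sketch for dingusagar/vit-base-avengers-v1.
from transformers import pipeline

classifier = pipeline("image-classification", model="dingusagar/vit-base-avengers-v1")

predictions = classifier("hulk_in_suit.jpg")  # placeholder image file
for pred in predictions:
    print(f"{pred['label']}: {pred['score']:.3f}")
```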
jonatasgrosman/exp_w2v2t_de_vp-sv_s22
jonatasgrosman
2022-07-10T10:42:55Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:42:10Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_vp-sv_s22 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_hubert_s55
jonatasgrosman
2022-07-10T10:39:16Z
3
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:38:28Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_hubert_s55 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_hubert_s921
jonatasgrosman
2022-07-10T10:35:39Z
3
0
transformers
[ "transformers", "pytorch", "hubert", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:34:53Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_hubert_s921 Fine-tuned [facebook/hubert-large-ll60k](https://huggingface.co/facebook/hubert-large-ll60k) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_unispeech_s587
jonatasgrosman
2022-07-10T10:19:04Z
4
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T10:18:16Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_unispeech_s587 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_wav2vec2_s144
jonatasgrosman
2022-07-10T09:51:12Z
10
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:50:40Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_wav2vec2_s144 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_de_wav2vec2_s982
jonatasgrosman
2022-07-10T09:47:46Z
10
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:46:58Z
--- language: - de license: apache-2.0 tags: - automatic-speech-recognition - de datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_de_wav2vec2_s982 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (de)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-it_s692
jonatasgrosman
2022-07-10T09:43:20Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:42:43Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-it_s692 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
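Since these checkpoints are produced with HuggingSound, they can also be loaded through that library instead of the raw transformers API. The sketch below is an assumption-laden illustration: the audio paths are placeholders and the inputs are expected to be 16 kHz recordings.

```python
# Hypothetical HuggingSound-based transcription sketch for the checkpoint above.
from huggingsound import SpeechRecognitionModel

model = SpeechRecognitionModel("jonatasgrosman/exp_w2v2t_id_vp-it_s692")

audio_paths = ["indonesian_sample_1.wav", "indonesian_sample_2.wav"]  # placeholder files
transcriptions = model.transcribe(audio_paths)
print(transcriptions[0]["transcription"])
```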
geninhu/fastai-style-transfer-japanese-art
geninhu
2022-07-10T09:35:47Z
0
0
fastai
[ "fastai", "region:us" ]
null
2022-07-10T09:35:21Z
--- tags: - fastai --- # Amazing! 🥳 Congratulations on hosting your fastai model on the Hugging Face Hub! # Some next steps 1. Fill out this model card with more information (see the template below and the [documentation here](https://huggingface.co/docs/hub/model-repos))! 2. Create a demo in Gradio or Streamlit using 🤗 Spaces ([documentation here](https://huggingface.co/docs/hub/spaces)). 3. Join the fastai community on the [Fastai Discord](https://discord.com/invite/YKrxeNn)! Greetings fellow fastlearner 🤝! Don't forget to delete this content from your model card. --- # Model card ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed
jonatasgrosman/exp_w2v2t_id_vp-it_s211
jonatasgrosman
2022-07-10T09:32:10Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:31:46Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-it_s211 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-it_s609
jonatasgrosman
2022-07-10T09:29:17Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:28:44Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-it_s609 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_r-wav2vec2_s79
jonatasgrosman
2022-07-10T09:18:59Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:18:33Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_r-wav2vec2_s79 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech-sat_s477
jonatasgrosman
2022-07-10T09:06:39Z
3
0
transformers
[ "transformers", "pytorch", "unispeech-sat", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T09:06:15Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech-sat_s477 Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
casasdorjunior/t5-small-finetuned-xlsum
casasdorjunior
2022-07-10T08:50:55Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "dataset:xlsum", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
2022-07-10T06:26:38Z
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- xlsum
metrics:
- rouge
model-index:
- name: t5-small-finetuned-xlsum
  results:
  - task:
      name: Sequence-to-sequence Language Modeling
      type: text2text-generation
    dataset:
      name: xlsum
      type: xlsum
      args: spanish
    metrics:
    - name: Rouge1
      type: rouge
      value: 15.4289
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# t5-small-finetuned-xlsum

This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the xlsum dataset.
It achieves the following results on the evaluation set:
- Loss: 2.6974
- Rouge1: 15.4289
- Rouge2: 3.146
- Rougel: 12.7682
- Rougelsum: 12.912
- Gen Len: 18.9889

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge1  | Rouge2 | Rougel  | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:|
| 2.9764        | 1.0   | 2382 | 2.6974          | 15.4289 | 3.146  | 12.7682 | 12.912    | 18.9889 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
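The card does not show how to call the model, so the following is a hedged usage sketch: the input text is an invented placeholder, the checkpoint was fine-tuned on the Spanish XL-Sum split, and whether a `summarize:` prefix was used during fine-tuning is not documented, so none is added here.

```python
# Hypothetical summarization sketch for casasdorjunior/t5-small-finetuned-xlsum.
from transformers import pipeline

summarizer = pipeline("summarization", model="casasdorjunior/t5-small-finetuned-xlsum")

article = (
    "El gobierno presentó este lunes un nuevo plan económico que incluye medidas "
    "para reducir la inflación y apoyar a las pequeñas empresas."  # placeholder text
)
summary = summarizer(article, max_length=48, min_length=8)
print(summary[0]["summary_text"])
```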
jonatasgrosman/exp_w2v2t_id_vp-es_s632
jonatasgrosman
2022-07-10T08:48:32Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:48:08Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-es_s632 Fine-tuned [facebook/wav2vec2-large-es-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-es-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-fr_s27
jonatasgrosman
2022-07-10T08:39:22Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:38:58Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-fr_s27 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-fr_s335
jonatasgrosman
2022-07-10T08:36:26Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:36:02Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-fr_s335 Fine-tuned [facebook/wav2vec2-large-fr-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-fr-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech-ml_s418
jonatasgrosman
2022-07-10T08:30:13Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:29:37Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech-ml_s418 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech-ml_s325
jonatasgrosman
2022-07-10T08:18:06Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:17:22Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech-ml_s325 Fine-tuned [microsoft/unispeech-large-multi-lingual-1500h-cv](https://huggingface.co/microsoft/unispeech-large-multi-lingual-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
freedomking/ernie-ctm-base
freedomking
2022-07-10T08:04:18Z
8
0
transformers
[ "transformers", "pytorch", "bert", "endpoints_compatible", "region:us" ]
null
2022-07-10T07:47:44Z
## Introduction

### ERNIE-CTM (ERNIE for Chinese Text Mining)

ERNIE-CTM is a pre-trained language model designed for Chinese text mining tasks. It has a more comprehensive Chinese character vocabulary and stronger performance on Chinese text mining tasks, and it is tightly integrated with PaddleNLP to make practical application more convenient.

### ERNIE-CTM features

* Comprehensive expansion of the Chinese character vocabulary: the ERNIE-CTM character set covers more than 20,000 Chinese characters, plus common Chinese symbols (punctuation, pinyin, numbering) and some foreign symbols (kana, units), which greatly reduces the annotation problems caused by UNK (unrecognized characters) in Chinese parsing and mining tasks. ERNIE-CTM also uses embedding factorization, so the applied vocabulary can be extended more flexibly.
* Better fit for Chinese text mining tasks: ERNIE-CTM appends global information to each representation, stacking global context on top of the sequence features, which gives stronger performance on text mining tasks.
* A model structure that supports training with multiple features: within the ERNIE-CTM architecture, users can add tasks and the corresponding feature-trained models as needed, without having to worry about catastrophic forgetting caused by conflicts between tasks.

More detail: https://github.com/PaddlePaddle/PaddleNLP/tree/develop/examples/text_to_knowledge/ernie-ctm
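The card documents the model through PaddleNLP only; whether the PyTorch weights hosted here load cleanly with the generic transformers Auto classes is an assumption based on the repository's `pytorch`/`bert` tags, so treat the sketch below as unverified.

```python
# Unverified loading sketch for freedomking/ernie-ctm-base (BERT-architecture checkpoint).
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("freedomking/ernie-ctm-base")
model = AutoModel.from_pretrained("freedomking/ernie-ctm-base")

inputs = tokenizer("这是一个中文文本挖掘的例子。", return_tensors="pt")  # sample Chinese sentence
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # contextual token representations
```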
jonatasgrosman/exp_w2v2t_id_wavlm_s821
jonatasgrosman
2022-07-10T08:00:42Z
3
0
transformers
[ "transformers", "pytorch", "wavlm", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T08:00:08Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_wavlm_s821 Fine-tuned [microsoft/wavlm-large](https://huggingface.co/microsoft/wavlm-large) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-sv_s116
jonatasgrosman
2022-07-10T06:57:13Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T06:56:48Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-sv_s116 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-sv_s363
jonatasgrosman
2022-07-10T06:47:57Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T06:47:33Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-sv_s363 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_vp-sv_s331
jonatasgrosman
2022-07-10T06:38:13Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T06:37:44Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_vp-sv_s331 Fine-tuned [facebook/wav2vec2-large-sv-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-sv-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech_s149
jonatasgrosman
2022-07-10T05:42:13Z
3
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T05:41:43Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech_s149 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_unispeech_s1
jonatasgrosman
2022-07-10T05:30:27Z
4
0
transformers
[ "transformers", "pytorch", "unispeech", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T05:30:04Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_unispeech_s1 Fine-tuned [microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
freedomking/ernie-ctm-nptag
freedomking
2022-07-10T05:13:13Z
8
0
transformers
[ "transformers", "pytorch", "bert", "endpoints_compatible", "region:us" ]
null
2022-07-10T05:03:13Z
## Introduction

### Ernie-CTM-NPTag

Ernie-CTM-NPTag is trained with ERNIE-CTM plus prompt learning and decoded with heuristic search, which guarantees that classification results stay within the label taxonomy. The fine-tuning setup provides a Chinese noun-phrase tagging task aimed at fine-grained classification of Chinese noun phrases.

More detail: https://github.com/PaddlePaddle/PaddleNLP/tree/develop/examples/text_to_knowledge/nptag
jonatasgrosman/exp_w2v2t_id_xlsr-53_s149
jonatasgrosman
2022-07-10T04:50:13Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T04:49:38Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_xlsr-53_s149 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_xlsr-53_s449
jonatasgrosman
2022-07-10T04:39:32Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T04:38:58Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_xlsr-53_s449 Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_wav2vec2_s156
jonatasgrosman
2022-07-10T04:02:08Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T04:01:39Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_wav2vec2_s156 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
huggingtweets/06melihgokcek
huggingtweets
2022-07-10T03:44:22Z
3
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2022-07-10T03:42:53Z
---
language: en
thumbnail: http://www.huggingtweets.com/06melihgokcek/1657424657914/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---

<div class="inline-flex flex-col" style="line-height: 1.5;">
  <div class="flex">
    <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1419298461/Baskan_0383_400x400.jpg')"></div>
    <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')"></div>
    <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')"></div>
  </div>
  <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div>
  <div style="text-align: center; font-size: 16px; font-weight: 800">İbrahim Melih Gökçek</div>
  <div style="text-align: center; font-size: 14px;">@06melihgokcek</div>
</div>

I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).

Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!

## How does it work?

The model uses the following pipeline.

![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).

## Training data

The model was trained on tweets from İbrahim Melih Gökçek.

| Data | İbrahim Melih Gökçek |
| --- | --- |
| Tweets downloaded | 3237 |
| Retweets | 457 |
| Short tweets | 307 |
| Tweets kept | 2473 |

[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/b48osocr/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.

## Training procedure

The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @06melihgokcek's tweets.

Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3d3h0tqk) for full transparency and reproducibility.

At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3d3h0tqk/artifacts) is logged and versioned.

## How to use

You can use this model directly with a pipeline for text generation:

```python
from transformers import pipeline

generator = pipeline('text-generation', model='huggingtweets/06melihgokcek')
generator("My dream is", num_return_sequences=5)
```

## Limitations and bias

The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).

In addition, the data present in the user's tweets further affects the text generated by the model.

## About

*Built by Boris Dayma*

[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)

For more details, visit the project repository.

[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
jonatasgrosman/exp_w2v2t_id_wav2vec2_s226
jonatasgrosman
2022-07-10T03:44:05Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T03:43:40Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_wav2vec2_s226 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_id_wav2vec2_s417
jonatasgrosman
2022-07-10T03:23:58Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "id", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T03:23:23Z
--- language: - id license: apache-2.0 tags: - automatic-speech-recognition - id datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_id_wav2vec2_s417 Fine-tuned [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60) for speech recognition using the train split of [Common Voice 7.0 (id)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
Cleyden/roberta-base-prop-16-train-set
Cleyden
2022-07-10T03:20:39Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-07-10T02:46:41Z
---
license: mit
tags:
- generated_from_trainer
model-index:
- name: roberta-base-prop-16-train-set
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# roberta-base-prop-16-train-set

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0

### Training results

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
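The card leaves usage undocumented; the sketch below assumes the checkpoint loads as a standard RoBERTa sequence-classification head, with whatever label names the training run saved. The example sentence is a placeholder.

```python
# Assumed usage sketch for Cleyden/roberta-base-prop-16-train-set.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "Cleyden/roberta-base-prop-16-train-set"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("Placeholder input sentence for classification.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(dim=-1).item()])
```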
jonatasgrosman/exp_w2v2t_zh-cn_vp-it_s42
jonatasgrosman
2022-07-10T02:57:00Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:56:33Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-it_s42 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_vp-it_s607
jonatasgrosman
2022-07-10T02:53:51Z
4
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:53:25Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_vp-it_s607 Fine-tuned [facebook/wav2vec2-large-it-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-it-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_r-wav2vec2_s237
jonatasgrosman
2022-07-10T02:50:53Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:50:11Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_r-wav2vec2_s237 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_r-wav2vec2_s387
jonatasgrosman
2022-07-10T02:47:37Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:46:55Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_r-wav2vec2_s387 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_r-wav2vec2_s79
jonatasgrosman
2022-07-10T02:44:23Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:43:39Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_r-wav2vec2_s79 Fine-tuned [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
jonatasgrosman/exp_w2v2t_zh-cn_xls-r_s108
jonatasgrosman
2022-07-10T02:33:57Z
3
0
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "zh-CN", "dataset:mozilla-foundation/common_voice_7_0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-07-10T02:33:16Z
--- language: - zh-CN license: apache-2.0 tags: - automatic-speech-recognition - zh-CN datasets: - mozilla-foundation/common_voice_7_0 --- # exp_w2v2t_zh-cn_xls-r_s108 Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (zh-CN)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.