| Column | Type | Values / lengths |
|:--|:--|:--|
| pipeline_tag | stringclasses | 48 values |
| library_name | stringclasses | 198 values |
| text | stringlengths | 1 to 900k |
| metadata | stringlengths | 2 to 438k |
| id | stringlengths | 5 to 122 |
| last_modified | null | |
| tags | listlengths | 1 to 1.84k |
| sha | null | |
| created_at | stringlengths | 25 to 25 |
| arxiv | listlengths | 0 to 201 |
| languages | listlengths | 0 to 1.83k |
| tags_str | stringlengths | 17 to 9.34k |
| text_str | stringlengths | 0 to 389k |
| text_lists | listlengths | 0 to 722 |
| processed_texts | listlengths | 1 to 723 |
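The schema above describes one row per model card. Below is a minimal sketch of iterating over a dataset with these columns using the `datasets` library; the repo id `user/model-cards-dataset` is a hypothetical placeholder, since the source does not name the dataset.

```python
# Minimal sketch: iterating over a dataset with the columns listed above.
# "user/model-cards-dataset" is a hypothetical placeholder; substitute the
# actual Hub repo id (or a local path) for the dataset this schema describes.
from datasets import load_dataset

ds = load_dataset("user/model-cards-dataset", split="train")

for row in ds.select(range(3)):
    print(row["id"], row["pipeline_tag"], row["library_name"])
    print("tags:", row["tags"][:5])             # list of tag strings
    print("card length:", len(row["text"]))     # raw model card markdown
    print("processed chunks:", len(row["processed_texts"]))
```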
text-generation
transformers
# DioloGPT KaeyaBot model
{"tags": ["conversational"]}
felinecity/DioloGPT-small-KaeyaBot
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# DioloGPT KaeyaBot model
[ "# DioloGPT KaeyaBot model" ]
[ "TAGS\n#transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# DioloGPT KaeyaBot model" ]
text-generation
transformers
# DioloGPT KaeyaBot model
{"tags": ["conversational"]}
felinecity/DioloGPT-small-KaeyaBot2
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# DioloGPT KaeyaBot model
[ "# DioloGPT KaeyaBot model" ]
[ "TAGS\n#transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# DioloGPT KaeyaBot model" ]
text-generation
transformers
# DioloGPT LisaBot model
{"tags": ["conversational"]}
felinecity/DioloGPT-small-LisaBot
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# DioloGPT LisaBot model
[ "# DioloGPT LisaBot model" ]
[ "TAGS\n#transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# DioloGPT LisaBot model" ]
text-generation
transformers
# DioloGPT KaeyaBot model
{"tags": ["conversational"]}
felinecity/ScaraBot
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# DioloGPT KaeyaBot model
[ "# DioloGPT KaeyaBot model" ]
[ "TAGS\n#transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# DioloGPT KaeyaBot model" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-de-en-finetuned-de-to-en-second This model is a fine-tuned version of [Helsinki-NLP/opus-mt-de-en](https://huggingface.co/Helsinki-NLP/opus-mt-de-en) on the wmt16 dataset. It achieves the following results on the evaluation set: - Loss: 1.2282 - Bleu: 37.9762 - Gen Len: 25.3696 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:| | No log | 1.0 | 157 | 1.1837 | 38.8278 | 25.22 | | No log | 2.0 | 314 | 1.2057 | 38.3047 | 25.2908 | | No log | 3.0 | 471 | 1.2167 | 38.231 | 25.316 | | 1.4808 | 4.0 | 628 | 1.2256 | 37.9871 | 25.3556 | | 1.4808 | 5.0 | 785 | 1.2282 | 37.9762 | 25.3696 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu111 - Datasets 1.16.1 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["wmt16"], "metrics": ["bleu"], "model-index": [{"name": "opus-mt-de-en-finetuned-de-to-en-second", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "wmt16", "type": "wmt16", "args": "de-en"}, "metrics": [{"type": "bleu", "value": 37.9762, "name": "Bleu"}]}]}]}
felipetanios/opus-mt-de-en-finetuned-de-to-en-second
null
[ "transformers", "pytorch", "tensorboard", "marian", "text2text-generation", "generated_from_trainer", "dataset:wmt16", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #marian #text2text-generation #generated_from_trainer #dataset-wmt16 #license-apache-2.0 #model-index #autotrain_compatible #endpoints_compatible #region-us
opus-mt-de-en-finetuned-de-to-en-second ======================================= This model is a fine-tuned version of Helsinki-NLP/opus-mt-de-en on the wmt16 dataset. It achieves the following results on the evaluation set: * Loss: 1.2282 * Bleu: 37.9762 * Gen Len: 25.3696 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 5 ### Training results ### Framework versions * Transformers 4.12.5 * Pytorch 1.10.0+cu111 * Datasets 1.16.1 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 5", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu111\n* Datasets 1.16.1\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #marian #text2text-generation #generated_from_trainer #dataset-wmt16 #license-apache-2.0 #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 5", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu111\n* Datasets 1.16.1\n* Tokenizers 0.10.3" ]
text2text-generation
transformers
# mbart for 9-3
{}
felixai/distilmbart-9-3
null
[ "transformers", "pytorch", "mbart", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #mbart #text2text-generation #autotrain_compatible #endpoints_compatible #region-us
# mbart for 9-3
[ "# mbart for 9-3" ]
[ "TAGS\n#transformers #pytorch #mbart #text2text-generation #autotrain_compatible #endpoints_compatible #region-us \n", "# mbart for 9-3" ]
image-classification
transformers
# rare-puppers Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
{"tags": ["image-classification", "pytorch", "huggingpics"], "metrics": ["accuracy"]}
ferdinand/rare-puppers
null
[ "transformers", "pytorch", "tensorboard", "vit", "image-classification", "huggingpics", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #vit #image-classification #huggingpics #model-index #autotrain_compatible #endpoints_compatible #region-us
# rare-puppers Autogenerated by HuggingPics️ Create your own image classifier for anything by running the demo on Google Colab. Report any issues with the demo at the github repo. ## Example Images #### corgi !corgi #### samoyed !samoyed #### shiba inu !shiba inu
[ "# rare-puppers\n\n\nAutogenerated by HuggingPics️\n\nCreate your own image classifier for anything by running the demo on Google Colab.\n\nReport any issues with the demo at the github repo.", "## Example Images", "#### corgi\n\n!corgi", "#### samoyed\n\n!samoyed", "#### shiba inu\n\n!shiba inu" ]
[ "TAGS\n#transformers #pytorch #tensorboard #vit #image-classification #huggingpics #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "# rare-puppers\n\n\nAutogenerated by HuggingPics️\n\nCreate your own image classifier for anything by running the demo on Google Colab.\n\nReport any issues with the demo at the github repo.", "## Example Images", "#### corgi\n\n!corgi", "#### samoyed\n\n!samoyed", "#### shiba inu\n\n!shiba inu" ]
text-classification
transformers
# FinBERT fine-tuned with the FinnSentiment dataset This is a FinBERT model fine-tuned with the [FinnSentiment dataset](https://arxiv.org/pdf/2012.02613.pdf). 90% of sentences were used for training and 10% for evaluation. ## Evaluation results |Metric|Score| |--|--| |Accuracy|0.8639028475711893| |F1-score|0.8643024701696561| |Precision|0.8653866541244811| |Recall|0.8639028475711893| |Matthews|0.6764924917164834| ![kuva.png](https://s3.amazonaws.com/moonup/production/uploads/1661156173672-61561a042387f285c1f8aec3.png) ## License FinBERT-FinnSentiment is licensed under the [CC BY 4.0 License](https://creativecommons.org/licenses/by/4.0/deed.en) (same as FinBERT and the FinnSentiment dataset).
{"language": "fi", "license": "cc-by-4.0"}
fergusq/finbert-finnsentiment
null
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "fi", "arxiv:2012.02613", "license:cc-by-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2012.02613" ]
[ "fi" ]
TAGS #transformers #pytorch #safetensors #bert #text-classification #fi #arxiv-2012.02613 #license-cc-by-4.0 #autotrain_compatible #endpoints_compatible #region-us
FinBERT fine-tuned with the FinnSentiment dataset ================================================= This is a FinBERT model fine-tuned with the FinnSentiment dataset. 90% of sentences were used for training and 10% for evaluation. Evaluation results ------------------ !URL License ------- FinBERT-FinnSentiment is licensed under the CC BY 4.0 License (same as FinBERT and the FinnSentiment dataset).
[]
[ "TAGS\n#transformers #pytorch #safetensors #bert #text-classification #fi #arxiv-2012.02613 #license-cc-by-4.0 #autotrain_compatible #endpoints_compatible #region-us \n" ]
null
null
<br /> <p align="center"> <a href="https://github.com/FernandoPerezLara/image-preprocessing-layer"> <img src="https://huggingface.co/fernandoperlar/preprocessing_image/resolve/main/duck.png" alt="Logo" width="100" height="146"> </a> <h3 align="center">Image Preprocessing Model</h3> <p align="center"> Image preprocessing in a convolutional model <br /> <a href="https://github.com/FernandoPerezLara/image-preprocessing-layer"><strong>Read more about the model »</strong></a> <br /> <br /> <a href="https://github.com/FernandoPerezLara/image-preprocessing-layer">View Code</a> · <a href="https://github.com/FernandoPerezLara/image-preprocessing-layer/issues">Report Bug</a> · <a href="https://github.com/FernandoPerezLara/image-preprocessing-layer/discussions">Start a discussion</a> </p> </p> <br /> The main objective of this project is to apply preprocessing to an image dataset while the model is being trained. The solution has been taken because we do not want to apply preprocessing to the data before training (i.e. create a copy of the data but already preprocessed) because we want to apply data augmentation while the model trains. The use of `Lambda` layers has been discarded because they do not allow the use of external libraries that do not work with tensors, since we want to use the functions provided by *OpenCV* and *NumPy*. ## Preprocessing In this example found in this repository we wanted to divide the images from HSV color masks, where it is divided into: * **Warm zones**: red and white colors are obtained. * **Warm zones**: The green color is obtained. * **Cold zones**: The color blue is obtained. Within the code you can find the declaration of these filters as: ```python filters = { "original": lambda x: x, "red": lambda x: data.getImageTensor(x, (330, 0, 0), (360, 255, 255)) + data.getImageTensor(x, (0, 0, 0), (50, 255, 255)), "green": lambda x: data.getImageTensor(x, (60, 0, 0), (130, 255, 255)), "blue": lambda x: data.getImageTensor(x, (180, 0, 0), (270, 255, 255)), } ``` On the other hand, the preprocessing functions are located inside `scripts/Data.py` file as follows: ```python def detectColor(self, image, lower, upper): if tf.is_tensor(image): temp_image = image.numpy().copy() # Used for training else: temp_image = image.copy() # Used for displaying the image hsv_image = temp_image.copy() hsv_image = cv.cvtColor(hsv_image, cv.COLOR_RGB2HSV) mask = cv.inRange(hsv_image, lower, upper) result = temp_image.copy() result[np.where(mask == 0)] = 0 return result def getImageTensor(self, images, lower, upper): results = [] for img in images: results.append(np.expand_dims(self.detectColor(img, lower, upper), axis=0)) return np.concatenate(results, axis=0) ``` ## Model The model used to solve our problem was a *CNN* with a preprocessing layer: ![Model](./model.png "Model") This model can be found in the `scripts/Model.py` file in the following function: ```python def create_model(): class FilterLayer(layers.Layer): def __init__(self, filter, **kwargs): self.filter = filter super(FilterLayer, self).__init__(name="filter_layer", **kwargs) def call(self, image): shape = image.shape [image, ] = tf.py_function(self.filter, [image], [tf.float32]) image = backend.stop_gradient(image) image.set_shape(shape) return image def get_config(self): return super().get_config() model = models.Sequential() model.add(layers.Input(shape=(215, 538, 3))) model.add(FilterLayer(filter=self.filter)) model.add(layers.Conv2D(32, (3, 3), activation="relu")) model.add(layers.MaxPooling2D(pool_size=(2, 2))) 
model.add(layers.Conv2D(32, (3, 3), activation="relu")) model.add(layers.GlobalAveragePooling2D()) model.add(layers.Dropout(rate=0.4)) model.add(layers.Dense(32, activation="relu")) model.add(layers.Dropout(rate=0.4)) model.add(layers.Dense(2, activation="softmax")) return model ``` ## Contributors This work has been possible thanks to: - [Fernando Pérez Lara](https://www.linkedin.com/in/fernandoperezlara/) ([**@FernandoPerezLara**](https://github.com/FernandoPerezLara)) for having developed the model to make this idea come true. ## License Copyright (c) 2021 Fernando Pérez Lara. Licensed and distributed under the [MIT](LICENSE.txt) license.
{}
fernandoperlar/preprocessing_image
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #region-us
<br /> <p align="center"> <a href="URL <img src="URL alt="Logo" width="100" height="146"> </a> <h3 align="center">Image Preprocessing Model</h3> <p align="center"> Image preprocessing in a convolutional model <br /> <a href="URL more about the model »</strong></a> <br /> <br /> <a href="URL Code</a> · <a href="URL Bug</a> · <a href="URL a discussion</a> </p> </p> <br /> The main objective of this project is to apply preprocessing to an image dataset while the model is being trained. The solution has been taken because we do not want to apply preprocessing to the data before training (i.e. create a copy of the data but already preprocessed) because we want to apply data augmentation while the model trains. The use of 'Lambda' layers has been discarded because they do not allow the use of external libraries that do not work with tensors, since we want to use the functions provided by *OpenCV* and *NumPy*. ## Preprocessing In this example found in this repository we wanted to divide the images from HSV color masks, where it is divided into: * Warm zones: red and white colors are obtained. * Warm zones: The green color is obtained. * Cold zones: The color blue is obtained. Within the code you can find the declaration of these filters as: On the other hand, the preprocessing functions are located inside 'scripts/URL' file as follows: ## Model The model used to solve our problem was a *CNN* with a preprocessing layer: !Model This model can be found in the 'scripts/URL' file in the following function: ## Contributors This work has been possible thanks to: - Fernando Pérez Lara (@FernandoPerezLara) for having developed the model to make this idea come true. ## License Copyright (c) 2021 Fernando Pérez Lara. Licensed and distributed under the MIT license.
[ "## Preprocessing\nIn this example found in this repository we wanted to divide the images from HSV color masks, where it is divided into:\n* Warm zones: red and white colors are obtained.\n* Warm zones: The green color is obtained.\n* Cold zones: The color blue is obtained.\n\nWithin the code you can find the declaration of these filters as:\n\n\nOn the other hand, the preprocessing functions are located inside 'scripts/URL' file as follows:", "## Model\nThe model used to solve our problem was a *CNN* with a preprocessing layer:\n\n!Model\n\nThis model can be found in the 'scripts/URL' file in the following function:", "## Contributors\nThis work has been possible thanks to:\n- Fernando Pérez Lara (@FernandoPerezLara) for having developed the model to make this idea come true.", "## License\nCopyright (c) 2021 Fernando Pérez Lara.\n\nLicensed and distributed under the MIT license." ]
[ "TAGS\n#region-us \n", "## Preprocessing\nIn this example found in this repository we wanted to divide the images from HSV color masks, where it is divided into:\n* Warm zones: red and white colors are obtained.\n* Warm zones: The green color is obtained.\n* Cold zones: The color blue is obtained.\n\nWithin the code you can find the declaration of these filters as:\n\n\nOn the other hand, the preprocessing functions are located inside 'scripts/URL' file as follows:", "## Model\nThe model used to solve our problem was a *CNN* with a preprocessing layer:\n\n!Model\n\nThis model can be found in the 'scripts/URL' file in the following function:", "## Contributors\nThis work has been possible thanks to:\n- Fernando Pérez Lara (@FernandoPerezLara) for having developed the model to make this idea come true.", "## License\nCopyright (c) 2021 Fernando Pérez Lara.\n\nLicensed and distributed under the MIT license." ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2108 - Accuracy: 0.9265 - F1: 0.9265 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8108 | 1.0 | 250 | 0.3101 | 0.903 | 0.8995 | | 0.2423 | 2.0 | 500 | 0.2108 | 0.9265 | 0.9265 | ### Framework versions - Transformers 4.13.0 - Pytorch 1.13.1+cu116 - Datasets 2.8.0 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["emotion"], "metrics": ["accuracy", "f1"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.9265, "name": "Accuracy"}, {"type": "f1", "value": 0.9264826040883781, "name": "F1"}]}]}]}
ffalcao/distilbert-base-uncased-finetuned-emotion
null
[ "transformers", "pytorch", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #safetensors #distilbert #text-classification #generated_from_trainer #dataset-emotion #license-apache-2.0 #model-index #autotrain_compatible #endpoints_compatible #region-us
distilbert-base-uncased-finetuned-emotion ========================================= This model is a fine-tuned version of distilbert-base-uncased on the emotion dataset. It achieves the following results on the evaluation set: * Loss: 0.2108 * Accuracy: 0.9265 * F1: 0.9265 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 64 * eval\_batch\_size: 64 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 2 ### Training results ### Framework versions * Transformers 4.13.0 * Pytorch 1.13.1+cu116 * Datasets 2.8.0 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.13.0\n* Pytorch 1.13.1+cu116\n* Datasets 2.8.0\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #safetensors #distilbert #text-classification #generated_from_trainer #dataset-emotion #license-apache-2.0 #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.13.0\n* Pytorch 1.13.1+cu116\n* Datasets 2.8.0\n* Tokenizers 0.10.3" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-tiny-random-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro This model is a fine-tuned version of [patrickvonplaten/t5-tiny-random](https://huggingface.co/patrickvonplaten/t5-tiny-random) on the wmt16_en_ro_pre_processed dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["wmt16_en_ro_pre_processed"], "model-index": [{"name": "t5-tiny-random-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro", "results": []}]}
ffsouza/t5-tiny-random-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro
null
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "dataset:wmt16_en_ro_pre_processed", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #t5 #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# t5-tiny-random-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro This model is a fine-tuned version of patrickvonplaten/t5-tiny-random on the wmt16_en_ro_pre_processed dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
[ "# t5-tiny-random-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro\n\nThis model is a fine-tuned version of patrickvonplaten/t5-tiny-random on the wmt16_en_ro_pre_processed dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Framework versions\n\n- Transformers 4.12.5\n- Pytorch 1.10.0+cu102\n- Datasets 1.15.1\n- Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #t5 #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# t5-tiny-random-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro\n\nThis model is a fine-tuned version of patrickvonplaten/t5-tiny-random on the wmt16_en_ro_pre_processed dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Framework versions\n\n- Transformers 4.12.5\n- Pytorch 1.10.0+cu102\n- Datasets 1.15.1\n- Tokenizers 0.10.3" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-tiny-random-length-96-learning_rate-0.0002-weight_decay-0.01-finetuned-en-to-ro This model is a fine-tuned version of [patrickvonplaten/t5-tiny-random](https://huggingface.co/patrickvonplaten/t5-tiny-random) on the wmt16_en_ro_pre_processed dataset. It achieves the following results on the evaluation set: - Loss: 4.6426 - Bleu: 0.0617 - Gen Len: 8.9895 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:------:|:---------------:|:------:|:-------:| | 4.5828 | 1.0 | 76290 | 5.5397 | 0.0089 | 8.981 | | 4.187 | 2.0 | 152580 | 5.2241 | 0.0172 | 8.989 | | 3.9612 | 3.0 | 228870 | 5.0092 | 0.034 | 8.988 | | 3.8151 | 4.0 | 305160 | 4.8688 | 0.0365 | 8.9865 | | 3.7162 | 5.0 | 381450 | 4.7656 | 0.0469 | 8.9865 | | 3.6498 | 6.0 | 457740 | 4.6874 | 0.0531 | 8.9885 | | 3.6147 | 7.0 | 534030 | 4.6612 | 0.0585 | 8.9875 | | 3.5972 | 8.0 | 610320 | 4.6426 | 0.0617 | 8.9895 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["wmt16_en_ro_pre_processed"], "metrics": ["bleu"], "model-index": [{"name": "t5-tiny-random-length-96-learning_rate-0.0002-weight_decay-0.01-finetuned-en-to-ro", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "wmt16_en_ro_pre_processed", "type": "wmt16_en_ro_pre_processed", "args": "enro"}, "metrics": [{"type": "bleu", "value": 0.0617, "name": "Bleu"}]}]}]}
ffsouza/t5-tiny-random-length-96-learning_rate-0.0002-weight_decay-0.01-finetuned-en-to-ro
null
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "dataset:wmt16_en_ro_pre_processed", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #t5 #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
t5-tiny-random-length-96-learning\_rate-0.0002-weight\_decay-0.01-finetuned-en-to-ro ==================================================================================== This model is a fine-tuned version of patrickvonplaten/t5-tiny-random on the wmt16\_en\_ro\_pre\_processed dataset. It achieves the following results on the evaluation set: * Loss: 4.6426 * Bleu: 0.0617 * Gen Len: 8.9895 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0002 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 8 ### Training results ### Framework versions * Transformers 4.12.5 * Pytorch 1.10.0+cu102 * Datasets 1.15.1 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 8", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #t5 #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 8", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro This model is a fine-tuned version of [patrickvonplaten/t5-tiny-random](https://huggingface.co/patrickvonplaten/t5-tiny-random) on the wmt16_en_ro_pre_processed dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["wmt16_en_ro_pre_processed"], "model-index": [{"name": "t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro", "results": []}]}
ffsouza/t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro
null
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "dataset:wmt16_en_ro_pre_processed", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #t5 #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro This model is a fine-tuned version of patrickvonplaten/t5-tiny-random on the wmt16_en_ro_pre_processed dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
[ "# t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro\n\nThis model is a fine-tuned version of patrickvonplaten/t5-tiny-random on the wmt16_en_ro_pre_processed dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Framework versions\n\n- Transformers 4.12.5\n- Pytorch 1.10.0+cu102\n- Datasets 1.15.1\n- Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #t5 #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro\n\nThis model is a fine-tuned version of patrickvonplaten/t5-tiny-random on the wmt16_en_ro_pre_processed dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1", "### Framework versions\n\n- Transformers 4.12.5\n- Pytorch 1.10.0+cu102\n- Datasets 1.15.1\n- Tokenizers 0.10.3" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.02-finetuned-en-to-ro This model is a fine-tuned version of [patrickvonplaten/t5-tiny-random](https://huggingface.co/patrickvonplaten/t5-tiny-random) on the wmt16_en_ro_pre_processed dataset. It achieves the following results on the evaluation set: - Loss: 6.4854 - Bleu: 0.0002 - Gen Len: 9.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:------:|:-------:| | 6.2568 | 1.0 | 76290 | 6.4854 | 0.0002 | 9.0 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["wmt16_en_ro_pre_processed"], "metrics": ["bleu"], "model-index": [{"name": "t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.02-finetuned-en-to-ro", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "wmt16_en_ro_pre_processed", "type": "wmt16_en_ro_pre_processed", "args": "enro"}, "metrics": [{"type": "bleu", "value": 0.0002, "name": "Bleu"}]}]}]}
ffsouza/t5-tiny-random-length-96-learning_rate-2e-05-weight_decay-0.02-finetuned-en-to-ro
null
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "generated_from_trainer", "dataset:wmt16_en_ro_pre_processed", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #t5 #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
t5-tiny-random-length-96-learning\_rate-2e-05-weight\_decay-0.02-finetuned-en-to-ro =================================================================================== This model is a fine-tuned version of patrickvonplaten/t5-tiny-random on the wmt16\_en\_ro\_pre\_processed dataset. It achieves the following results on the evaluation set: * Loss: 6.4854 * Bleu: 0.0002 * Gen Len: 9.0 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 ### Training results ### Framework versions * Transformers 4.12.5 * Pytorch 1.10.0+cu102 * Datasets 1.15.1 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #t5 #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tiny-mbart-finetuned-en-to-ro This model is a fine-tuned version of [sshleifer/tiny-mbart](https://huggingface.co/sshleifer/tiny-mbart) on the wmt16_en_ro_pre_processed dataset. It achieves the following results on the evaluation set: - Loss: 8.4792 - Bleu: 0.0 - Gen Len: 20.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:----:|:-------:| | 8.2425 | 1.0 | 76290 | 8.4792 | 0.0 | 20.0 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["wmt16_en_ro_pre_processed"], "metrics": ["bleu"], "model-index": [{"name": "tiny-mbart-finetuned-en-to-ro", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "wmt16_en_ro_pre_processed", "type": "wmt16_en_ro_pre_processed", "args": "enro"}, "metrics": [{"type": "bleu", "value": 0.0, "name": "Bleu"}]}]}]}
ffsouza/tiny-mbart-finetuned-en-to-ro
null
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "generated_from_trainer", "dataset:wmt16_en_ro_pre_processed", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #mbart #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #region-us
tiny-mbart-finetuned-en-to-ro ============================= This model is a fine-tuned version of sshleifer/tiny-mbart on the wmt16\_en\_ro\_pre\_processed dataset. It achieves the following results on the evaluation set: * Loss: 8.4792 * Bleu: 0.0 * Gen Len: 20.0 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.12.5 * Pytorch 1.10.0+cu102 * Datasets 1.15.1 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #mbart #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tiny-mbart-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro This model is a fine-tuned version of [sshleifer/tiny-mbart](https://huggingface.co/sshleifer/tiny-mbart) on the wmt16_en_ro_pre_processed dataset. It achieves the following results on the evaluation set: - Loss: 8.4656 - Bleu: 0.0 - Gen Len: 20.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:----:|:-------:| | 8.2268 | 1.0 | 76290 | 8.4656 | 0.0 | 20.0 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["wmt16_en_ro_pre_processed"], "metrics": ["bleu"], "model-index": [{"name": "tiny-mbart-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "wmt16_en_ro_pre_processed", "type": "wmt16_en_ro_pre_processed", "args": "enro"}, "metrics": [{"type": "bleu", "value": 0.0, "name": "Bleu"}]}]}]}
ffsouza/tiny-mbart-length-128-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro
null
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "generated_from_trainer", "dataset:wmt16_en_ro_pre_processed", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #mbart #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #region-us
tiny-mbart-length-128-learning\_rate-2e-05-weight\_decay-0.01-finetuned-en-to-ro ================================================================================ This model is a fine-tuned version of sshleifer/tiny-mbart on the wmt16\_en\_ro\_pre\_processed dataset. It achieves the following results on the evaluation set: * Loss: 8.4656 * Bleu: 0.0 * Gen Len: 20.0 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.12.5 * Pytorch 1.10.0+cu102 * Datasets 1.15.1 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #mbart #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.005-finetuned-en-to-ro This model is a fine-tuned version of [sshleifer/tiny-mbart](https://huggingface.co/sshleifer/tiny-mbart) on the wmt16_en_ro_pre_processed dataset. It achieves the following results on the evaluation set: - Loss: 8.5983 - Bleu: 0.0 - Gen Len: 20.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:----:|:-------:| | 8.3753 | 1.0 | 76290 | 8.5983 | 0.0 | 20.0 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["wmt16_en_ro_pre_processed"], "metrics": ["bleu"], "model-index": [{"name": "tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.005-finetuned-en-to-ro", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "wmt16_en_ro_pre_processed", "type": "wmt16_en_ro_pre_processed", "args": "enro"}, "metrics": [{"type": "bleu", "value": 0.0, "name": "Bleu"}]}]}]}
ffsouza/tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.005-finetuned-en-to-ro
null
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "generated_from_trainer", "dataset:wmt16_en_ro_pre_processed", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #mbart #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #region-us
tiny-mbart-length-96-learning\_rate-2e-05-weight\_decay-0.005-finetuned-en-to-ro ================================================================================ This model is a fine-tuned version of sshleifer/tiny-mbart on the wmt16\_en\_ro\_pre\_processed dataset. It achieves the following results on the evaluation set: * Loss: 8.5983 * Bleu: 0.0 * Gen Len: 20.0 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.12.5 * Pytorch 1.10.0+cu102 * Datasets 1.15.1 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #mbart #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro This model is a fine-tuned version of [sshleifer/tiny-mbart](https://huggingface.co/sshleifer/tiny-mbart) on the wmt16_en_ro_pre_processed dataset. It achieves the following results on the evaluation set: - Loss: 8.5137 - Bleu: 0.0 - Gen Len: 20.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:----:|:-------:| | 8.2817 | 1.0 | 76290 | 8.5137 | 0.0 | 20.0 | ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["wmt16_en_ro_pre_processed"], "metrics": ["bleu"], "model-index": [{"name": "tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "wmt16_en_ro_pre_processed", "type": "wmt16_en_ro_pre_processed", "args": "enro"}, "metrics": [{"type": "bleu", "value": 0.0, "name": "Bleu"}]}]}]}
ffsouza/tiny-mbart-length-96-learning_rate-2e-05-weight_decay-0.01-finetuned-en-to-ro
null
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "generated_from_trainer", "dataset:wmt16_en_ro_pre_processed", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #mbart #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #region-us
tiny-mbart-length-96-learning\_rate-2e-05-weight\_decay-0.01-finetuned-en-to-ro =============================================================================== This model is a fine-tuned version of sshleifer/tiny-mbart on the wmt16\_en\_ro\_pre\_processed dataset. It achieves the following results on the evaluation set: * Loss: 8.5137 * Bleu: 0.0 * Gen Len: 20.0 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.12.5 * Pytorch 1.10.0+cu102 * Datasets 1.15.1 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #mbart #text2text-generation #generated_from_trainer #dataset-wmt16_en_ro_pre_processed #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.12.5\n* Pytorch 1.10.0+cu102\n* Datasets 1.15.1\n* Tokenizers 0.10.3" ]
text2text-generation
transformers
T5-small for QA
---

[Google's T5-small](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) pre-trained on the [C4](https://huggingface.co/datasets/c4) dataset, fine-tuned for Question-Answering on [SQuAD v2](https://huggingface.co/datasets/squad_v2) with the following hyperparameters:

```
optimizer=adamw_hf
learning_rate=3e-5
adam_beta1=0.9
adam_beta2=0.999
adam_epsilon=1e-08
num_train_epochs=2
per_device_train_batch_size=12
```

Usage
---

The input [context and question] has to be prepared in a specific way as follows:

```python
from transformers import pipeline

def prep_input(_context, _question):
    return " ".join(["question:", _question.strip(), "context:", _context.strip()])

t5qa = pipeline("text2text-generation", "fgaim/t5-small-squad-v2")

context = """
Oxygen is a chemical element with symbol O and atomic number 8. It is a member of the chalcogen group on the periodic table and is a highly reactive nonmetal and oxidizing agent that readily forms compounds (notably oxides) with most elements. By mass, oxygen is the third-most abundant element in the universe, after hydrogen and helium. At standard temperature and pressure, two atoms of the element bind to form dioxygen, a colorless and odorless diatomic gas with the formula O2.
"""

t5qa(prep_input(context, "How many atoms combine to form dioxygen?"))
# [{'generated_text': 'two'}]

t5qa(prep_input(context, "What element makes up almost half of the earth's crust by mass?"))
# [{'generated_text': 'oxygen'}]

t5qa(prep_input(context, "What are the most abundant elements of the universe by mass?"))
# [{'generated_text': 'hydrogen and helium'}]
```
{"language": ["en"], "license": "apache-2.0", "tags": ["text2text-generation"], "datasets": ["c4", "squad"], "widget": [{"text": "question: What is the atomic number for oxygen? context: Oxygen is a chemical element with symbol O and atomic number 8."}, {"text": "question: What is the chemical symbol of Oxygen? context: Oxygen is a chemical element with symbol O and atomic number 8."}]}
fgaim/t5-small-squad-v2
null
[ "transformers", "pytorch", "t5", "text2text-generation", "en", "dataset:c4", "dataset:squad", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #transformers #pytorch #t5 #text2text-generation #en #dataset-c4 #dataset-squad #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
T5-small for QA --- Google's T5-small pre-trained on the C4 dataset, fine-tuned for Question-Answering on SQuAD v2 with the following hyperparameters: Usage --- The input [context and question] has to be prepared in a specific way as follows:
[]
[ "TAGS\n#transformers #pytorch #t5 #text2text-generation #en #dataset-c4 #dataset-squad #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
fill-mask
transformers
# BERT Base for Tigrinya Language We pre-train a BERT base-uncased model for Tigrinya on a dataset of 40 million tokens trained for 40 epochs. This repo contains the original pre-trained Flax model that was trained on a TPU v3.8 and its corresponding PyTorch version. ## Hyperparameters The hyperparameters corresponding to the model sizes mentioned above are as follows: | Model Size | L | AH | HS | FFN | P | Seq | |------------|----|----|-----|------|------|------| | BASE | 12 | 12 | 768 | 3072 | 110M | 512 | (L = number of layers; AH = number of attention heads; HS = hidden size; FFN = feedforward network dimension; P = number of parameters; Seq = maximum sequence length.) ## Citation If you use this model in your product or research, please cite as follows: ``` @article{Fitsum2021TiPLMs, author={Fitsum Gaim and Wonsuk Yang and Jong C. Park}, title={Monolingual Pre-trained Language Models for Tigrinya}, year=2021, publisher={WiNLP 2021 at EMNLP 2021} } ```
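For quick experimentation, a minimal fill-mask sketch along these lines should work. It is not part of the original card: it assumes the standard `transformers` pipeline API, uses this repo's checkpoint id, and borrows a sample sentence from the author's Tigrinya POS card with one word replaced by the `[MASK]` token.

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="fgaim/tibert-base")
# Sample sentence borrowed from the author's Tigrinya POS card, with one word masked.
fill_mask("ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም [MASK] ኮይኑ ኣብ ልብና ይነብር")
```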
{"language": "ti", "widget": [{"text": "\u12d3\u1255\u121a \u12f0\u1242\u12a3\u1295\u1235\u1275\u12ee [MASK] \u1265\u130d\u1265\u122a \u1270\u122b\u12a5\u12e9"}]}
fgaim/tibert-base
null
[ "transformers", "pytorch", "jax", "bert", "fill-mask", "ti", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "ti" ]
TAGS #transformers #pytorch #jax #bert #fill-mask #ti #autotrain_compatible #endpoints_compatible #has_space #region-us
BERT Base for Tigrinya Language =============================== We pre-train a BERT base-uncased model for Tigrinya on a dataset of 40 million tokens trained for 40 epochs. This repo contains the original pre-trained Flax model that was trained on a TPU v3.8 and its corresponding PyTorch version. Hyperparameters --------------- The hyperparameters corresponding to the model sizes mentioned above are as follows: (L = number of layers; AH = number of attention heads; HS = hidden size; FFN = feedforward network dimension; P = number of parameters; Seq = maximum sequence length.) If you use this model in your product or research, please cite as follows:
[]
[ "TAGS\n#transformers #pytorch #jax #bert #fill-mask #ti #autotrain_compatible #endpoints_compatible #has_space #region-us \n" ]
token-classification
transformers
# Tigrinya POS tagging with TiELECTRA This model is a fine-tuned version of [TiELECTRA](https://huggingface.co/fgaim/tielectra-small) on the NTC-v1 dataset (Tedla et al. 2016). ## Basic usage ```python from transformers import pipeline ti_pos = pipeline("token-classification", model="fgaim/tielectra-small-pos") ti_pos("ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም ህያው ኮይኑ ኣብ ልብና ይነብር") ``` ## Training ### Hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10.0 ### Results The model achieves the following results on the test set: - Loss: 0.2236 - Adj Precision: 0.9148 - Adj Recall: 0.9192 - Adj F1: 0.9170 - Adj Number: 1670 - Adv Precision: 0.8228 - Adv Recall: 0.8058 - Adv F1: 0.8142 - Adv Number: 484 - Con Precision: 0.9793 - Con Recall: 0.9743 - Con F1: 0.9768 - Con Number: 972 - Fw Precision: 0.5 - Fw Recall: 0.3214 - Fw F1: 0.3913 - Fw Number: 28 - Int Precision: 0.64 - Int Recall: 0.6154 - Int F1: 0.6275 - Int Number: 26 - N Precision: 0.9525 - N Recall: 0.9587 - N F1: 0.9556 - N Number: 3992 - Num Precision: 0.9825 - Num Recall: 0.9372 - Num F1: 0.9593 - Num Number: 239 - N Prp Precision: 0.9132 - N Prp Recall: 0.9404 - N Prp F1: 0.9266 - N Prp Number: 470 - N V Precision: 0.9667 - N V Recall: 0.9760 - N V F1: 0.9713 - N V Number: 416 - Pre Precision: 0.9645 - Pre Recall: 0.9592 - Pre F1: 0.9619 - Pre Number: 907 - Pro Precision: 0.9395 - Pro Recall: 0.9079 - Pro F1: 0.9234 - Pro Number: 445 - Pun Precision: 1.0 - Pun Recall: 0.9994 - Pun F1: 0.9997 - Pun Number: 1607 - Unc Precision: 0.9286 - Unc Recall: 0.8125 - Unc F1: 0.8667 - Unc Number: 16 - V Precision: 0.7609 - V Recall: 0.8974 - V F1: 0.8235 - V Number: 78 - V Aux Precision: 0.9581 - V Aux Recall: 0.9786 - V Aux F1: 0.9682 - V Aux Number: 654 - V Ger Precision: 0.9183 - V Ger Recall: 0.9415 - V Ger F1: 0.9297 - V Ger Number: 513 - V Imf Precision: 0.9473 - V Imf Recall: 0.9442 - V Imf F1: 0.9458 - V Imf Number: 914 - V Imv Precision: 0.8163 - V Imv Recall: 0.5714 - V Imv F1: 0.6723 - V Imv Number: 70 - V Prf Precision: 0.8927 - V Prf Recall: 0.8776 - V Prf F1: 0.8851 - V Prf Number: 294 - V Rel Precision: 0.9535 - V Rel Recall: 0.9485 - V Rel F1: 0.9510 - V Rel Number: 757 - Overall Precision: 0.9456 - Overall Recall: 0.9456 - Overall F1: 0.9456 - Overall Accuracy: 0.9456 ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu111 - Datasets 1.10.2 - Tokenizers 0.10.1 ## Citation If you use this model in your product or research, please cite as follows: ``` @article{Fitsum2021TiPLMs, author= {Fitsum Gaim and Wonsuk Yang and Jong C. Park}, title= {Monolingual Pre-trained Language Models for Tigrinya}, year= 2021, publisher= {WiNLP 2021/EMNLP 2021} } ``` ## References ``` Tedla, Y., Yamamoto, K. & Marasinghe, A. 2016. Tigrinya Part-of-Speech Tagging with Morphological Patterns and the New Nagaoka Tigrinya Corpus. International Journal Of Computer Applications 146 pp. 33-41 (2016). ```
{"language": "ti", "datasets": ["TLMD", "NTC"], "metrics": ["f1", "precision", "recall", "accuracy"], "widget": [{"text": "\u12f5\u121d\u133b\u12ca \u12a3\u1265\u122d\u1203\u121d \u12a3\u1348\u12c8\u122d\u1242 \u1295\u12d8\u120d\u12a3\u1208\u121d \u1205\u12eb\u12cd \u12ae\u12ed\u1291 \u12a3\u1265 \u120d\u1265\u1293 \u12ed\u1290\u1265\u122d"}]}
fgaim/tielectra-small-pos
null
[ "transformers", "pytorch", "electra", "token-classification", "ti", "dataset:TLMD", "dataset:NTC", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "ti" ]
TAGS #transformers #pytorch #electra #token-classification #ti #dataset-TLMD #dataset-NTC #model-index #autotrain_compatible #endpoints_compatible #has_space #region-us
# Tigrinya POS tagging with TiELECTRA This model is a fine-tuned version of TiELECTRA on the NTC-v1 dataset (Tedla et al. 2016). ## Basic usage ## Training ### Hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10.0 ### Results The model achieves the following results on the test set: - Loss: 0.2236 - Adj Precision: 0.9148 - Adj Recall: 0.9192 - Adj F1: 0.9170 - Adj Number: 1670 - Adv Precision: 0.8228 - Adv Recall: 0.8058 - Adv F1: 0.8142 - Adv Number: 484 - Con Precision: 0.9793 - Con Recall: 0.9743 - Con F1: 0.9768 - Con Number: 972 - Fw Precision: 0.5 - Fw Recall: 0.3214 - Fw F1: 0.3913 - Fw Number: 28 - Int Precision: 0.64 - Int Recall: 0.6154 - Int F1: 0.6275 - Int Number: 26 - N Precision: 0.9525 - N Recall: 0.9587 - N F1: 0.9556 - N Number: 3992 - Num Precision: 0.9825 - Num Recall: 0.9372 - Num F1: 0.9593 - Num Number: 239 - N Prp Precision: 0.9132 - N Prp Recall: 0.9404 - N Prp F1: 0.9266 - N Prp Number: 470 - N V Precision: 0.9667 - N V Recall: 0.9760 - N V F1: 0.9713 - N V Number: 416 - Pre Precision: 0.9645 - Pre Recall: 0.9592 - Pre F1: 0.9619 - Pre Number: 907 - Pro Precision: 0.9395 - Pro Recall: 0.9079 - Pro F1: 0.9234 - Pro Number: 445 - Pun Precision: 1.0 - Pun Recall: 0.9994 - Pun F1: 0.9997 - Pun Number: 1607 - Unc Precision: 0.9286 - Unc Recall: 0.8125 - Unc F1: 0.8667 - Unc Number: 16 - V Precision: 0.7609 - V Recall: 0.8974 - V F1: 0.8235 - V Number: 78 - V Aux Precision: 0.9581 - V Aux Recall: 0.9786 - V Aux F1: 0.9682 - V Aux Number: 654 - V Ger Precision: 0.9183 - V Ger Recall: 0.9415 - V Ger F1: 0.9297 - V Ger Number: 513 - V Imf Precision: 0.9473 - V Imf Recall: 0.9442 - V Imf F1: 0.9458 - V Imf Number: 914 - V Imv Precision: 0.8163 - V Imv Recall: 0.5714 - V Imv F1: 0.6723 - V Imv Number: 70 - V Prf Precision: 0.8927 - V Prf Recall: 0.8776 - V Prf F1: 0.8851 - V Prf Number: 294 - V Rel Precision: 0.9535 - V Rel Recall: 0.9485 - V Rel F1: 0.9510 - V Rel Number: 757 - Overall Precision: 0.9456 - Overall Recall: 0.9456 - Overall F1: 0.9456 - Overall Accuracy: 0.9456 ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu111 - Datasets 1.10.2 - Tokenizers 0.10.1 If you use this model in your product or research, please cite as follows: ## References
[ "# Tigrinya POS tagging with TiELECTRA\n\nThis model is a fine-tuned version of TiELECTRA on the NTC-v1 dataset (Tedla et al. 2016).", "## Basic usage", "## Training", "### Hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 8\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10.0", "### Results\n\nThe model achieves the following results on the test set:\n- Loss: 0.2236\n- Adj Precision: 0.9148\n- Adj Recall: 0.9192\n- Adj F1: 0.9170\n- Adj Number: 1670\n- Adv Precision: 0.8228\n- Adv Recall: 0.8058\n- Adv F1: 0.8142\n- Adv Number: 484\n- Con Precision: 0.9793\n- Con Recall: 0.9743\n- Con F1: 0.9768\n- Con Number: 972\n- Fw Precision: 0.5\n- Fw Recall: 0.3214\n- Fw F1: 0.3913\n- Fw Number: 28\n- Int Precision: 0.64\n- Int Recall: 0.6154\n- Int F1: 0.6275\n- Int Number: 26\n- N Precision: 0.9525\n- N Recall: 0.9587\n- N F1: 0.9556\n- N Number: 3992\n- Num Precision: 0.9825\n- Num Recall: 0.9372\n- Num F1: 0.9593\n- Num Number: 239\n- N Prp Precision: 0.9132\n- N Prp Recall: 0.9404\n- N Prp F1: 0.9266\n- N Prp Number: 470\n- N V Precision: 0.9667\n- N V Recall: 0.9760\n- N V F1: 0.9713\n- N V Number: 416\n- Pre Precision: 0.9645\n- Pre Recall: 0.9592\n- Pre F1: 0.9619\n- Pre Number: 907\n- Pro Precision: 0.9395\n- Pro Recall: 0.9079\n- Pro F1: 0.9234\n- Pro Number: 445\n- Pun Precision: 1.0\n- Pun Recall: 0.9994\n- Pun F1: 0.9997\n- Pun Number: 1607\n- Unc Precision: 0.9286\n- Unc Recall: 0.8125\n- Unc F1: 0.8667\n- Unc Number: 16\n- V Precision: 0.7609\n- V Recall: 0.8974\n- V F1: 0.8235\n- V Number: 78\n- V Aux Precision: 0.9581\n- V Aux Recall: 0.9786\n- V Aux F1: 0.9682\n- V Aux Number: 654\n- V Ger Precision: 0.9183\n- V Ger Recall: 0.9415\n- V Ger F1: 0.9297\n- V Ger Number: 513\n- V Imf Precision: 0.9473\n- V Imf Recall: 0.9442\n- V Imf F1: 0.9458\n- V Imf Number: 914\n- V Imv Precision: 0.8163\n- V Imv Recall: 0.5714\n- V Imv F1: 0.6723\n- V Imv Number: 70\n- V Prf Precision: 0.8927\n- V Prf Recall: 0.8776\n- V Prf F1: 0.8851\n- V Prf Number: 294\n- V Rel Precision: 0.9535\n- V Rel Recall: 0.9485\n- V Rel F1: 0.9510\n- V Rel Number: 757\n- Overall Precision: 0.9456\n- Overall Recall: 0.9456\n- Overall F1: 0.9456\n- Overall Accuracy: 0.9456", "### Framework versions\n\n- Transformers 4.10.3\n- Pytorch 1.9.0+cu111\n- Datasets 1.10.2\n- Tokenizers 0.10.1\n\n\nIf you use this model in your product or research, please cite as follows:", "## References" ]
[ "TAGS\n#transformers #pytorch #electra #token-classification #ti #dataset-TLMD #dataset-NTC #model-index #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Tigrinya POS tagging with TiELECTRA\n\nThis model is a fine-tuned version of TiELECTRA on the NTC-v1 dataset (Tedla et al. 2016).", "## Basic usage", "## Training", "### Hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 8\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10.0", "### Results\n\nThe model achieves the following results on the test set:\n- Loss: 0.2236\n- Adj Precision: 0.9148\n- Adj Recall: 0.9192\n- Adj F1: 0.9170\n- Adj Number: 1670\n- Adv Precision: 0.8228\n- Adv Recall: 0.8058\n- Adv F1: 0.8142\n- Adv Number: 484\n- Con Precision: 0.9793\n- Con Recall: 0.9743\n- Con F1: 0.9768\n- Con Number: 972\n- Fw Precision: 0.5\n- Fw Recall: 0.3214\n- Fw F1: 0.3913\n- Fw Number: 28\n- Int Precision: 0.64\n- Int Recall: 0.6154\n- Int F1: 0.6275\n- Int Number: 26\n- N Precision: 0.9525\n- N Recall: 0.9587\n- N F1: 0.9556\n- N Number: 3992\n- Num Precision: 0.9825\n- Num Recall: 0.9372\n- Num F1: 0.9593\n- Num Number: 239\n- N Prp Precision: 0.9132\n- N Prp Recall: 0.9404\n- N Prp F1: 0.9266\n- N Prp Number: 470\n- N V Precision: 0.9667\n- N V Recall: 0.9760\n- N V F1: 0.9713\n- N V Number: 416\n- Pre Precision: 0.9645\n- Pre Recall: 0.9592\n- Pre F1: 0.9619\n- Pre Number: 907\n- Pro Precision: 0.9395\n- Pro Recall: 0.9079\n- Pro F1: 0.9234\n- Pro Number: 445\n- Pun Precision: 1.0\n- Pun Recall: 0.9994\n- Pun F1: 0.9997\n- Pun Number: 1607\n- Unc Precision: 0.9286\n- Unc Recall: 0.8125\n- Unc F1: 0.8667\n- Unc Number: 16\n- V Precision: 0.7609\n- V Recall: 0.8974\n- V F1: 0.8235\n- V Number: 78\n- V Aux Precision: 0.9581\n- V Aux Recall: 0.9786\n- V Aux F1: 0.9682\n- V Aux Number: 654\n- V Ger Precision: 0.9183\n- V Ger Recall: 0.9415\n- V Ger F1: 0.9297\n- V Ger Number: 513\n- V Imf Precision: 0.9473\n- V Imf Recall: 0.9442\n- V Imf F1: 0.9458\n- V Imf Number: 914\n- V Imv Precision: 0.8163\n- V Imv Recall: 0.5714\n- V Imv F1: 0.6723\n- V Imv Number: 70\n- V Prf Precision: 0.8927\n- V Prf Recall: 0.8776\n- V Prf F1: 0.8851\n- V Prf Number: 294\n- V Rel Precision: 0.9535\n- V Rel Recall: 0.9485\n- V Rel F1: 0.9510\n- V Rel Number: 757\n- Overall Precision: 0.9456\n- Overall Recall: 0.9456\n- Overall F1: 0.9456\n- Overall Accuracy: 0.9456", "### Framework versions\n\n- Transformers 4.10.3\n- Pytorch 1.9.0+cu111\n- Datasets 1.10.2\n- Tokenizers 0.10.1\n\n\nIf you use this model in your product or research, please cite as follows:", "## References" ]
text-classification
transformers
# Sentiment Analysis for Tigrinya with TiELECTRA small This model is a fine-tuned version of [TiELECTRA small](https://huggingface.co/fgaim/tielectra-small) on a YouTube comments Sentiment Analysis dataset for Tigrinya (Tela et al. 2020). ## Basic usage ```python from transformers import pipeline ti_sent = pipeline("sentiment-analysis", model="fgaim/tielectra-small-sentiment") ti_sent("ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም ህያው ኮይኑ ኣብ ልብና ይነብር") ``` ## Training ### Hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Results The model achieves the following results on the evaluation set: - F1: 0.8229 - Precision: 0.8056 - Recall: 0.841 - Accuracy: 0.819 - Loss: 0.4299 ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu111 - Datasets 1.10.2 - Tokenizers 0.10.1 ## Citation If you use this model in your product or research, please cite as follows: ``` @article{Fitsum2021TiPLMs, author={Fitsum Gaim and Wonsuk Yang and Jong C. Park}, title={Monolingual Pre-trained Language Models for Tigrinya}, year=2021, publisher= {WiNLP 2021/EMNLP 2021} } ``` ## References ``` Tela, A., Woubie, A. and Hautamäki, V. 2020. Transferring Monolingual Model to Low-Resource Language: The Case of Tigrinya. ArXiv, abs/2006.07698. ```
{"language": "ti", "metrics": ["f1", "precision", "recall", "accuracy"], "widget": [{"text": "\u12f5\u121d\u133b\u12ca \u12a3\u1265\u122d\u1203\u121d \u12a3\u1348\u12c8\u122d\u1242 \u1295\u12d8\u120d\u12a3\u1208\u121d \u1205\u12eb\u12cd \u12ae\u12ed\u1291 \u12a3\u1265 \u120d\u1265\u1293 \u12ed\u1290\u1265\u122d"}]}
fgaim/tielectra-small-sentiment
null
[ "transformers", "pytorch", "electra", "text-classification", "ti", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "ti" ]
TAGS #transformers #pytorch #electra #text-classification #ti #model-index #autotrain_compatible #endpoints_compatible #has_space #region-us
# Sentiment Analysis for Tigrinya with TiELECTRA small This model is a fine-tuned version of TiELECTRA small on a YouTube comments Sentiment Analysis dataset for Tigrinya (Tela et al. 2020). ## Basic usage ## Training ### Hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Results The model achieves the following results on the evaluation set: - F1: 0.8229 - Precision: 0.8056 - Recall: 0.841 - Accuracy: 0.819 - Loss: 0.4299 ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu111 - Datasets 1.10.2 - Tokenizers 0.10.1 If you use this model in your product or research, please cite as follows: ## References
[ "# Sentiment Analysis for Tigrinya with TiELECTRA small\n\nThis model is a fine-tuned version of TiELECTRA small on a YouTube comments Sentiment Analysis dataset for Tigrinya (Tela et al. 2020).", "## Basic usage", "## Training", "### Hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 3.0", "### Results\n\nThe model achieves the following results on the evaluation set:\n- F1: 0.8229\n- Precision: 0.8056\n- Recall: 0.841\n- Accuracy: 0.819\n- Loss: 0.4299", "### Framework versions\n\n- Transformers 4.10.3\n- Pytorch 1.9.0+cu111\n- Datasets 1.10.2\n- Tokenizers 0.10.1\n\n\nIf you use this model in your product or research, please cite as follows:", "## References" ]
[ "TAGS\n#transformers #pytorch #electra #text-classification #ti #model-index #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Sentiment Analysis for Tigrinya with TiELECTRA small\n\nThis model is a fine-tuned version of TiELECTRA small on a YouTube comments Sentiment Analysis dataset for Tigrinya (Tela et al. 2020).", "## Basic usage", "## Training", "### Hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 3.0", "### Results\n\nThe model achieves the following results on the evaluation set:\n- F1: 0.8229\n- Precision: 0.8056\n- Recall: 0.841\n- Accuracy: 0.819\n- Loss: 0.4299", "### Framework versions\n\n- Transformers 4.10.3\n- Pytorch 1.9.0+cu111\n- Datasets 1.10.2\n- Tokenizers 0.10.1\n\n\nIf you use this model in your product or research, please cite as follows:", "## References" ]
fill-mask
transformers
# Pre-trained ELECTRA small for Tigrinya Language We pre-train ELECTRA small on the [TLMD](https://zenodo.org/record/5139094) dataset, with over 40 million tokens. Contained are trained Flax and PyTorch models. ## Hyperparameters The hyperparameters corresponding to model sizes mentioned above are as follows: | Model Size | L | AH | HS | FFN | P | Seq | |------------|----|----|-----|------|------|------| | SMALL | 12 | 4 | 256 | 1024 | 14M | 512 | (L = number of layers; AH = number of attention heads; HS = hidden size; FFN = feedforward network dimension; P = number of parameters; Seq = maximum sequence length.) ### Framework versions - Transformers 4.12.0.dev0 - Pytorch 1.9.0+cu111 - Datasets 1.13.3 - Tokenizers 0.10.3 ## Citation If you use this model in your product or research, please cite as follows: ``` @article{Fitsum2021TiPLMs, author={Fitsum Gaim and Wonsuk Yang and Jong C. Park}, title={Monolingual Pre-trained Language Models for Tigrinya}, year=2021, publisher={WiNLP 2021 at EMNLP 2021} } ```
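As with the other Tigrinya checkpoints in this series, a minimal fill-mask sketch is given below. It is an assumption rather than part of the original card: it relies on the standard `transformers` pipeline, this repo's checkpoint id, and a sample sentence reused from the author's POS card with one word masked.

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="fgaim/tielectra-small")
fill_mask("ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም [MASK] ኮይኑ ኣብ ልብና ይነብር")  # one word replaced by [MASK]
```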
{"language": "ti", "widget": [{"text": "\u12d3\u1255\u121a \u1218\u1295\u12a5\u1230\u12ed \u12a4\u122d\u1275\u122b [MASK] \u1270\u122b\u12a5\u12e9"}]}
fgaim/tielectra-small
null
[ "transformers", "pytorch", "jax", "electra", "fill-mask", "ti", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "ti" ]
TAGS #transformers #pytorch #jax #electra #fill-mask #ti #autotrain_compatible #endpoints_compatible #has_space #region-us
Pre-trained ELECTRA small for Tigrinya Language =============================================== We pre-train ELECTRA small on the TLMD dataset, with over 40 million tokens. Contained are trained Flax and PyTorch models. Hyperparameters --------------- The hyperparameters corresponding to model sizes mentioned above are as follows: (L = number of layers; AH = number of attention heads; HS = hidden size; FFN = feedforward network dimension; P = number of parameters; Seq = maximum sequence length.) ### Framework versions * Transformers 4.12.0.dev0 * Pytorch 1.9.0+cu111 * Datasets 1.13.3 * Tokenizers 0.10.3 If you use this model in your product or research, please cite as follows:
[ "### Framework versions\n\n\n* Transformers 4.12.0.dev0\n* Pytorch 1.9.0+cu111\n* Datasets 1.13.3\n* Tokenizers 0.10.3\n\n\nIf you use this model in your product or research, please cite as follows:" ]
[ "TAGS\n#transformers #pytorch #jax #electra #fill-mask #ti #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "### Framework versions\n\n\n* Transformers 4.12.0.dev0\n* Pytorch 1.9.0+cu111\n* Datasets 1.13.3\n* Tokenizers 0.10.3\n\n\nIf you use this model in your product or research, please cite as follows:" ]
fill-mask
transformers
# TiRoBERTa: RoBERTa Pretrained for the Tigrinya Language

We pretrain a RoBERTa base model for Tigrinya on a dataset of 40 million tokens trained for 40 epochs. Contained in this repo is the original pretrained Flax model that was trained on a TPU v3.8 and its corresponding PyTorch version.

## Hyperparameters

The hyperparameters corresponding to model sizes mentioned above are as follows:

| Model Size | L  | AH | HS  | FFN  | P    | Seq  |
|------------|----|----|-----|------|------|------|
| BASE       | 12 | 12 | 768 | 3072 | 125M | 512  |

(L = number of layers; AH = number of attention heads; HS = hidden size; FFN = feedforward network dimension; P = number of parameters; Seq = maximum sequence length.)

### Framework versions

- Transformers 4.12.0.dev0
- Pytorch 1.9.0+cu111
- Datasets 1.13.3
- Tokenizers 0.10.3

## Citation

If you use this model in your product or research, please cite as follows:

```
@article{Fitsum2021TiPLMs,
  author={Fitsum Gaim and Wonsuk Yang and Jong C. Park},
  title={Monolingual Pre-trained Language Models for Tigrinya},
  year=2021,
  publisher={WiNLP 2021 at EMNLP 2021}
}
```
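A minimal masked-prediction sketch is shown below. It is not part of the original card: it assumes the standard `transformers` API, uses this repo's checkpoint id, and reuses a sample sentence from the author's Tigrinya POS card with one word replaced by the RoBERTa `<mask>` token.

```python
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

model_id = "fgaim/tiroberta-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id)

text = "ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም <mask> ኮይኑ ኣብ ልብና ይነብር"
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Take the highest-scoring token at the masked position.
mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))
```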
{"language": "ti", "widget": [{"text": "\u12d3\u1255\u121a \u1218\u1295\u12a5\u1230\u12ed \u12a4\u122d\u1275\u122b <mask> \u1270\u122b\u12a5\u12e9"}]}
fgaim/tiroberta-base
null
[ "transformers", "pytorch", "jax", "safetensors", "roberta", "fill-mask", "ti", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "ti" ]
TAGS #transformers #pytorch #jax #safetensors #roberta #fill-mask #ti #autotrain_compatible #endpoints_compatible #has_space #region-us
TiRoBERTa: RoBERTa Pretrained for the Tigrinya Language
=======================================================

We pretrain a RoBERTa base model for Tigrinya on a dataset of 40 million tokens trained for 40 epochs. Contained in this repo is the original pretrained Flax model that was trained on a TPU v3.8 and its corresponding PyTorch version.

Hyperparameters
---------------

The hyperparameters corresponding to model sizes mentioned above are as follows:

(L = number of layers; AH = number of attention heads; HS = hidden size; FFN = feedforward network dimension; P = number of parameters; Seq = maximum sequence length.)

### Framework versions

* Transformers 4.12.0.dev0
* Pytorch 1.9.0+cu111
* Datasets 1.13.3
* Tokenizers 0.10.3

If you use this model in your product or research, please cite as follows:
[ "### Framework versions\n\n\n* Transformers 4.12.0.dev0\n* Pytorch 1.9.0+cu111\n* Datasets 1.13.3\n* Tokenizers 0.10.3\n\n\nIf you use this model in your product or research, please cite as follows:" ]
[ "TAGS\n#transformers #pytorch #jax #safetensors #roberta #fill-mask #ti #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "### Framework versions\n\n\n* Transformers 4.12.0.dev0\n* Pytorch 1.9.0+cu111\n* Datasets 1.13.3\n* Tokenizers 0.10.3\n\n\nIf you use this model in your product or research, please cite as follows:" ]
token-classification
transformers
# Tigrinya POS tagging with TiRoBERTa This model is a fine-tuned version of [TiRoBERTa](https://huggingface.co/fgaim/tiroberta) on the NTC-v1 dataset (Tedla et al. 2016). ## Training ### Hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10.0 ### Results The model achieves the following results on the test set: - Loss: 0.3194 - Adj Precision: 0.9219 - Adj Recall: 0.9335 - Adj F1: 0.9277 - Adj Number: 1670 - Adv Precision: 0.8297 - Adv Recall: 0.8554 - Adv F1: 0.8423 - Adv Number: 484 - Con Precision: 0.9844 - Con Recall: 0.9763 - Con F1: 0.9804 - Con Number: 972 - Fw Precision: 0.7895 - Fw Recall: 0.5357 - Fw F1: 0.6383 - Fw Number: 28 - Int Precision: 0.6552 - Int Recall: 0.7308 - Int F1: 0.6909 - Int Number: 26 - N Precision: 0.9650 - N Recall: 0.9662 - N F1: 0.9656 - N Number: 3992 - Num Precision: 0.9747 - Num Recall: 0.9665 - Num F1: 0.9706 - Num Number: 239 - N Prp Precision: 0.9308 - N Prp Recall: 0.9447 - N Prp F1: 0.9377 - N Prp Number: 470 - N V Precision: 0.9854 - N V Recall: 0.9736 - N V F1: 0.9794 - N V Number: 416 - Pre Precision: 0.9722 - Pre Recall: 0.9625 - Pre F1: 0.9673 - Pre Number: 907 - Pro Precision: 0.9448 - Pro Recall: 0.9236 - Pro F1: 0.9341 - Pro Number: 445 - Pun Precision: 1.0 - Pun Recall: 0.9994 - Pun F1: 0.9997 - Pun Number: 1607 - Unc Precision: 1.0 - Unc Recall: 0.875 - Unc F1: 0.9333 - Unc Number: 16 - V Precision: 0.8780 - V Recall: 0.9231 - V F1: 0.9 - V Number: 78 - V Aux Precision: 0.9685 - V Aux Recall: 0.9878 - V Aux F1: 0.9780 - V Aux Number: 654 - V Ger Precision: 0.9388 - V Ger Recall: 0.9571 - V Ger F1: 0.9479 - V Ger Number: 513 - V Imf Precision: 0.9634 - V Imf Recall: 0.9497 - V Imf F1: 0.9565 - V Imf Number: 914 - V Imv Precision: 0.8793 - V Imv Recall: 0.7286 - V Imv F1: 0.7969 - V Imv Number: 70 - V Prf Precision: 0.8960 - V Prf Recall: 0.9082 - V Prf F1: 0.9020 - V Prf Number: 294 - V Rel Precision: 0.9678 - V Rel Recall: 0.9538 - V Rel F1: 0.9607 - V Rel Number: 757 - Overall Precision: 0.9562 - Overall Recall: 0.9562 - Overall F1: 0.9562 - Overall Accuracy: 0.9562 ### Framework versions - Transformers 4.12.0.dev0 - Pytorch 1.9.0+cu111 - Datasets 1.13.3 - Tokenizers 0.10.3 ## Citation If you use this model in your product or research, please cite as follows: ``` @article{Fitsum2021TiPLMs, author={Fitsum Gaim and Wonsuk Yang and Jong C. Park}, title={Monolingual Pre-trained Language Models for Tigrinya}, year=2021, publisher={WiNLP 2021/EMNLP 2021} } ``` ## References ``` Tedla, Y., Yamamoto, K. & Marasinghe, A. 2016. Tigrinya Part-of-Speech Tagging with Morphological Patterns and the New Nagaoka Tigrinya Corpus. International Journal Of Computer Applications 146 pp. 33-41 (2016). ```
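For reference, a usage sketch mirroring the author's TiELECTRA POS card is shown below. It is an assumption rather than part of this card; it relies on the standard `transformers` token-classification pipeline and this repo's checkpoint id.

```python
from transformers import pipeline

ti_pos = pipeline("token-classification", model="fgaim/tiroberta-pos")
ti_pos("ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም ህያው ኮይኑ ኣብ ልብና ይነብር")
```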
{"language": "ti", "datasets": ["TLMD", "NTC"], "metrics": ["f1", "precision", "recall", "accuracy"], "widget": [{"text": "\u12f5\u121d\u133b\u12ca \u12a3\u1265\u122d\u1203\u121d \u12a3\u1348\u12c8\u122d\u1242 \u1295\u12d8\u120d\u12a3\u1208\u121d \u1205\u12eb\u12cd \u12ae\u12ed\u1291 \u12a3\u1265 \u120d\u1265\u1293 \u12ed\u1290\u1265\u122d"}]}
fgaim/tiroberta-pos
null
[ "transformers", "pytorch", "safetensors", "roberta", "token-classification", "ti", "dataset:TLMD", "dataset:NTC", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "ti" ]
TAGS #transformers #pytorch #safetensors #roberta #token-classification #ti #dataset-TLMD #dataset-NTC #model-index #autotrain_compatible #endpoints_compatible #has_space #region-us
# Tigrinya POS tagging with TiRoBERTa This model is a fine-tuned version of TiRoBERTa on the NTC-v1 dataset (Tedla et al. 2016). ## Training ### Hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10.0 ### Results The model achieves the following results on the test set: - Loss: 0.3194 - Adj Precision: 0.9219 - Adj Recall: 0.9335 - Adj F1: 0.9277 - Adj Number: 1670 - Adv Precision: 0.8297 - Adv Recall: 0.8554 - Adv F1: 0.8423 - Adv Number: 484 - Con Precision: 0.9844 - Con Recall: 0.9763 - Con F1: 0.9804 - Con Number: 972 - Fw Precision: 0.7895 - Fw Recall: 0.5357 - Fw F1: 0.6383 - Fw Number: 28 - Int Precision: 0.6552 - Int Recall: 0.7308 - Int F1: 0.6909 - Int Number: 26 - N Precision: 0.9650 - N Recall: 0.9662 - N F1: 0.9656 - N Number: 3992 - Num Precision: 0.9747 - Num Recall: 0.9665 - Num F1: 0.9706 - Num Number: 239 - N Prp Precision: 0.9308 - N Prp Recall: 0.9447 - N Prp F1: 0.9377 - N Prp Number: 470 - N V Precision: 0.9854 - N V Recall: 0.9736 - N V F1: 0.9794 - N V Number: 416 - Pre Precision: 0.9722 - Pre Recall: 0.9625 - Pre F1: 0.9673 - Pre Number: 907 - Pro Precision: 0.9448 - Pro Recall: 0.9236 - Pro F1: 0.9341 - Pro Number: 445 - Pun Precision: 1.0 - Pun Recall: 0.9994 - Pun F1: 0.9997 - Pun Number: 1607 - Unc Precision: 1.0 - Unc Recall: 0.875 - Unc F1: 0.9333 - Unc Number: 16 - V Precision: 0.8780 - V Recall: 0.9231 - V F1: 0.9 - V Number: 78 - V Aux Precision: 0.9685 - V Aux Recall: 0.9878 - V Aux F1: 0.9780 - V Aux Number: 654 - V Ger Precision: 0.9388 - V Ger Recall: 0.9571 - V Ger F1: 0.9479 - V Ger Number: 513 - V Imf Precision: 0.9634 - V Imf Recall: 0.9497 - V Imf F1: 0.9565 - V Imf Number: 914 - V Imv Precision: 0.8793 - V Imv Recall: 0.7286 - V Imv F1: 0.7969 - V Imv Number: 70 - V Prf Precision: 0.8960 - V Prf Recall: 0.9082 - V Prf F1: 0.9020 - V Prf Number: 294 - V Rel Precision: 0.9678 - V Rel Recall: 0.9538 - V Rel F1: 0.9607 - V Rel Number: 757 - Overall Precision: 0.9562 - Overall Recall: 0.9562 - Overall F1: 0.9562 - Overall Accuracy: 0.9562 ### Framework versions - Transformers 4.12.0.dev0 - Pytorch 1.9.0+cu111 - Datasets 1.13.3 - Tokenizers 0.10.3 If you use this model in your product or research, please cite as follows: ## References
[ "# Tigrinya POS tagging with TiRoBERTa\n\nThis model is a fine-tuned version of TiRoBERTa on the NTC-v1 dataset (Tedla et al. 2016).", "## Training", "### Hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 8\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10.0", "### Results\n\nThe model achieves the following results on the test set:\n- Loss: 0.3194\n- Adj Precision: 0.9219\n- Adj Recall: 0.9335\n- Adj F1: 0.9277\n- Adj Number: 1670\n- Adv Precision: 0.8297\n- Adv Recall: 0.8554\n- Adv F1: 0.8423\n- Adv Number: 484\n- Con Precision: 0.9844\n- Con Recall: 0.9763\n- Con F1: 0.9804\n- Con Number: 972\n- Fw Precision: 0.7895\n- Fw Recall: 0.5357\n- Fw F1: 0.6383\n- Fw Number: 28\n- Int Precision: 0.6552\n- Int Recall: 0.7308\n- Int F1: 0.6909\n- Int Number: 26\n- N Precision: 0.9650\n- N Recall: 0.9662\n- N F1: 0.9656\n- N Number: 3992\n- Num Precision: 0.9747\n- Num Recall: 0.9665\n- Num F1: 0.9706\n- Num Number: 239\n- N Prp Precision: 0.9308\n- N Prp Recall: 0.9447\n- N Prp F1: 0.9377\n- N Prp Number: 470\n- N V Precision: 0.9854\n- N V Recall: 0.9736\n- N V F1: 0.9794\n- N V Number: 416\n- Pre Precision: 0.9722\n- Pre Recall: 0.9625\n- Pre F1: 0.9673\n- Pre Number: 907\n- Pro Precision: 0.9448\n- Pro Recall: 0.9236\n- Pro F1: 0.9341\n- Pro Number: 445\n- Pun Precision: 1.0\n- Pun Recall: 0.9994\n- Pun F1: 0.9997\n- Pun Number: 1607\n- Unc Precision: 1.0\n- Unc Recall: 0.875\n- Unc F1: 0.9333\n- Unc Number: 16\n- V Precision: 0.8780\n- V Recall: 0.9231\n- V F1: 0.9\n- V Number: 78\n- V Aux Precision: 0.9685\n- V Aux Recall: 0.9878\n- V Aux F1: 0.9780\n- V Aux Number: 654\n- V Ger Precision: 0.9388\n- V Ger Recall: 0.9571\n- V Ger F1: 0.9479\n- V Ger Number: 513\n- V Imf Precision: 0.9634\n- V Imf Recall: 0.9497\n- V Imf F1: 0.9565\n- V Imf Number: 914\n- V Imv Precision: 0.8793\n- V Imv Recall: 0.7286\n- V Imv F1: 0.7969\n- V Imv Number: 70\n- V Prf Precision: 0.8960\n- V Prf Recall: 0.9082\n- V Prf F1: 0.9020\n- V Prf Number: 294\n- V Rel Precision: 0.9678\n- V Rel Recall: 0.9538\n- V Rel F1: 0.9607\n- V Rel Number: 757\n- Overall Precision: 0.9562\n- Overall Recall: 0.9562\n- Overall F1: 0.9562\n- Overall Accuracy: 0.9562", "### Framework versions\n\n- Transformers 4.12.0.dev0\n- Pytorch 1.9.0+cu111\n- Datasets 1.13.3\n- Tokenizers 0.10.3\n\n\nIf you use this model in your product or research, please cite as follows:", "## References" ]
[ "TAGS\n#transformers #pytorch #safetensors #roberta #token-classification #ti #dataset-TLMD #dataset-NTC #model-index #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Tigrinya POS tagging with TiRoBERTa\n\nThis model is a fine-tuned version of TiRoBERTa on the NTC-v1 dataset (Tedla et al. 2016).", "## Training", "### Hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 8\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10.0", "### Results\n\nThe model achieves the following results on the test set:\n- Loss: 0.3194\n- Adj Precision: 0.9219\n- Adj Recall: 0.9335\n- Adj F1: 0.9277\n- Adj Number: 1670\n- Adv Precision: 0.8297\n- Adv Recall: 0.8554\n- Adv F1: 0.8423\n- Adv Number: 484\n- Con Precision: 0.9844\n- Con Recall: 0.9763\n- Con F1: 0.9804\n- Con Number: 972\n- Fw Precision: 0.7895\n- Fw Recall: 0.5357\n- Fw F1: 0.6383\n- Fw Number: 28\n- Int Precision: 0.6552\n- Int Recall: 0.7308\n- Int F1: 0.6909\n- Int Number: 26\n- N Precision: 0.9650\n- N Recall: 0.9662\n- N F1: 0.9656\n- N Number: 3992\n- Num Precision: 0.9747\n- Num Recall: 0.9665\n- Num F1: 0.9706\n- Num Number: 239\n- N Prp Precision: 0.9308\n- N Prp Recall: 0.9447\n- N Prp F1: 0.9377\n- N Prp Number: 470\n- N V Precision: 0.9854\n- N V Recall: 0.9736\n- N V F1: 0.9794\n- N V Number: 416\n- Pre Precision: 0.9722\n- Pre Recall: 0.9625\n- Pre F1: 0.9673\n- Pre Number: 907\n- Pro Precision: 0.9448\n- Pro Recall: 0.9236\n- Pro F1: 0.9341\n- Pro Number: 445\n- Pun Precision: 1.0\n- Pun Recall: 0.9994\n- Pun F1: 0.9997\n- Pun Number: 1607\n- Unc Precision: 1.0\n- Unc Recall: 0.875\n- Unc F1: 0.9333\n- Unc Number: 16\n- V Precision: 0.8780\n- V Recall: 0.9231\n- V F1: 0.9\n- V Number: 78\n- V Aux Precision: 0.9685\n- V Aux Recall: 0.9878\n- V Aux F1: 0.9780\n- V Aux Number: 654\n- V Ger Precision: 0.9388\n- V Ger Recall: 0.9571\n- V Ger F1: 0.9479\n- V Ger Number: 513\n- V Imf Precision: 0.9634\n- V Imf Recall: 0.9497\n- V Imf F1: 0.9565\n- V Imf Number: 914\n- V Imv Precision: 0.8793\n- V Imv Recall: 0.7286\n- V Imv F1: 0.7969\n- V Imv Number: 70\n- V Prf Precision: 0.8960\n- V Prf Recall: 0.9082\n- V Prf F1: 0.9020\n- V Prf Number: 294\n- V Rel Precision: 0.9678\n- V Rel Recall: 0.9538\n- V Rel F1: 0.9607\n- V Rel Number: 757\n- Overall Precision: 0.9562\n- Overall Recall: 0.9562\n- Overall F1: 0.9562\n- Overall Accuracy: 0.9562", "### Framework versions\n\n- Transformers 4.12.0.dev0\n- Pytorch 1.9.0+cu111\n- Datasets 1.13.3\n- Tokenizers 0.10.3\n\n\nIf you use this model in your product or research, please cite as follows:", "## References" ]
text-classification
transformers
# Sentiment Analysis for Tigrinya with TiRoBERTa This model is a fine-tuned version of [TiRoBERTa](https://huggingface.co/fgaim/roberta-base-tigrinya) on a YouTube comments Sentiment Analysis dataset for Tigrinya (Tela et al. 2020). ## Basic usage ```python from transformers import pipeline ti_sent = pipeline("sentiment-analysis", model="fgaim/tiroberta-sentiment") ti_sent("ድምጻዊ ኣብርሃም ኣፈወርቂ ንዘልኣለም ህያው ኮይኑ ኣብ ልብና ይነብር") ``` ## Training ### Hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Results It achieves the following results on the evaluation set: - F1: 0.8477 - Precision: 0.7607 - Recall: 0.957 - Accuracy: 0.828 - Loss: 0.6796 ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu111 - Datasets 1.10.2 - Tokenizers 0.10.1 ## Citation If you use this model in your product or research, please cite as follows: ``` @article{Fitsum2021TiPLMs, author={Fitsum Gaim and Wonsuk Yang and Jong C. Park}, title={Monolingual Pre-trained Language Models for Tigrinya}, year=2021, publisher={WiNLP 2021/EMNLP 2021} } ``` ## References ``` Tela, A., Woubie, A. and Hautamäki, V. 2020. Transferring Monolingual Model to Low-Resource Language: The Case of Tigrinya. ArXiv, abs/2006.07698. ```
{"language": "ti", "datasets": ["TLMD"], "metrics": ["accuracy", "f1", "precision", "recall"], "widget": [{"text": "\u12f5\u121d\u133b\u12ca \u12a3\u1265\u122d\u1203\u121d \u12a3\u1348\u12c8\u122d\u1242 \u1295\u12d8\u120d\u12a3\u1208\u121d \u1205\u12eb\u12cd \u12ae\u12ed\u1291 \u12a3\u1265 \u120d\u1265\u1293 \u12ed\u1290\u1265\u122d"}]}
fgaim/tiroberta-sentiment
null
[ "transformers", "pytorch", "roberta", "text-classification", "ti", "dataset:TLMD", "model-index", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "ti" ]
TAGS #transformers #pytorch #roberta #text-classification #ti #dataset-TLMD #model-index #autotrain_compatible #endpoints_compatible #has_space #region-us
# Sentiment Analysis for Tigrinya with TiRoBERTa This model is a fine-tuned version of TiRoBERTa on a YouTube comments Sentiment Analysis dataset for Tigrinya (Tela et al. 2020). ## Basic usage ## Training ### Hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Results It achieves the following results on the evaluation set: - F1: 0.8477 - Precision: 0.7607 - Recall: 0.957 - Accuracy: 0.828 - Loss: 0.6796 ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu111 - Datasets 1.10.2 - Tokenizers 0.10.1 If you use this model in your product or research, please cite as follows: ## References
[ "# Sentiment Analysis for Tigrinya with TiRoBERTa\n\nThis model is a fine-tuned version of TiRoBERTa on a YouTube comments Sentiment Analysis dataset for Tigrinya (Tela et al. 2020).", "## Basic usage", "## Training", "### Hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 3.0", "### Results\n\nIt achieves the following results on the evaluation set:\n- F1: 0.8477\n- Precision: 0.7607\n- Recall: 0.957\n- Accuracy: 0.828\n- Loss: 0.6796", "### Framework versions\n\n- Transformers 4.10.3\n- Pytorch 1.9.0+cu111\n- Datasets 1.10.2\n- Tokenizers 0.10.1\n\n\nIf you use this model in your product or research, please cite as follows:", "## References" ]
[ "TAGS\n#transformers #pytorch #roberta #text-classification #ti #dataset-TLMD #model-index #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Sentiment Analysis for Tigrinya with TiRoBERTa\n\nThis model is a fine-tuned version of TiRoBERTa on a YouTube comments Sentiment Analysis dataset for Tigrinya (Tela et al. 2020).", "## Basic usage", "## Training", "### Hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 3.0", "### Results\n\nIt achieves the following results on the evaluation set:\n- F1: 0.8477\n- Precision: 0.7607\n- Recall: 0.957\n- Accuracy: 0.828\n- Loss: 0.6796", "### Framework versions\n\n- Transformers 4.10.3\n- Pytorch 1.9.0+cu111\n- Datasets 1.10.2\n- Tokenizers 0.10.1\n\n\nIf you use this model in your product or research, please cite as follows:", "## References" ]
text-classification
transformers
# NewsSentiment: easy-to-use, high-quality target-dependent sentiment classification for news articles ## Important: [use our PyPI package](https://pypi.org/project/NewsSentiment/) instead of this model on the Hub The Huggingface Hub architecture currently [does not support](https://github.com/huggingface/transformers/issues/14785) target-dependent sentiment classification since you cannot provide the required inputs, i.e., sentence and target. Thus, we recommend that you use our easy-to-use [PyPI package NewsSentiment](https://pypi.org/project/NewsSentiment/). ## Description This model is the currently [best performing](https://aclanthology.org/2021.eacl-main.142.pdf) targeted sentiment classifier for news articles. In contrast to regular sentiment classification, targeted sentiment classification allows you to provide a target in a sentence. Only for this target, the sentiment is then predicted. This is more reliable in many cases, as demonstrated by the following simplistic example: "I like Bert, but I hate Robert." This model is also available as an easy-to-use PyPI package named [`NewsSentiment`](https://pypi.org/project/NewsSentiment/) and in its original GitHub repository named [`NewsMTSC`](https://github.com/fhamborg/NewsMTSC), where you will find the dataset the model was trained on, other models for sentiment classification, and a training and testing framework. More information on the model and the dataset (consisting of more than 10k sentences sampled from news articles, each labeled and agreed upon by at least 5 annotators) can be found in our [EACL paper](https://aclanthology.org/2021.eacl-main.142.pdf). The dataset, the model, and its source code can be viewed in our [GitHub repository](https://github.com/fhamborg/NewsMTSC). We recommend to use our [PyPI package](https://pypi.org/project/NewsSentiment/) for sentiment classification since the Huggingface Hub platform seems to [not support](https://github.com/huggingface/transformers/issues/14785) target-dependent sentiment classification. # How to cite If you use the dataset or model, please cite our [paper](https://www.aclweb.org/anthology/2021.eacl-main.142/) ([PDF](https://www.aclweb.org/anthology/2021.eacl-main.142.pdf)): ``` @InProceedings{Hamborg2021b, author = {Hamborg, Felix and Donnay, Karsten}, title = {NewsMTSC: (Multi-)Target-dependent Sentiment Classification in News Articles}, booktitle = {Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics (EACL 2021)}, year = {2021}, month = {Apr.}, location = {Virtual Event}, } ```
{"language": ["en"], "license": "apache-2.0", "tags": ["text-classification", "sentiment-analysis", "sentiment-classification", "targeted-sentiment-classification", "target-depentent-sentiment-classification"], "datasets": "fhamborg/news_sentiment_newsmtsc"}
fhamborg/roberta-targeted-sentiment-classification-newsarticles
null
[ "transformers", "pytorch", "roberta", "text-classification", "sentiment-analysis", "sentiment-classification", "targeted-sentiment-classification", "target-depentent-sentiment-classification", "en", "dataset:fhamborg/news_sentiment_newsmtsc", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #transformers #pytorch #roberta #text-classification #sentiment-analysis #sentiment-classification #targeted-sentiment-classification #target-depentent-sentiment-classification #en #dataset-fhamborg/news_sentiment_newsmtsc #license-apache-2.0 #endpoints_compatible #region-us
# NewsSentiment: easy-to-use, high-quality target-dependent sentiment classification for news articles ## Important: use our PyPI package instead of this model on the Hub The Huggingface Hub architecture currently does not support target-dependent sentiment classification since you cannot provide the required inputs, i.e., sentence and target. Thus, we recommend that you use our easy-to-use PyPI package NewsSentiment. ## Description This model is the currently best performing targeted sentiment classifier for news articles. In contrast to regular sentiment classification, targeted sentiment classification allows you to provide a target in a sentence. Only for this target, the sentiment is then predicted. This is more reliable in many cases, as demonstrated by the following simplistic example: "I like Bert, but I hate Robert." This model is also available as an easy-to-use PyPI package named 'NewsSentiment' and in its original GitHub repository named 'NewsMTSC', where you will find the dataset the model was trained on, other models for sentiment classification, and a training and testing framework. More information on the model and the dataset (consisting of more than 10k sentences sampled from news articles, each labeled and agreed upon by at least 5 annotators) can be found in our EACL paper. The dataset, the model, and its source code can be viewed in our GitHub repository. We recommend to use our PyPI package for sentiment classification since the Huggingface Hub platform seems to not support target-dependent sentiment classification. # How to cite If you use the dataset or model, please cite our paper (PDF):
[ "# NewsSentiment: easy-to-use, high-quality target-dependent sentiment classification for news articles", "## Important: use our PyPI package instead of this model on the Hub\nThe Huggingface Hub architecture currently does not support target-dependent sentiment classification since you cannot provide the required inputs, i.e., sentence and target. Thus, we recommend that you use our easy-to-use PyPI package NewsSentiment.", "## Description\n\nThis model is the currently best performing \ntargeted sentiment classifier for news articles. In contrast to regular sentiment\nclassification, targeted sentiment classification allows you to provide a target in a sentence. \nOnly for this target, the sentiment is then predicted. This is more reliable in many\ncases, as demonstrated by the following simplistic example: \"I like Bert, but I hate Robert.\"\n\nThis model is also available as an easy-to-use PyPI package named 'NewsSentiment' and \nin its original GitHub repository named 'NewsMTSC', where you will find the dataset the model was trained on, other models for sentiment classification, and a training and testing framework. More information on the model and the dataset (consisting of more than 10k sentences sampled from news articles, each \nlabeled and agreed upon by at least 5 annotators) can be found in our EACL paper. The\ndataset, the model, and its source code can be viewed in our GitHub repository.\n\nWe recommend to use our PyPI package for sentiment classification since the Huggingface Hub platform seems to not support target-dependent sentiment classification.", "# How to cite\nIf you use the dataset or model, please cite our paper (PDF):" ]
[ "TAGS\n#transformers #pytorch #roberta #text-classification #sentiment-analysis #sentiment-classification #targeted-sentiment-classification #target-depentent-sentiment-classification #en #dataset-fhamborg/news_sentiment_newsmtsc #license-apache-2.0 #endpoints_compatible #region-us \n", "# NewsSentiment: easy-to-use, high-quality target-dependent sentiment classification for news articles", "## Important: use our PyPI package instead of this model on the Hub\nThe Huggingface Hub architecture currently does not support target-dependent sentiment classification since you cannot provide the required inputs, i.e., sentence and target. Thus, we recommend that you use our easy-to-use PyPI package NewsSentiment.", "## Description\n\nThis model is the currently best performing \ntargeted sentiment classifier for news articles. In contrast to regular sentiment\nclassification, targeted sentiment classification allows you to provide a target in a sentence. \nOnly for this target, the sentiment is then predicted. This is more reliable in many\ncases, as demonstrated by the following simplistic example: \"I like Bert, but I hate Robert.\"\n\nThis model is also available as an easy-to-use PyPI package named 'NewsSentiment' and \nin its original GitHub repository named 'NewsMTSC', where you will find the dataset the model was trained on, other models for sentiment classification, and a training and testing framework. More information on the model and the dataset (consisting of more than 10k sentences sampled from news articles, each \nlabeled and agreed upon by at least 5 annotators) can be found in our EACL paper. The\ndataset, the model, and its source code can be viewed in our GitHub repository.\n\nWe recommend to use our PyPI package for sentiment classification since the Huggingface Hub platform seems to not support target-dependent sentiment classification.", "# How to cite\nIf you use the dataset or model, please cite our paper (PDF):" ]
token-classification
transformers
# BERT-DE-NER ## What is it? This is a German BERT model fine-tuned for named entity recognition. ## Base model & training This model is based on [bert-base-german-dbmdz-cased](https://huggingface.co/bert-base-german-dbmdz-cased) and has been fine-tuned for NER on the training data from [GermEval2014](https://sites.google.com/site/germeval2014ner). ## Model results The results on the test data from GermEval2014 are (entities only): | Precision | Recall | F1-Score | |----------:|-------:|---------:| | 0.817 | 0.842 | 0.829 | ## How to use ```Python >>> from transformers import pipeline >>> classifier = pipeline('ner', model="fhswf/bert_de_ner") >>> classifier('Von der Organisation „medico international“ hieß es, die EU entziehe sich seit vielen Jahren der Verantwortung für die Menschen an ihren Außengrenzen.') [{'word': 'med', 'score': 0.9996621608734131, 'entity': 'B-ORG', 'index': 6}, {'word': '##ico', 'score': 0.9995362162590027, 'entity': 'I-ORG', 'index': 7}, {'word': 'international', 'score': 0.9996932744979858, 'entity': 'I-ORG', 'index': 8}, {'word': 'eu', 'score': 0.9997008442878723, 'entity': 'B-ORG', 'index': 14}] ```
{"language": "de", "license": "cc-by-sa-4.0", "tags": ["German", "de", "NER"], "datasets": ["germeval_14"]}
fhswf/bert_de_ner
null
[ "transformers", "pytorch", "tf", "jax", "safetensors", "bert", "token-classification", "German", "de", "NER", "dataset:germeval_14", "doi:10.57967/hf/0655", "license:cc-by-sa-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "de" ]
TAGS #transformers #pytorch #tf #jax #safetensors #bert #token-classification #German #de #NER #dataset-germeval_14 #doi-10.57967/hf/0655 #license-cc-by-sa-4.0 #autotrain_compatible #endpoints_compatible #region-us
BERT-DE-NER =========== What is it? ----------- This is a German BERT model fine-tuned for named entity recognition. Base model & training --------------------- This model is based on bert-base-german-dbmdz-cased and has been fine-tuned for NER on the training data from GermEval2014. Model results ------------- The results on the test data from GermEval2014 are (entities only): How to use ----------
[]
[ "TAGS\n#transformers #pytorch #tf #jax #safetensors #bert #token-classification #German #de #NER #dataset-germeval_14 #doi-10.57967/hf/0655 #license-cc-by-sa-4.0 #autotrain_compatible #endpoints_compatible #region-us \n" ]
text-generation
transformers
# Fibruh Bot Model
{"tags": ["conversational"]}
fibruh/DialoGPT-small-harrypotter
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Fibruh Bot Model
[ "# Fibruh Bot Model" ]
[ "TAGS\n#transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Fibruh Bot Model" ]
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # biobert_v1.1_pubmed-finetuned-ner-finetuned-ner This model is a fine-tuned version of [fidukm34/biobert_v1.1_pubmed-finetuned-ner](https://huggingface.co/fidukm34/biobert_v1.1_pubmed-finetuned-ner) on the ncbi_disease dataset. It achieves the following results on the evaluation set: - Loss: 0.0715 - Precision: 0.8464 - Recall: 0.8872 - F1: 0.8663 - Accuracy: 0.9829 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 340 | 0.0715 | 0.8464 | 0.8872 | 0.8663 | 0.9829 | ### Framework versions - Transformers 4.8.1 - Pytorch 1.9.0+cu102 - Datasets 1.11.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["ncbi_disease"], "metrics": ["precision", "recall", "f1", "accuracy"], "model_index": [{"name": "biobert_v1.1_pubmed-finetuned-ner-finetuned-ner", "results": [{"task": {"name": "Token Classification", "type": "token-classification"}, "dataset": {"name": "ncbi_disease", "type": "ncbi_disease", "args": "ncbi_disease"}, "metric": {"name": "Accuracy", "type": "accuracy", "value": 0.9829142288061745}}]}]}
fidukm34/biobert_v1.1_pubmed-finetuned-ner-finetuned-ner
null
[ "transformers", "pytorch", "bert", "token-classification", "generated_from_trainer", "dataset:ncbi_disease", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #bert #token-classification #generated_from_trainer #dataset-ncbi_disease #autotrain_compatible #endpoints_compatible #region-us
biobert\_v1.1\_pubmed-finetuned-ner-finetuned-ner ================================================= This model is a fine-tuned version of fidukm34/biobert\_v1.1\_pubmed-finetuned-ner on the ncbi\_disease dataset. It achieves the following results on the evaluation set: * Loss: 0.0715 * Precision: 0.8464 * Recall: 0.8872 * F1: 0.8663 * Accuracy: 0.9829 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 ### Training results ### Framework versions * Transformers 4.8.1 * Pytorch 1.9.0+cu102 * Datasets 1.11.0 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.8.1\n* Pytorch 1.9.0+cu102\n* Datasets 1.11.0\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #bert #token-classification #generated_from_trainer #dataset-ncbi_disease #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.8.1\n* Pytorch 1.9.0+cu102\n* Datasets 1.11.0\n* Tokenizers 0.10.3" ]
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # biobert_v1.1_pubmed-finetuned-ner This model is a fine-tuned version of [monologg/biobert_v1.1_pubmed](https://huggingface.co/monologg/biobert_v1.1_pubmed) on the ncbi_disease dataset. It achieves the following results on the evaluation set: - Loss: 0.0657 - Precision: 0.8338 - Recall: 0.8933 - F1: 0.8625 - Accuracy: 0.9827 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 340 | 0.0612 | 0.8268 | 0.85 | 0.8382 | 0.9806 | | 0.0987 | 2.0 | 680 | 0.0604 | 0.8397 | 0.8848 | 0.8616 | 0.9829 | | 0.0272 | 3.0 | 1020 | 0.0657 | 0.8338 | 0.8933 | 0.8625 | 0.9827 | ### Framework versions - Transformers 4.8.1 - Pytorch 1.9.0 - Datasets 1.6.2 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["ncbi_disease"], "metrics": ["precision", "recall", "f1", "accuracy"], "model_index": [{"name": "biobert_v1.1_pubmed-finetuned-ner", "results": [{"task": {"name": "Token Classification", "type": "token-classification"}, "dataset": {"name": "ncbi_disease", "type": "ncbi_disease", "args": "ncbi_disease"}, "metric": {"name": "Accuracy", "type": "accuracy", "value": 0.9827274990663513}}]}]}
fidukm34/biobert_v1.1_pubmed-finetuned-ner
null
[ "transformers", "pytorch", "bert", "token-classification", "generated_from_trainer", "dataset:ncbi_disease", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #bert #token-classification #generated_from_trainer #dataset-ncbi_disease #autotrain_compatible #endpoints_compatible #region-us
biobert\_v1.1\_pubmed-finetuned-ner =================================== This model is a fine-tuned version of monologg/biobert\_v1.1\_pubmed on the ncbi\_disease dataset. It achieves the following results on the evaluation set: * Loss: 0.0657 * Precision: 0.8338 * Recall: 0.8933 * F1: 0.8625 * Accuracy: 0.9827 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 3 ### Training results ### Framework versions * Transformers 4.8.1 * Pytorch 1.9.0 * Datasets 1.6.2 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 3", "### Training results", "### Framework versions\n\n\n* Transformers 4.8.1\n* Pytorch 1.9.0\n* Datasets 1.6.2\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #bert #token-classification #generated_from_trainer #dataset-ncbi_disease #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 3", "### Training results", "### Framework versions\n\n\n* Transformers 4.8.1\n* Pytorch 1.9.0\n* Datasets 1.6.2\n* Tokenizers 0.10.3" ]
null
transformers
This model can measure semantic similarity between pairs of texts containing figurative language. As far as we know, this model works slightly better than sup-simCSE-roberta-base. For example:

**sentence 1**: I have been in seventh heaven since Harry entered my life .

**sentence 2**: I have been in very happy since Harry entered my life.

the cosine score of SimCSE: 0.897

the cosine score of our model: 0.897

-------------------------------------------------------------------

**sentence 1**: I have been in seventh heaven since Harry entered my life .

**sentence 2**: I have been in pain since Harry entered my life .

the cosine score of SimCSE: 0.846

the cosine score of our model: 0.753

--------------------------------------------------

Measuring the semantic similarity of figurative language from the sentence-embedding perspective is still a big challenge. Unsupervised models may be of little use here, because the key is to infer the literal meaning of the figurative expression, and annotated data is rare.
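The card reports cosine scores but does not show how the sentence embeddings are obtained. The sketch below is one plausible way to reproduce such scores, assuming mean pooling over the last hidden state; the authors may instead use the CLS token or another pooling scheme, so treat this as an illustration rather than the reference implementation.

```Python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("figurative-nlp/se4fig-roberta-base")
model = AutoModel.from_pretrained("figurative-nlp/se4fig-roberta-base")

def embed(sentence: str) -> torch.Tensor:
    # Mean-pool the last hidden state over non-padding tokens (assumed pooling scheme).
    inputs = tokenizer(sentence, return_tensors="pt")
    with torch.no_grad():
        hidden = model(**inputs).last_hidden_state  # (1, seq_len, hidden_dim)
    mask = inputs["attention_mask"].unsqueeze(-1)
    return (hidden * mask).sum(dim=1) / mask.sum(dim=1)

a = embed("I have been in seventh heaven since Harry entered my life.")
b = embed("I have been in pain since Harry entered my life.")
print(torch.cosine_similarity(a, b).item())
```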
{}
figurative-nlp/se4fig-roberta-base
null
[ "transformers", "pytorch", "roberta", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #roberta #endpoints_compatible #region-us
This model can measure semantic similarity between pairs of texts containing figurative language. As far as we know, this model works slightly better than sup-simCSE-roberta-base. For example : sentence 1: I have been in seventh heaven since Harry entered my life . sentence 2: I have been in very happy since Harry entered my life. the cosin score of simcse: 0.897 the cosin score of us: 0.897 ------------------------------------------------------------------- sentence 1: I have been in seventh heaven since Harry entered my life . sentence 2: I have been in pain since Harry entered my life . the cosin score of simcse: 0.846 the cosin score of us: 0.753 -------------------------------------------------- It's still a big challenge for us to measure semantic similarity of figurative language from the sentence embedding perspective. unsupvised models may useless as the key is to infer the literal meaning of the figurative expression, since the annotated is rare.
[]
[ "TAGS\n#transformers #pytorch #roberta #endpoints_compatible #region-us \n" ]
text2text-generation
transformers
This model can convert a literal expression to a figurative/metaphorical expression. Below is the usage of our model:

    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    tokenizer = AutoTokenizer.from_pretrained("figurative-nlp/t5-figurative-generation")
    model = AutoModelForSeq2SeqLM.from_pretrained("figurative-nlp/t5-figurative-generation")

    input_ids = tokenizer(
        "research is <m> very difficult </m> for me.", return_tensors="pt"
    ).input_ids  # Batch size 1
    outputs = model.generate(input_ids, num_beams=5)
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # result: research is a tough nut to crack for me.

For example (the &lt;m&gt; and &lt;/m&gt; tags mark the literal expression that we want the model to convert into a figurative expression):

**Input**: as of a cloud that softly &lt;m&gt; covers &lt;/m&gt; the sun.

**Output**: as of a cloud that softly drapes over the sun.

**Input**: that car coming around the corner &lt;m&gt; surprised me. &lt;/m&gt;

**Output**: that car coming around the corner knocked my socks off.

Note: the figurative language here includes metaphor, idiom and simile. We don't guarantee that the generated results will be satisfactory to you. We are still working to improve the model.
{}
figurative-nlp/t5-figurative-generation
null
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #t5 #text2text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
This model can convert the literal expression to figurative/metaphorical expression. Below is the usage of our model: from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("figurative-nlp/t5-figurative-generation") model = AutoModelForSeq2SeqLM.from_pretrained("figurative-nlp/t5-figurative-generation") input_ids = tokenizer( "research is <m> very difficult </m> for me.", return_tensors="pt" ).input_ids # Batch size 1 outputs = model.generate(input_ids,beam_search = 5) result = URL(outputs[0], skip_special_tokens=True) #result : research is a tough nut to crack for me. For example (the &lt;m&gt; and &lt;/m&gt; is the mark that inform the model which literal expression we want to convert it as figurative expression): Input: as of a cloud that softly &lt;m&gt; covers &lt;/m&gt; the sun. Output: as of a cloud that softly drapes over the sun. Input: that car coming around the corner &lt;m&gt; surprised me. &lt;/m&gt; Output: that car coming around the corner knocked my socks off. Note: the figurative language here includes metaphor, idiom and simile. We don't guarantee that the results generated results are satisfactory to you. We are trying to improve the effect of the model.
[ "# Batch size 1\n outputs = model.generate(input_ids,beam_search = 5)\n result = URL(outputs[0], skip_special_tokens=True)\n #result : research is a tough nut to crack for me.\n\n\n\nFor example (the &lt;m&gt; and &lt;/m&gt; is the mark that inform the model which literal expression we want to convert it as figurative expression):\n\n Input: as of a cloud that softly &lt;m&gt; covers &lt;/m&gt; the sun.\n \n Output: as of a cloud that softly drapes over the sun. \n \n Input: that car coming around the corner &lt;m&gt; surprised me. &lt;/m&gt;\n \n Output: that car coming around the corner knocked my socks off.\n \n \n Note: the figurative language here includes metaphor, idiom and simile. We don't guarantee that the results generated results are satisfactory to you. We are trying to improve the effect of the model." ]
[ "TAGS\n#transformers #pytorch #t5 #text2text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Batch size 1\n outputs = model.generate(input_ids,beam_search = 5)\n result = URL(outputs[0], skip_special_tokens=True)\n #result : research is a tough nut to crack for me.\n\n\n\nFor example (the &lt;m&gt; and &lt;/m&gt; is the mark that inform the model which literal expression we want to convert it as figurative expression):\n\n Input: as of a cloud that softly &lt;m&gt; covers &lt;/m&gt; the sun.\n \n Output: as of a cloud that softly drapes over the sun. \n \n Input: that car coming around the corner &lt;m&gt; surprised me. &lt;/m&gt;\n \n Output: that car coming around the corner knocked my socks off.\n \n \n Note: the figurative language here includes metaphor, idiom and simile. We don't guarantee that the results generated results are satisfactory to you. We are trying to improve the effect of the model." ]
text2text-generation
transformers
This model can convert a figurative/metaphorical expression to its literal expression. Below is the usage of our model:

    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    tokenizer = AutoTokenizer.from_pretrained("figurative-nlp/t5-figurative-paraphrase")
    model = AutoModelForSeq2SeqLM.from_pretrained("figurative-nlp/t5-figurative-paraphrase")

    input_ids = tokenizer(
        "paraphrase the sentence : i will talk this story to you from A to Z", return_tensors="pt"
    ).input_ids  # Batch size 1
    outputs = model.generate(input_ids, num_beams=5)
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # result: i will talk this story to you from beginning to end.

For example:

**Input**: He is always bang on when he makes a speech.

**Output**: He is always presice when he makes a speech.

**Input**: He always buy what he said.

**Output**: He always agree with what he said.

**Input**: Your team will be done like dinner if they play against the all-star team.

**Output**: Your team will be defeated if they play against the all-star team. (this one is not particularly accurate)

Note: the figurative language here includes metaphor, idiom and simile. We don't guarantee that the generated results will be satisfactory to you. We are still working to improve the model.
{}
figurative-nlp/t5-figurative-paraphrase
null
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #t5 #text2text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
This model can convert the figurative/metaphorical expression to the literal expression. Below is the usage of our model: from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("figurative-nlp/t5-figurative-paraphrase") model = AutoModelForSeq2SeqLM.from_pretrained("figurative-nlp/t5-figurative-paraphrase") input_ids = tokenizer( "paraphrase the sentence : i will talk this story to you from A to Z", return_tensors="pt" ).input_ids # Batch size 1 outputs = model.generate(input_ids,num_beams = 5) result = URL(outputs[0], skip_special_tokens=True) #result : i will talk this story to you from beginning to end.. For example: Input: He is always bang on when he makes a speech. Output: He is always presice when he makes a speech. Input: He always buy what he said. Output: He always agree with what he said. Input: Your team will be done like dinner if they play against the all-star team. Output: Your team will be defeated if they play against the all-star team. (the one is not particularly accurate) Note: the figurative language here includes metaphor, idiom and simile. We don't guarantee that the results generated results are satisfactory to you. We are trying to improve the effect of the model.
[ "# Batch size 1\n outputs = model.generate(input_ids,num_beams = 5)\n result = URL(outputs[0], skip_special_tokens=True)\n #result : i will talk this story to you from beginning to end..\n \n\n\n\nFor example:\n\n Input: He is always bang on when he makes a speech.\n \n Output: He is always presice when he makes a speech.\n \n Input: He always buy what he said.\n \n Output: He always agree with what he said. \n \n Input: Your team will be done like dinner if they play against the all-star team.\n \n Output: Your team will be defeated if they play against the all-star team. (the one is not particularly accurate)\n \n \n \n Note: the figurative language here includes metaphor, idiom and simile. We don't guarantee that the results generated results are satisfactory to you. We are trying to improve the effect of the model." ]
[ "TAGS\n#transformers #pytorch #t5 #text2text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Batch size 1\n outputs = model.generate(input_ids,num_beams = 5)\n result = URL(outputs[0], skip_special_tokens=True)\n #result : i will talk this story to you from beginning to end..\n \n\n\n\nFor example:\n\n Input: He is always bang on when he makes a speech.\n \n Output: He is always presice when he makes a speech.\n \n Input: He always buy what he said.\n \n Output: He always agree with what he said. \n \n Input: Your team will be done like dinner if they play against the all-star team.\n \n Output: Your team will be defeated if they play against the all-star team. (the one is not particularly accurate)\n \n \n \n Note: the figurative language here includes metaphor, idiom and simile. We don't guarantee that the results generated results are satisfactory to you. We are trying to improve the effect of the model." ]
null
null
import requests

API_URL = "https://api-inference.huggingface.co/models/huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad"
headers = {"Authorization": "Bearer api_UXqrzQBiZKXaWxstVwEKcYvHQpGSGiQGbr"}

def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

output = query({
    "inputs": {
        "question": "What's my name?",
        "context": "My name is Clara and I live in Berkeley.",
    },
})
{}
fihtrotuld/123
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #region-us
import requests API_URL = "URL headers = {"Authorization": "Bearer api_UXqrzQBiZKXaWxstVwEKcYvHQpGSGiQGbr"} def query(payload): response = URL(API_URL, headers=headers, json=payload) return URL() output = query({ "inputs": { "question": "What's my name?", "context": "My name is Clara and I live in Berkeley.", }, })
[]
[ "TAGS\n#region-us \n" ]
text-generation
transformers
# GPT2 base style transfer paraphraser This is the trained base-model from the paper [Reformulating Unsupervised Style Transfer as Paraphrase Generation](https://arxiv.org/abs/2010.05700) by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. ## Citation If you found this model useful, please cite the original work: ``` @inproceedings{style20, author={Kalpesh Krishna and John Wieting and Mohit Iyyer}, Booktitle = {Empirical Methods in Natural Language Processing}, Year = "2020", Title={Reformulating Unsupervised Style Transfer as Paraphrase Generation}, } ```
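The card does not show how to run the model. The sketch below is only a rough guess that assumes the checkpoint is compatible with the standard GPT-2 causal-LM classes and that plain text can be fed directly; the authors' repository provides the intended paraphrase-generation scripts and preprocessing, which should be preferred.

```Python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the checkpoint loads with the stock GPT-2 classes. The original
# repository wraps generation in its own scripts, which may apply extra
# preprocessing that is not reproduced here.
tokenizer = AutoTokenizer.from_pretrained("filco306/gpt2-base-style-paraphraser")
model = AutoModelForCausalLM.from_pretrained("filco306/gpt2-base-style-paraphraser")

inputs = tokenizer("The weather was terrible, so we stayed inside.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```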
{}
filco306/gpt2-base-style-paraphraser
null
[ "transformers", "pytorch", "text-generation", "arxiv:2010.05700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2010.05700" ]
[]
TAGS #transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us
# GPT2 base style transfer paraphraser This is the trained base-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. If you found this model useful, please cite the original work:
[ "# GPT2 base style transfer paraphraser\n\nThis is the trained base-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
[ "TAGS\n#transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us \n", "# GPT2 base style transfer paraphraser\n\nThis is the trained base-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
text-generation
transformers
# GPT2 Bible style transfer paraphraser This is the trained Bible model from the paper [Reformulating Unsupervised Style Transfer as Paraphrase Generation](https://arxiv.org/abs/2010.05700) by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. ## Citation If you found this model useful, please cite the original work: ``` @inproceedings{style20, author={Kalpesh Krishna and John Wieting and Mohit Iyyer}, Booktitle = {Empirical Methods in Natural Language Processing}, Year = "2020", Title={Reformulating Unsupervised Style Transfer as Paraphrase Generation}, } ```
{}
filco306/gpt2-bible-paraphraser
null
[ "transformers", "pytorch", "text-generation", "arxiv:2010.05700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2010.05700" ]
[]
TAGS #transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us
# GPT2 Bible style transfer paraphraser This is the trained Bible model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. If you found this model useful, please cite the original work:
[ "# GPT2 Bible style transfer paraphraser\n\nThis is the trained Bible model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
[ "TAGS\n#transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us \n", "# GPT2 Bible style transfer paraphraser\n\nThis is the trained Bible model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
text-generation
transformers
# GPT2 Romantic poetry style transfer paraphraser This is the trained Romantic poetry-model from the paper [Reformulating Unsupervised Style Transfer as Paraphrase Generation](https://arxiv.org/abs/2010.05700) by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. ## Citation If you found this model useful, please cite the original work: ``` @inproceedings{style20, author={Kalpesh Krishna and John Wieting and Mohit Iyyer}, Booktitle = {Empirical Methods in Natural Language Processing}, Year = "2020", Title={Reformulating Unsupervised Style Transfer as Paraphrase Generation}, } ```
{}
filco306/gpt2-romantic-poetry-paraphraser
null
[ "transformers", "pytorch", "text-generation", "arxiv:2010.05700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2010.05700" ]
[]
TAGS #transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us
# GPT2 Romantic poetry style transfer paraphraser This is the trained Romantic poetry-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. If you found this model useful, please cite the original work:
[ "# GPT2 Romantic poetry style transfer paraphraser\n\nThis is the trained Romantic poetry-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
[ "TAGS\n#transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us \n", "# GPT2 Romantic poetry style transfer paraphraser\n\nThis is the trained Romantic poetry-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
text-generation
transformers
# GPT2 Shakespeare style transfer paraphraser This is the trained Shakespeare-model from the paper [Reformulating Unsupervised Style Transfer as Paraphrase Generation](https://arxiv.org/abs/2010.05700) by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. ## Citation If you found this model useful, please cite the original work: ``` @inproceedings{style20, author={Kalpesh Krishna and John Wieting and Mohit Iyyer}, Booktitle = {Empirical Methods in Natural Language Processing}, Year = "2020", Title={Reformulating Unsupervised Style Transfer as Paraphrase Generation}, } ```
{}
filco306/gpt2-shakespeare-paraphraser
null
[ "transformers", "pytorch", "text-generation", "arxiv:2010.05700", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2010.05700" ]
[]
TAGS #transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #has_space #region-us
# GPT2 Shakespeare style transfer paraphraser This is the trained Shakespeare-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. If you found this model useful, please cite the original work:
[ "# GPT2 Shakespeare style transfer paraphraser\n\nThis is the trained Shakespeare-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
[ "TAGS\n#transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# GPT2 Shakespeare style transfer paraphraser\n\nThis is the trained Shakespeare-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
text-generation
transformers
# GPT2 Switchboard style transfer paraphraser This is the trained Switchboard-model from the paper [Reformulating Unsupervised Style Transfer as Paraphrase Generation](https://arxiv.org/abs/2010.05700) by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. ## Citation If you found this model useful, please cite the original work: ``` @inproceedings{style20, author={Kalpesh Krishna and John Wieting and Mohit Iyyer}, Booktitle = {Empirical Methods in Natural Language Processing}, Year = "2020", Title={Reformulating Unsupervised Style Transfer as Paraphrase Generation}, } ```
{}
filco306/gpt2-switchboard-paraphraser
null
[ "transformers", "pytorch", "text-generation", "arxiv:2010.05700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2010.05700" ]
[]
TAGS #transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us
# GPT2 Switchboard style transfer paraphraser This is the trained Switchboard-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. If you found this model useful, please cite the original work:
[ "# GPT2 Switchboard style transfer paraphraser\n\nThis is the trained Switchboard-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
[ "TAGS\n#transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us \n", "# GPT2 Switchboard style transfer paraphraser\n\nThis is the trained Switchboard-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
text-generation
transformers
# GPT2 Tweet style transfer paraphraser This is the trained Tweet-model from the paper [Reformulating Unsupervised Style Transfer as Paraphrase Generation](https://arxiv.org/abs/2010.05700) by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. ## Citation If you found this model useful, please cite the original work: ``` @inproceedings{style20, author={Kalpesh Krishna and John Wieting and Mohit Iyyer}, Booktitle = {Empirical Methods in Natural Language Processing}, Year = "2020", Title={Reformulating Unsupervised Style Transfer as Paraphrase Generation}, } ```
{}
filco306/gpt2-tweet-paraphraser
null
[ "transformers", "pytorch", "text-generation", "arxiv:2010.05700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2010.05700" ]
[]
TAGS #transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us
# GPT2 Tweet style transfer paraphraser This is the trained Tweet-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. If you found this model useful, please cite the original work:
[ "# GPT2 Tweet style transfer paraphraser\n\nThis is the trained Tweet-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
[ "TAGS\n#transformers #pytorch #text-generation #arxiv-2010.05700 #autotrain_compatible #endpoints_compatible #region-us \n", "# GPT2 Tweet style transfer paraphraser\n\nThis is the trained Tweet-model from the paper Reformulating Unsupervised Style Transfer as Paraphrase Generation by Krishna K. et al. Note that I (the uploader) am not the author of the paper. Permission to upload to Huggingface was given by the main author. \n\n\nIf you found this model useful, please cite the original work:" ]
image-classification
transformers
# beer_vs_wine Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### beer ![beer](images/beer.jpg) #### wine ![wine](images/wine.jpg)
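HuggingPics cards include example images but no code; a short classification sketch using the image-classification pipeline is shown below (the image path is a placeholder for any local file or URL):

```Python
from transformers import pipeline

classifier = pipeline("image-classification", model="filipafcastro/beer_vs_wine")

# "glass.jpg" is a placeholder; any local image path or image URL works here.
for prediction in classifier("glass.jpg"):
    print(prediction["label"], round(prediction["score"], 3))
```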
{"tags": ["image-classification", "pytorch", "huggingpics"], "metrics": ["accuracy"]}
filipafcastro/beer_vs_wine
null
[ "transformers", "pytorch", "tensorboard", "vit", "image-classification", "huggingpics", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #vit #image-classification #huggingpics #model-index #autotrain_compatible #endpoints_compatible #region-us
# beer_vs_wine Autogenerated by HuggingPics️ Create your own image classifier for anything by running the demo on Google Colab. Report any issues with the demo at the github repo. ## Example Images #### beer !beer #### wine !wine
[ "# beer_vs_wine\n\n\nAutogenerated by HuggingPics️\n\nCreate your own image classifier for anything by running the demo on Google Colab.\n\nReport any issues with the demo at the github repo.", "## Example Images", "#### beer\n\n!beer", "#### wine\n\n!wine" ]
[ "TAGS\n#transformers #pytorch #tensorboard #vit #image-classification #huggingpics #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "# beer_vs_wine\n\n\nAutogenerated by HuggingPics️\n\nCreate your own image classifier for anything by running the demo on Google Colab.\n\nReport any issues with the demo at the github repo.", "## Example Images", "#### beer\n\n!beer", "#### wine\n\n!wine" ]
text-classification
transformers
# Emotion Analysis in English ## bertweet-base-emotion-analysis Repository: [https://github.com/finiteautomata/pysentimiento/](https://github.com/finiteautomata/pysentimiento/) Model trained with EmoEvent corpus for Emotion detection in English. Base model is [BerTweet](https://huggingface.co/vinai/bertweet-base). ## License `pysentimiento` is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. 1. [TASS Dataset license](http://tass.sepln.org/tass_data/download.php) 2. [SEMEval 2017 Dataset license]() ## Citation If you use `pysentimiento` in your work, please cite [this paper](https://arxiv.org/abs/2106.09462) ``` @misc{perez2021pysentimiento, title={pysentimiento: A Python Toolkit for Sentiment Analysis and SocialNLP tasks}, author={Juan Manuel Pérez and Juan Carlos Giudici and Franco Luque}, year={2021}, eprint={2106.09462}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` and also the dataset related paper ``` @inproceedings{del2020emoevent, title={EmoEvent: A multilingual emotion corpus based on different events}, author={del Arco, Flor Miriam Plaza and Strapparava, Carlo and Lopez, L Alfonso Urena and Mart{\'\i}n-Valdivia, M Teresa}, booktitle={Proceedings of the 12th Language Resources and Evaluation Conference}, pages={1492--1498}, year={2020} } ``` Enjoy! 🤗
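Besides the pysentimiento toolkit recommended above, the checkpoint itself can be queried directly with a plain text-classification pipeline. A minimal sketch (the label names come from the model's config and the example tweet is illustrative):

```Python
from transformers import pipeline

emotion = pipeline(
    "text-classification",
    model="finiteautomata/bertweet-base-emotion-analysis",
)

print(emotion("I can't believe they cancelled the show, this is awful"))
# -> a list with the top label and its score, e.g. [{'label': ..., 'score': ...}]
```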
{"language": ["en"], "tags": ["emotion-analysis"]}
finiteautomata/bertweet-base-emotion-analysis
null
[ "transformers", "pytorch", "safetensors", "roberta", "text-classification", "emotion-analysis", "en", "arxiv:2106.09462", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2106.09462" ]
[ "en" ]
TAGS #transformers #pytorch #safetensors #roberta #text-classification #emotion-analysis #en #arxiv-2106.09462 #autotrain_compatible #endpoints_compatible #has_space #region-us
# Emotion Analysis in English ## bertweet-base-emotion-analysis Repository: URL Model trained with EmoEvent corpus for Emotion detection in English. Base model is BerTweet. ## License 'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. 1. TASS Dataset license 2. [SEMEval 2017 Dataset license]() If you use 'pysentimiento' in your work, please cite this paper and also the dataset related paper Enjoy!
[ "# Emotion Analysis in English", "## bertweet-base-emotion-analysis\n\nRepository: URL\n\n\nModel trained with EmoEvent corpus for Emotion detection in English. Base model is BerTweet.", "## License\n\n'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. \n\n1. TASS Dataset license\n2. [SEMEval 2017 Dataset license]()\n\nIf you use 'pysentimiento' in your work, please cite this paper\n\n\n\nand also the dataset related paper\n\n\n\nEnjoy!" ]
[ "TAGS\n#transformers #pytorch #safetensors #roberta #text-classification #emotion-analysis #en #arxiv-2106.09462 #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Emotion Analysis in English", "## bertweet-base-emotion-analysis\n\nRepository: URL\n\n\nModel trained with EmoEvent corpus for Emotion detection in English. Base model is BerTweet.", "## License\n\n'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. \n\n1. TASS Dataset license\n2. [SEMEval 2017 Dataset license]()\n\nIf you use 'pysentimiento' in your work, please cite this paper\n\n\n\nand also the dataset related paper\n\n\n\nEnjoy!" ]
text-classification
transformers
# Sentiment Analysis in English ## bertweet-sentiment-analysis Repository: [https://github.com/finiteautomata/pysentimiento/](https://github.com/finiteautomata/pysentimiento/) Model trained with SemEval 2017 corpus (around ~40k tweets). Base model is [BERTweet](https://github.com/VinAIResearch/BERTweet), a RoBERTa model trained on English tweets. Uses `POS`, `NEG`, `NEU` labels. ## License `pysentimiento` is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. 1. [TASS Dataset license](http://tass.sepln.org/tass_data/download.php) 2. [SEMEval 2017 Dataset license]() ## Citation If you use `pysentimiento` in your work, please cite [this paper](https://arxiv.org/abs/2106.09462) ``` @misc{perez2021pysentimiento, title={pysentimiento: A Python Toolkit for Sentiment Analysis and SocialNLP tasks}, author={Juan Manuel Pérez and Juan Carlos Giudici and Franco Luque}, year={2021}, eprint={2106.09462}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` Enjoy! 🤗
{"language": ["en"], "tags": ["sentiment-analysis"]}
finiteautomata/bertweet-base-sentiment-analysis
null
[ "transformers", "pytorch", "tf", "roberta", "text-classification", "sentiment-analysis", "en", "arxiv:2106.09462", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2106.09462" ]
[ "en" ]
TAGS #transformers #pytorch #tf #roberta #text-classification #sentiment-analysis #en #arxiv-2106.09462 #autotrain_compatible #endpoints_compatible #has_space #region-us
# Sentiment Analysis in English ## bertweet-sentiment-analysis Repository: URL Model trained with SemEval 2017 corpus (around ~40k tweets). Base model is BERTweet, a RoBERTa model trained on English tweets. Uses 'POS', 'NEG', 'NEU' labels. ## License 'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. 1. TASS Dataset license 2. [SEMEval 2017 Dataset license]() If you use 'pysentimiento' in your work, please cite this paper Enjoy!
[ "# Sentiment Analysis in English", "## bertweet-sentiment-analysis\n\nRepository: URL\n\n\nModel trained with SemEval 2017 corpus (around ~40k tweets). Base model is BERTweet, a RoBERTa model trained on English tweets.\n\nUses 'POS', 'NEG', 'NEU' labels.", "## License\n\n'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. \n\n1. TASS Dataset license\n2. [SEMEval 2017 Dataset license]()\n\nIf you use 'pysentimiento' in your work, please cite this paper\n\n\nEnjoy!" ]
[ "TAGS\n#transformers #pytorch #tf #roberta #text-classification #sentiment-analysis #en #arxiv-2106.09462 #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Sentiment Analysis in English", "## bertweet-sentiment-analysis\n\nRepository: URL\n\n\nModel trained with SemEval 2017 corpus (around ~40k tweets). Base model is BERTweet, a RoBERTa model trained on English tweets.\n\nUses 'POS', 'NEG', 'NEU' labels.", "## License\n\n'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. \n\n1. TASS Dataset license\n2. [SEMEval 2017 Dataset license]()\n\nIf you use 'pysentimiento' in your work, please cite this paper\n\n\nEnjoy!" ]
text-classification
transformers
# Emotion Analysis in Spanish ## beto-emotion-analysis Repository: [https://github.com/finiteautomata/pysentimiento/](https://github.com/finiteautomata/pysentimiento/) Model trained with TASS 2020 Task 2 corpus for Emotion detection in Spanish. Base model is [BETO](https://github.com/dccuchile/beto), a BERT model trained in Spanish. ## License `pysentimiento` is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. 1. [TASS Dataset license](http://tass.sepln.org/tass_data/download.php) 2. [SEMEval 2017 Dataset license]() ## Citation If you use `pysentimiento` in your work, please cite [this paper](https://arxiv.org/abs/2106.09462) ``` @misc{perez2021pysentimiento, title={pysentimiento: A Python Toolkit for Sentiment Analysis and SocialNLP tasks}, author={Juan Manuel Pérez and Juan Carlos Giudici and Franco Luque}, year={2021}, eprint={2106.09462}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` and also the dataset related paper ``` @inproceedings{del2020emoevent, title={EmoEvent: A multilingual emotion corpus based on different events}, author={del Arco, Flor Miriam Plaza and Strapparava, Carlo and Lopez, L Alfonso Urena and Mart{\'\i}n-Valdivia, M Teresa}, booktitle={Proceedings of the 12th Language Resources and Evaluation Conference}, pages={1492--1498}, year={2020} } ``` Enjoy! 🤗
{"language": ["es"], "tags": ["emotion-analysis"]}
finiteautomata/beto-emotion-analysis
null
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "emotion-analysis", "es", "arxiv:2106.09462", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2106.09462" ]
[ "es" ]
TAGS #transformers #pytorch #safetensors #bert #text-classification #emotion-analysis #es #arxiv-2106.09462 #autotrain_compatible #endpoints_compatible #has_space #region-us
# Emotion Analysis in Spanish ## beto-emotion-analysis Repository: URL Model trained with TASS 2020 Task 2 corpus for Emotion detection in Spanish. Base model is BETO, a BERT model trained in Spanish. ## License 'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. 1. TASS Dataset license 2. [SEMEval 2017 Dataset license]() If you use 'pysentimiento' in your work, please cite this paper and also the dataset related paper Enjoy!
[ "# Emotion Analysis in Spanish", "## beto-emotion-analysis\n\nRepository: URL\n\n\nModel trained with TASS 2020 Task 2 corpus for Emotion detection in Spanish. Base model is BETO, a BERT model trained in Spanish.", "## License\n\n'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. \n\n1. TASS Dataset license\n2. [SEMEval 2017 Dataset license]()\n\nIf you use 'pysentimiento' in your work, please cite this paper\n\n\n\nand also the dataset related paper\n\n\n\nEnjoy!" ]
[ "TAGS\n#transformers #pytorch #safetensors #bert #text-classification #emotion-analysis #es #arxiv-2106.09462 #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Emotion Analysis in Spanish", "## beto-emotion-analysis\n\nRepository: URL\n\n\nModel trained with TASS 2020 Task 2 corpus for Emotion detection in Spanish. Base model is BETO, a BERT model trained in Spanish.", "## License\n\n'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. \n\n1. TASS Dataset license\n2. [SEMEval 2017 Dataset license]()\n\nIf you use 'pysentimiento' in your work, please cite this paper\n\n\n\nand also the dataset related paper\n\n\n\nEnjoy!" ]
text-classification
transformers
# Targeted Sentiment Analysis in News Headlines

BERT classifier fine-tuned on a news-headlines dataset annotated for target polarity.

(details to be published)

## Examples

The input format is as follows:

`Headline [SEP] Target`

where the headline is the news title and the target is an entity present in the headline.

Try

`Alberto Fernández: "El gobierno de Macri fue un desastre" [SEP] Macri` (should be NEG)

and

`Alberto Fernández: "El gobierno de Macri fue un desastre" [SEP] Alberto Fernández` (POS or NEU)
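A minimal sketch of how the `Headline [SEP] Target` format above can be reproduced programmatically. Encoding the headline and target as a sentence pair lets the tokenizer insert the `[SEP]` token itself; the label names (e.g. POS/NEG/NEU) are read from the checkpoint's config rather than listed on this card.

```Python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "finiteautomata/beto-headlines-sentiment-analysis"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

headline = 'Alberto Fernández: "El gobierno de Macri fue un desastre"'
target = "Macri"

# Sentence-pair encoding reproduces the "Headline [SEP] Target" input format.
inputs = tokenizer(headline, target, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(dim=-1).item()])
```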
{}
finiteautomata/beto-headlines-sentiment-analysis
null
[ "transformers", "pytorch", "safetensors", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #safetensors #bert #text-classification #autotrain_compatible #endpoints_compatible #has_space #region-us
# Targeted Sentiment Analysis in News Headlines BERT classifier fine-tuned in a news headlines dataset annotated for target polarity. (details to be published) ## Examples Input is as follows 'Headline [SEP] Target' where headline is the news title and target is an entity present in the headline. Try 'Alberto Fernández: "El gobierno de Macri fue un desastre" [SEP] Macri' (should be NEG) and 'Alberto Fernández: "El gobierno de Macri fue un desastre" [SEP] Alberto Fernández' (POS or NEU)
[ "# Targeted Sentiment Analysis in News Headlines\n\nBERT classifier fine-tuned in a news headlines dataset annotated for target polarity.\n\n(details to be published)", "## Examples\n\nInput is as follows\n\n'Headline [SEP] Target'\n\nwhere headline is the news title and target is an entity present in the headline.\n\nTry\n\n'Alberto Fernández: \"El gobierno de Macri fue un desastre\" [SEP] Macri' (should be NEG)\n\nand\n\n'Alberto Fernández: \"El gobierno de Macri fue un desastre\" [SEP] Alberto Fernández' (POS or NEU)" ]
[ "TAGS\n#transformers #pytorch #safetensors #bert #text-classification #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Targeted Sentiment Analysis in News Headlines\n\nBERT classifier fine-tuned in a news headlines dataset annotated for target polarity.\n\n(details to be published)", "## Examples\n\nInput is as follows\n\n'Headline [SEP] Target'\n\nwhere headline is the news title and target is an entity present in the headline.\n\nTry\n\n'Alberto Fernández: \"El gobierno de Macri fue un desastre\" [SEP] Macri' (should be NEG)\n\nand\n\n'Alberto Fernández: \"El gobierno de Macri fue un desastre\" [SEP] Alberto Fernández' (POS or NEU)" ]
text-classification
transformers
# Sentiment Analysis in Spanish ## beto-sentiment-analysis **NOTE: this model will be removed soon -- use [pysentimiento/robertuito-sentiment-analysis](https://huggingface.co/pysentimiento/robertuito-sentiment-analysis) instead** Repository: [https://github.com/finiteautomata/pysentimiento/](https://github.com/pysentimiento/pysentimiento/) Model trained with TASS 2020 corpus (around ~5k tweets) of several dialects of Spanish. Base model is [BETO](https://github.com/dccuchile/beto), a BERT model trained in Spanish. Uses `POS`, `NEG`, `NEU` labels. ## License `pysentimiento` is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. 1. [TASS Dataset license](http://tass.sepln.org/tass_data/download.php) 2. [SEMEval 2017 Dataset license]() ## Citation If you use this model in your work, please cite the following papers: ``` @misc{perez2021pysentimiento, title={pysentimiento: A Python Toolkit for Sentiment Analysis and SocialNLP tasks}, author={Juan Manuel Pérez and Juan Carlos Giudici and Franco Luque}, year={2021}, eprint={2106.09462}, archivePrefix={arXiv}, primaryClass={cs.CL} } @article{canete2020spanish, title={Spanish pre-trained bert model and evaluation data}, author={Ca{\~n}ete, Jos{\'e} and Chaperon, Gabriel and Fuentes, Rodrigo and Ho, Jou-Hui and Kang, Hojin and P{\'e}rez, Jorge}, journal={Pml4dc at iclr}, volume={2020}, number={2020}, pages={1--10}, year={2020} } ``` Enjoy! 🤗
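A short sketch of the recommended route through the pysentimiento toolkit (this assumes the current `create_analyzer` API; see the repository linked above for the authoritative usage):

```Python
from pysentimiento import create_analyzer

analyzer = create_analyzer(task="sentiment", lang="es")

result = analyzer.predict("Qué gran jugador es Messi")
print(result.output, result.probas)  # e.g. POS plus per-label probabilities
```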
{"language": ["es"], "tags": ["sentiment-analysis"]}
finiteautomata/beto-sentiment-analysis
null
[ "transformers", "pytorch", "jax", "bert", "text-classification", "sentiment-analysis", "es", "arxiv:2106.09462", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2106.09462" ]
[ "es" ]
TAGS #transformers #pytorch #jax #bert #text-classification #sentiment-analysis #es #arxiv-2106.09462 #autotrain_compatible #endpoints_compatible #has_space #region-us
# Sentiment Analysis in Spanish ## beto-sentiment-analysis NOTE: this model will be removed soon -- use pysentimiento/robertuito-sentiment-analysis instead Repository: URL Model trained with TASS 2020 corpus (around ~5k tweets) of several dialects of Spanish. Base model is BETO, a BERT model trained in Spanish. Uses 'POS', 'NEG', 'NEU' labels. ## License 'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. 1. TASS Dataset license 2. [SEMEval 2017 Dataset license]() If you use this model in your work, please cite the following papers: Enjoy!
[ "# Sentiment Analysis in Spanish", "## beto-sentiment-analysis\n\nNOTE: this model will be removed soon -- use pysentimiento/robertuito-sentiment-analysis instead\n\nRepository: URL\n\n\nModel trained with TASS 2020 corpus (around ~5k tweets) of several dialects of Spanish. Base model is BETO, a BERT model trained in Spanish.\n\nUses 'POS', 'NEG', 'NEU' labels.", "## License\n\n'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. \n\n1. TASS Dataset license\n2. [SEMEval 2017 Dataset license]()\n\nIf you use this model in your work, please cite the following papers:\n\n\n\nEnjoy!" ]
[ "TAGS\n#transformers #pytorch #jax #bert #text-classification #sentiment-analysis #es #arxiv-2106.09462 #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Sentiment Analysis in Spanish", "## beto-sentiment-analysis\n\nNOTE: this model will be removed soon -- use pysentimiento/robertuito-sentiment-analysis instead\n\nRepository: URL\n\n\nModel trained with TASS 2020 corpus (around ~5k tweets) of several dialects of Spanish. Base model is BETO, a BERT model trained in Spanish.\n\nUses 'POS', 'NEG', 'NEU' labels.", "## License\n\n'pysentimiento' is an open-source library for non-commercial use and scientific research purposes only. Please be aware that models are trained with third-party datasets and are subject to their respective licenses. \n\n1. TASS Dataset license\n2. [SEMEval 2017 Dataset license]()\n\nIf you use this model in your work, please cite the following papers:\n\n\n\nEnjoy!" ]
image-classification
transformers
# llama_or_what Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### alpaca ![alpaca](images/alpaca.jpg) #### guanaco ![guanaco](images/guanaco.jpg) #### llama ![llama](images/llama.jpg) #### vicuna ![vicuna](images/vicuna.jpg)
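A hypothetical inference sketch using the `transformers` image-classification pipeline (the hub id `firebolt/llama_or_what` comes from this record; `some_camelid.jpg` is a placeholder path, not part of the original card):

```python
from transformers import pipeline

# Classify a local image; the expected labels are alpaca, guanaco, llama and vicuna.
classifier = pipeline("image-classification", model="firebolt/llama_or_what")

print(classifier("some_camelid.jpg"))  # placeholder image path
```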
{"tags": ["image-classification", "pytorch", "huggingpics"], "metrics": ["accuracy"]}
firebolt/llama_or_what
null
[ "transformers", "pytorch", "tensorboard", "vit", "image-classification", "huggingpics", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #vit #image-classification #huggingpics #model-index #autotrain_compatible #endpoints_compatible #region-us
# llama_or_what Autogenerated by HuggingPics️ Create your own image classifier for anything by running the demo on Google Colab. Report any issues with the demo at the github repo. ## Example Images #### alpaca !alpaca #### guanaco !guanaco #### llama !llama #### vicuna !vicuna
[ "# llama_or_what\n\n\nAutogenerated by HuggingPics️\n\nCreate your own image classifier for anything by running the demo on Google Colab.\n\nReport any issues with the demo at the github repo.", "## Example Images", "#### alpaca\n\n!alpaca", "#### guanaco\n\n!guanaco", "#### llama\n\n!llama", "#### vicuna\n\n!vicuna" ]
[ "TAGS\n#transformers #pytorch #tensorboard #vit #image-classification #huggingpics #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "# llama_or_what\n\n\nAutogenerated by HuggingPics️\n\nCreate your own image classifier for anything by running the demo on Google Colab.\n\nReport any issues with the demo at the github repo.", "## Example Images", "#### alpaca\n\n!alpaca", "#### guanaco\n\n!guanaco", "#### llama\n\n!llama", "#### vicuna\n\n!vicuna" ]
image-classification
transformers
# llama_or_what2 Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### alpaca ![alpaca](images/alpaca.jpg) #### guanaco ![guanaco](images/guanaco.jpg) #### llama ![llama](images/llama.jpg) #### vicuna ![vicuna](images/vicuna.jpg)
{"tags": ["image-classification", "pytorch", "huggingpics"], "metrics": ["accuracy"]}
firebolt/llama_or_what2
null
[ "transformers", "pytorch", "tensorboard", "vit", "image-classification", "huggingpics", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #vit #image-classification #huggingpics #model-index #autotrain_compatible #endpoints_compatible #region-us
# llama_or_what2 Autogenerated by HuggingPics️ Create your own image classifier for anything by running the demo on Google Colab. Report any issues with the demo at the github repo. ## Example Images #### alpaca !alpaca #### guanaco !guanaco #### llama !llama #### vicuna !vicuna
[ "# llama_or_what2\n\n\nAutogenerated by HuggingPics️\n\nCreate your own image classifier for anything by running the demo on Google Colab.\n\nReport any issues with the demo at the github repo.", "## Example Images", "#### alpaca\n\n!alpaca", "#### guanaco\n\n!guanaco", "#### llama\n\n!llama", "#### vicuna\n\n!vicuna" ]
[ "TAGS\n#transformers #pytorch #tensorboard #vit #image-classification #huggingpics #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "# llama_or_what2\n\n\nAutogenerated by HuggingPics️\n\nCreate your own image classifier for anything by running the demo on Google Colab.\n\nReport any issues with the demo at the github repo.", "## Example Images", "#### alpaca\n\n!alpaca", "#### guanaco\n\n!guanaco", "#### llama\n\n!llama", "#### vicuna\n\n!vicuna" ]
text-classification
transformers
# Model Trained Using AutoNLP - Problem type: Binary Classification - Model ID: 310939 ## Validation Metrics - Loss: 0.027471264824271202 - Accuracy: 0.9931118314424635 - Precision: 0.946236559139785 - Recall: 0.88 - AUC: 0.9952871621621622 - F1: 0.911917098445596 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/fjarrett/autonlp-giveaway_detection_05-310939 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("fjarrett/autonlp-giveaway_detection_05-310939", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("fjarrett/autonlp-giveaway_detection_05-310939", use_auth_token=True) inputs = tokenizer("I love AutoNLP", return_tensors="pt") outputs = model(**inputs) ```
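As a quick sanity check, the reported F1 follows from the listed precision and recall: F1 = 2·P·R / (P + R) = 2·0.9462·0.88 / (0.9462 + 0.88) ≈ 0.9119, matching the value above.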
{"language": "en", "tags": ["autonlp"], "datasets": ["fjarrett/autonlp-data-giveaway_detection_05"], "widget": [{"text": "I love AutoNLP \ud83e\udd17"}]}
popsmash-admin/autonlp-giveaway_detection_05-310939
null
[ "transformers", "pytorch", "distilbert", "text-classification", "autonlp", "en", "dataset:fjarrett/autonlp-data-giveaway_detection_05", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #transformers #pytorch #distilbert #text-classification #autonlp #en #dataset-fjarrett/autonlp-data-giveaway_detection_05 #autotrain_compatible #endpoints_compatible #region-us
# Model Trained Using AutoNLP - Problem type: Binary Classification - Model ID: 310939 ## Validation Metrics - Loss: 0.027471264824271202 - Accuracy: 0.9931118314424635 - Precision: 0.946236559139785 - Recall: 0.88 - AUC: 0.9952871621621622 - F1: 0.911917098445596 ## Usage You can use cURL to access this model: Or Python API:
[ "# Model Trained Using AutoNLP\n\n- Problem type: Binary Classification\n- Model ID: 310939", "## Validation Metrics\n\n- Loss: 0.027471264824271202\n- Accuracy: 0.9931118314424635\n- Precision: 0.946236559139785\n- Recall: 0.88\n- AUC: 0.9952871621621622\n- F1: 0.911917098445596", "## Usage\n\nYou can use cURL to access this model:\n\n\n\nOr Python API:" ]
[ "TAGS\n#transformers #pytorch #distilbert #text-classification #autonlp #en #dataset-fjarrett/autonlp-data-giveaway_detection_05 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Trained Using AutoNLP\n\n- Problem type: Binary Classification\n- Model ID: 310939", "## Validation Metrics\n\n- Loss: 0.027471264824271202\n- Accuracy: 0.9931118314424635\n- Precision: 0.946236559139785\n- Recall: 0.88\n- AUC: 0.9952871621621622\n- F1: 0.911917098445596", "## Usage\n\nYou can use cURL to access this model:\n\n\n\nOr Python API:" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-bne-finetuned-amazon_reviews_multi This model is a fine-tuned version of [BSC-TeMU/roberta-base-bne](https://huggingface.co/BSC-TeMU/roberta-base-bne) on the amazon_reviews_multi dataset. It achieves the following results on the evaluation set: - Loss: 0.2157 - Accuracy: 0.9173 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1125 | 1.0 | 13 | 0.2066 | 0.9165 | | 0.0186 | 2.0 | 26 | 0.2157 | 0.9173 | ### Framework versions - Transformers 4.10.2 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
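The card documents training only; a minimal inference sketch, assuming the standard `transformers` pipeline API and the hub id `fjluque/roberta-base-bne-finetuned-amazon_reviews_multi` from this record, might look like this:

```python
from transformers import pipeline

# Score a Spanish product review with the fine-tuned checkpoint.
classifier = pipeline(
    "text-classification",
    model="fjluque/roberta-base-bne-finetuned-amazon_reviews_multi",
)

print(classifier("El producto llegó roto y el vendedor nunca respondió."))
```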
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["amazon_reviews_multi"], "metrics": ["accuracy"], "model-index": [{"name": "roberta-base-bne-finetuned-amazon_reviews_multi", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "amazon_reviews_multi", "type": "amazon_reviews_multi", "args": "es"}, "metrics": [{"type": "accuracy", "value": 0.91725, "name": "Accuracy"}]}]}]}
fjluque/roberta-base-bne-finetuned-amazon_reviews_multi
null
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "generated_from_trainer", "dataset:amazon_reviews_multi", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #roberta #text-classification #generated_from_trainer #dataset-amazon_reviews_multi #license-apache-2.0 #model-index #autotrain_compatible #endpoints_compatible #region-us
roberta-base-bne-finetuned-amazon\_reviews\_multi ================================================= This model is a fine-tuned version of BSC-TeMU/roberta-base-bne on the amazon\_reviews\_multi dataset. It achieves the following results on the evaluation set: * Loss: 0.2157 * Accuracy: 0.9173 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 2 ### Training results ### Framework versions * Transformers 4.10.2 * Pytorch 1.9.0+cu102 * Datasets 1.12.1 * Tokenizers 0.10.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.10.2\n* Pytorch 1.9.0+cu102\n* Datasets 1.12.1\n* Tokenizers 0.10.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #roberta #text-classification #generated_from_trainer #dataset-amazon_reviews_multi #license-apache-2.0 #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.10.2\n* Pytorch 1.9.0+cu102\n* Datasets 1.12.1\n* Tokenizers 0.10.3" ]
automatic-speech-recognition
transformers
this is my model card
{}
fkHug/modelFromWav2vec
null
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #wav2vec2 #automatic-speech-recognition #endpoints_compatible #region-us
this is my model card
[]
[ "TAGS\n#transformers #pytorch #wav2vec2 #automatic-speech-recognition #endpoints_compatible #region-us \n" ]
token-classification
flair
## English Chunking in Flair (fast model) This is the fast phrase chunking model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **96,22** (CoNLL-2000) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | ADJP | adjectival | | ADVP | adverbial | | CONJP | conjunction | | INTJ | interjection | | LST | list marker | | NP | noun phrase | | PP | prepositional | | PRT | particle | | SBAR | subordinate clause | | VP | verb phrase | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/chunk-english-fast") # make example sentence sentence = Sentence("The happy man has been eating at the diner") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('np'): print(entity) ``` This yields the following output: ``` Span [1,2,3]: "The happy man" [− Labels: NP (0.9958)] Span [4,5,6]: "has been eating" [− Labels: VP (0.8759)] Span [7]: "at" [− Labels: PP (1.0)] Span [8,9]: "the diner" [− Labels: NP (0.9991)] ``` So, the spans "*The happy man*" and "*the diner*" are labeled as **noun phrases** (NP) and "*has been eating*" is labeled as a **verb phrase** (VP) in the sentence "*The happy man has been eating at the diner*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import CONLL_2000 from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = CONLL_2000() # 2. what tag do we want to predict? tag_type = 'np' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # contextual string embeddings, forward FlairEmbeddings('news-forward-fast'), # contextual string embeddings, backward FlairEmbeddings('news-backward-fast'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/chunk-english-fast', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2000"], "widget": [{"text": "The happy man has been eating at the diner"}]}
flair/chunk-english-fast
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:conll2000", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2000 #region-us
English Chunking in Flair (fast model) -------------------------------------- This is the fast phrase chunking model for English that ships with Flair. F1-Score: 96,22 (CoNLL-2000) Predicts 4 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the spans "*The happy man*" and "*the diner*" are labeled as noun phrases (NP) and "*has been eating*" is labeled as a verb phrase (VP) in the sentence "*The happy man has been eating at the diner*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the spans \"*The happy man*\" and \"*the diner*\" are labeled as noun phrases (NP) and \"*has been eating*\" is labeled as a verb phrase (VP) in the sentence \"*The happy man has been eating at the diner*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2000 #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the spans \"*The happy man*\" and \"*the diner*\" are labeled as noun phrases (NP) and \"*has been eating*\" is labeled as a verb phrase (VP) in the sentence \"*The happy man has been eating at the diner*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English Chunking in Flair (default model) This is the standard phrase chunking model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **96,48** (CoNLL-2000) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | ADJP | adjectival | | ADVP | adverbial | | CONJP | conjunction | | INTJ | interjection | | LST | list marker | | NP | noun phrase | | PP | prepositional | | PRT | particle | | SBAR | subordinate clause | | VP | verb phrase | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/chunk-english") # make example sentence sentence = Sentence("The happy man has been eating at the diner") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('np'): print(entity) ``` This yields the following output: ``` Span [1,2,3]: "The happy man" [− Labels: NP (0.9958)] Span [4,5,6]: "has been eating" [− Labels: VP (0.8759)] Span [7]: "at" [− Labels: PP (1.0)] Span [8,9]: "the diner" [− Labels: NP (0.9991)] ``` So, the spans "*The happy man*" and "*the diner*" are labeled as **noun phrases** (NP) and "*has been eating*" is labeled as a **verb phrase** (VP) in the sentence "*The happy man has been eating at the diner*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import CONLL_2000 from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = CONLL_2000() # 2. what tag do we want to predict? tag_type = 'np' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # contextual string embeddings, forward FlairEmbeddings('news-forward'), # contextual string embeddings, backward FlairEmbeddings('news-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/chunk-english', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2000"], "widget": [{"text": "The happy man has been eating at the diner"}]}
flair/chunk-english
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:conll2000", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2000 #has_space #region-us
English Chunking in Flair (default model) ----------------------------------------- This is the standard phrase chunking model for English that ships with Flair. F1-Score: 96,48 (CoNLL-2000) Predicts 4 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the spans "*The happy man*" and "*the diner*" are labeled as noun phrases (NP) and "*has been eating*" is labeled as a verb phrase (VP) in the sentence "*The happy man has been eating at the diner*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the spans \"*The happy man*\" and \"*the diner*\" are labeled as noun phrases (NP) and \"*has been eating*\" is labeled as a verb phrase (VP) in the sentence \"*The happy man has been eating at the diner*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2000 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the spans \"*The happy man*\" and \"*the diner*\" are labeled as noun phrases (NP) and \"*has been eating*\" is labeled as a verb phrase (VP) in the sentence \"*The happy man has been eating at the diner*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English Verb Disambiguation in Flair (fast model) This is the fast verb disambiguation model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **88,27** (Ontonotes) - predicts [Proposition Bank verb frames](http://verbs.colorado.edu/propbank/framesets-english-aliases/). Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/frame-english-fast") # make example sentence sentence = Sentence("George returned to Berlin to return his hat.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following frame tags are found:') # iterate over entities and print for entity in sentence.get_spans('frame'): print(entity) ``` This yields the following output: ``` Span [2]: "returned" [− Labels: return.01 (0.9867)] Span [6]: "return" [− Labels: return.02 (0.4741)] ``` So, the word "*returned*" is labeled as **return.01** (as in *go back somewhere*) while "*return*" is labeled as **return.02** (as in *give back something*) in the sentence "*George returned to Berlin to return his hat*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself) corpus = ColumnCorpus( "resources/tasks/srl", column_format={1: "text", 11: "frame"} ) # 2. what tag do we want to predict? tag_type = 'frame' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ BytePairEmbeddings("en"), FlairEmbeddings("news-forward-fast"), FlairEmbeddings("news-backward-fast"), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/frame-english-fast', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2019flair, title={FLAIR: An easy-to-use framework for state-of-the-art NLP}, author={Akbik, Alan and Bergmann, Tanja and Blythe, Duncan and Rasul, Kashif and Schweter, Stefan and Vollgraf, Roland}, booktitle={{NAACL} 2019, 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)}, pages={54--59}, year={2019} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "George returned to Berlin to return his hat."}]}
flair/frame-english-fast
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #region-us
## English Verb Disambiguation in Flair (fast model) This is the fast verb disambiguation model for English that ships with Flair. F1-Score: 88,27 (Ontonotes) - predicts Proposition Bank verb frames. Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the word "*returned*" is labeled as return.01 (as in *go back somewhere*) while "*return*" is labeled as return.02 (as in *give back something*) in the sentence "*George returned to Berlin to return his hat*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "## English Verb Disambiguation in Flair (fast model)\n\nThis is the fast verb disambiguation model for English that ships with Flair.\n\nF1-Score: 88,27 (Ontonotes) - predicts Proposition Bank verb frames.\n\nBased on Flair embeddings and LSTM-CRF.\n\n---", "### Demo: How to use in Flair\n\nRequires: Flair ('pip install flair')\n\n\n\nThis yields the following output:\n\n\nSo, the word \"*returned*\" is labeled as return.01 (as in *go back somewhere*) while \"*return*\" is labeled as return.02 (as in *give back something*) in the sentence \"*George returned to Berlin to return his hat*\". \n\n\n---", "### Training: Script to train this model\n\nThe following Flair script was used to train this model: \n\n\n\n\n\n---", "### Cite\n\nPlease cite the following paper when using this model.\n\n\n\n---", "### Issues?\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #region-us \n", "## English Verb Disambiguation in Flair (fast model)\n\nThis is the fast verb disambiguation model for English that ships with Flair.\n\nF1-Score: 88,27 (Ontonotes) - predicts Proposition Bank verb frames.\n\nBased on Flair embeddings and LSTM-CRF.\n\n---", "### Demo: How to use in Flair\n\nRequires: Flair ('pip install flair')\n\n\n\nThis yields the following output:\n\n\nSo, the word \"*returned*\" is labeled as return.01 (as in *go back somewhere*) while \"*return*\" is labeled as return.02 (as in *give back something*) in the sentence \"*George returned to Berlin to return his hat*\". \n\n\n---", "### Training: Script to train this model\n\nThe following Flair script was used to train this model: \n\n\n\n\n\n---", "### Cite\n\nPlease cite the following paper when using this model.\n\n\n\n---", "### Issues?\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English Verb Disambiguation in Flair (default model) This is the standard verb disambiguation model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **89,34** (Ontonotes) - predicts [Proposition Bank verb frames](http://verbs.colorado.edu/propbank/framesets-english-aliases/). Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/frame-english") # make example sentence sentence = Sentence("George returned to Berlin to return his hat.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following frame tags are found:') # iterate over entities and print for entity in sentence.get_spans('frame'): print(entity) ``` This yields the following output: ``` Span [2]: "returned" [− Labels: return.01 (0.9951)] Span [6]: "return" [− Labels: return.02 (0.6361)] ``` So, the word "*returned*" is labeled as **return.01** (as in *go back somewhere*) while "*return*" is labeled as **return.02** (as in *give back something*) in the sentence "*George returned to Berlin to return his hat*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself) corpus = ColumnCorpus( "resources/tasks/srl", column_format={1: "text", 11: "frame"} ) # 2. what tag do we want to predict? tag_type = 'frame' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ BytePairEmbeddings("en"), FlairEmbeddings("news-forward"), FlairEmbeddings("news-backward"), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/frame-english', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2019flair, title={FLAIR: An easy-to-use framework for state-of-the-art NLP}, author={Akbik, Alan and Bergmann, Tanja and Blythe, Duncan and Rasul, Kashif and Schweter, Stefan and Vollgraf, Roland}, booktitle={{NAACL} 2019, 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)}, pages={54--59}, year={2019} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "George returned to Berlin to return his hat."}]}
flair/frame-english
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #region-us
## English Verb Disambiguation in Flair (default model) This is the standard verb disambiguation model for English that ships with Flair. F1-Score: 89,34 (Ontonotes) - predicts Proposition Bank verb frames. Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the word "*returned*" is labeled as return.01 (as in *go back somewhere*) while "*return*" is labeled as return.02 (as in *give back something*) in the sentence "*George returned to Berlin to return his hat*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "## English Verb Disambiguation in Flair (default model)\n\nThis is the standard verb disambiguation model for English that ships with Flair.\n\nF1-Score: 89,34 (Ontonotes) - predicts Proposition Bank verb frames.\n\nBased on Flair embeddings and LSTM-CRF.\n\n---", "### Demo: How to use in Flair\n\nRequires: Flair ('pip install flair')\n\n\n\nThis yields the following output:\n\n\nSo, the word \"*returned*\" is labeled as return.01 (as in *go back somewhere*) while \"*return*\" is labeled as return.02 (as in *give back something*) in the sentence \"*George returned to Berlin to return his hat*\". \n\n\n---", "### Training: Script to train this model\n\nThe following Flair script was used to train this model: \n\n\n\n\n\n---", "### Cite\n\nPlease cite the following paper when using this model.\n\n\n\n---", "### Issues?\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #region-us \n", "## English Verb Disambiguation in Flair (default model)\n\nThis is the standard verb disambiguation model for English that ships with Flair.\n\nF1-Score: 89,34 (Ontonotes) - predicts Proposition Bank verb frames.\n\nBased on Flair embeddings and LSTM-CRF.\n\n---", "### Demo: How to use in Flair\n\nRequires: Flair ('pip install flair')\n\n\n\nThis yields the following output:\n\n\nSo, the word \"*returned*\" is labeled as return.01 (as in *go back somewhere*) while \"*return*\" is labeled as return.02 (as in *give back something*) in the sentence \"*George returned to Berlin to return his hat*\". \n\n\n---", "### Training: Script to train this model\n\nThe following Flair script was used to train this model: \n\n\n\n\n\n---", "### Cite\n\nPlease cite the following paper when using this model.\n\n\n\n---", "### Issues?\n\nThe Flair issue tracker is available here." ]
token-classification
flair
# Danish NER in Flair (default model) This is the standard 4-class NER model for Danish that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **81.78** (DaNER) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on Transformer embeddings and LSTM-CRF. --- # Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-danish") # make example sentence sentence = Sentence("Jens Peter Hansen kommer fra Danmark") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2,3]: "Jens Peter Hansen" [− Labels: PER (0.9961)] Span [6]: "Danmark" [− Labels: LOC (0.9816)] ``` So, the entities "*Jens Peter Hansen*" (labeled as a **person**) and "*Danmark*" (labeled as a **location**) are found in the sentence "*Jens Peter Hansen kommer fra Danmark*". --- ### Training: Script to train this model The model was trained by the [DaNLP project](https://github.com/alexandrainst/danlp) using the [DaNE corpus](https://github.com/alexandrainst/danlp/blob/master/docs/docs/datasets.md#danish-dependency-treebank-dane-dane). Check their repo for more information. The following Flair script may be used to train such a model: ```python from flair.data import Corpus from flair.datasets import DANE from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = DANE() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('da'), # contextual string embeddings, forward FlairEmbeddings('da-forward'), # contextual string embeddings, backward FlairEmbeddings('da-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-danish', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following papers when using this model. ``` @inproceedings{akbik-etal-2019-flair, title = "{FLAIR}: An Easy-to-Use Framework for State-of-the-Art {NLP}", author = "Akbik, Alan and Bergmann, Tanja and Blythe, Duncan and Rasul, Kashif and Schweter, Stefan and Vollgraf, Roland", booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics (Demonstrations)", year = "2019", url = "https://www.aclweb.org/anthology/N19-4010", pages = "54--59", } ``` And check the [DaNLP project](https://github.com/alexandrainst/danlp) for more information. --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
{"language": "da", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["DaNE"], "widget": [{"text": "Jens Peter Hansen kommer fra Danmark"}]}
flair/ner-danish
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "da", "dataset:DaNE", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "da" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #da #dataset-DaNE #region-us
Danish NER in Flair (default model) =================================== This is the standard 4-class NER model for Danish that ships with Flair. F1-Score: 81.78 (DaNER) Predicts 4 tags: Based on Transformer embeddings and LSTM-CRF. --- Demo: How to use in Flair ========================= Requires: Flair ('pip install flair') This yields the following output: So, the entities "*Jens Peter Hansen*" (labeled as a person) and "*Danmark*" (labeled as a location) are found in the sentence "*Jens Peter Hansen kommer fra Danmark*". --- ### Training: Script to train this model The model was trained by the DaNLP project using the DaNE corpus. Check their repo for more information. The following Flair script may be used to train such a model: --- ### Cite Please cite the following papers when using this model. And check the DaNLP project for more information. --- ### Issues? The Flair issue tracker is available here.
[ "### Training: Script to train this model\n\n\nThe model was trained by the DaNLP project using the DaNE corpus. Check their repo for more information.\n\n\nThe following Flair script may be used to train such a model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following papers when using this model.\n\n\nAnd check the DaNLP project for more information.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #da #dataset-DaNE #region-us \n", "### Training: Script to train this model\n\n\nThe model was trained by the DaNLP project using the DaNE corpus. Check their repo for more information.\n\n\nThe following Flair script may be used to train such a model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following papers when using this model.\n\n\nAnd check the DaNLP project for more information.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## Dutch NER in Flair (large model) This is the large 4-class NER model for Dutch that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **95,25** (CoNLL-03 Dutch) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf/). --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-dutch-large") # make example sentence sentence = Sentence("George Washington ging naar Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (1.0)] Span [5]: "Washington" [− Labels: LOC (1.0)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging naar Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python import torch # 1. get the corpus from flair.datasets import CONLL_03_DUTCH corpus = CONLL_03_DUTCH() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize fine-tuneable transformer embeddings WITH document context from flair.embeddings import TransformerWordEmbeddings embeddings = TransformerWordEmbeddings( model='xlm-roberta-large', layers="-1", subtoken_pooling="first", fine_tune=True, use_context=True, ) # 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection) from flair.models import SequenceTagger tagger = SequenceTagger( hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type='ner', use_crf=False, use_rnn=False, reproject_embeddings=False, ) # 6. initialize trainer with AdamW optimizer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW) # 7. run training with XLM parameters (20 epochs, small LR) from torch.optim.lr_scheduler import OneCycleLR trainer.train('resources/taggers/ner-dutch-large', learning_rate=5.0e-6, mini_batch_size=4, mini_batch_chunk_size=1, max_epochs=20, scheduler=OneCycleLR, embeddings_storage_mode='none', weight_decay=0., ) ``` --- ### Cite Please cite the following paper when using this model. ``` @misc{schweter2020flert, title={FLERT: Document-Level Features for Named Entity Recognition}, author={Stefan Schweter and Alan Akbik}, year={2020}, eprint={2011.06993}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
{"language": "nl", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington ging naar Washington"}]}
flair/ner-dutch-large
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "nl", "dataset:conll2003", "arxiv:2011.06993", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2011.06993" ]
[ "nl" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #nl #dataset-conll2003 #arxiv-2011.06993 #has_space #region-us
Dutch NER in Flair (large model) -------------------------------- This is the large 4-class NER model for Dutch that ships with Flair. F1-Score: 95,25 (CoNLL-03 Dutch) Predicts 4 tags: Based on document-level XLM-R embeddings and FLERT. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington ging naar Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging naar Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #nl #dataset-conll2003 #arxiv-2011.06993 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging naar Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
# Dutch NER in Flair (default model) This is the standard 4-class NER model for Dutch that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **92,58** (CoNLL-03) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on Transformer embeddings and LSTM-CRF. --- # Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-dutch") # make example sentence sentence = Sentence("George Washington ging naar Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (0.997)] Span [5]: "Washington" [− Labels: LOC (0.9996)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging naar Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import CONLL_03_DUTCH from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = CONLL_03_DUTCH() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize embeddings embeddings = TransformerWordEmbeddings('wietsedv/bert-base-dutch-cased') # 5. initialize sequence tagger tagger: SequenceTagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer trainer: ModelTrainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-dutch', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik-etal-2019-flair, title = "{FLAIR}: An Easy-to-Use Framework for State-of-the-Art {NLP}", author = "Akbik, Alan and Bergmann, Tanja and Blythe, Duncan and Rasul, Kashif and Schweter, Stefan and Vollgraf, Roland", booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics (Demonstrations)", year = "2019", url = "https://www.aclweb.org/anthology/N19-4010", pages = "54--59", } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
{"language": "nl", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington ging naar Washington."}]}
flair/ner-dutch
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "nl", "dataset:conll2003", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "nl" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #nl #dataset-conll2003 #region-us
Dutch NER in Flair (default model) ================================== This is the standard 4-class NER model for Dutch that ships with Flair. F1-Score: 92,58 (CoNLL-03) Predicts 4 tags: Based on Transformer embeddings and LSTM-CRF. --- Demo: How to use in Flair ========================= Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington ging naar Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #nl #dataset-conll2003 #region-us \n", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English NER in Flair (fast model) This is the fast 4-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **92,92** (corrected CoNLL-03) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-english-fast") # make example sentence sentence = Sentence("George Washington went to Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (0.9515)] Span [5]: "Washington" [− Labels: LOC (0.992)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington went to Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import CONLL_03 from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = CONLL_03() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('glove'), # contextual string embeddings, forward FlairEmbeddings('news-forward-fast'), # contextual string embeddings, backward FlairEmbeddings('news-backward-fast'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-english', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington went to Washington"}]}
flair/ner-english-fast
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:conll2003", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2003 #has_space #region-us
English NER in Flair (fast model) --------------------------------- This is the fast 4-class NER model for English that ships with Flair. F1-Score: 92,92 (corrected CoNLL-03) Predicts 4 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington went to Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington went to Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2003 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington went to Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English NER in Flair (large model) This is the large 4-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **94,36** (corrected CoNLL-03) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf/). --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-english-large") # make example sentence sentence = Sentence("George Washington went to Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (1.0)] Span [5]: "Washington" [− Labels: LOC (1.0)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington went to Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python import torch # 1. get the corpus from flair.datasets import CONLL_03 corpus = CONLL_03() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize fine-tuneable transformer embeddings WITH document context from flair.embeddings import TransformerWordEmbeddings embeddings = TransformerWordEmbeddings( model='xlm-roberta-large', layers="-1", subtoken_pooling="first", fine_tune=True, use_context=True, ) # 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection) from flair.models import SequenceTagger tagger = SequenceTagger( hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type='ner', use_crf=False, use_rnn=False, reproject_embeddings=False, ) # 6. initialize trainer with AdamW optimizer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW) # 7. run training with XLM parameters (20 epochs, small LR) from torch.optim.lr_scheduler import OneCycleLR trainer.train('resources/taggers/ner-english-large', learning_rate=5.0e-6, mini_batch_size=4, mini_batch_chunk_size=1, max_epochs=20, scheduler=OneCycleLR, embeddings_storage_mode='none', weight_decay=0., ) ) ``` --- ### Cite Please cite the following paper when using this model. ``` @misc{schweter2020flert, title={FLERT: Document-Level Features for Named Entity Recognition}, author={Stefan Schweter and Alan Akbik}, year={2020}, eprint={2011.06993}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
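If the training script above is copied verbatim, note that the `trainer.train(...)` call is closed with one parenthesis too many, which Python rejects as a `SyntaxError`. A balanced version of the final two steps, keeping the hyperparameters from the card, would look like this (it assumes `tagger` and `corpus` were built as in steps 1–5 of the script):

```python
import torch
from torch.optim.lr_scheduler import OneCycleLR
from flair.trainers import ModelTrainer

# assumes `tagger` and `corpus` from steps 1-5 of the script above

# 6. initialize trainer with AdamW optimizer
trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW)

# 7. run training with XLM parameters (20 epochs, small LR)
trainer.train(
    'resources/taggers/ner-english-large',
    learning_rate=5.0e-6,
    mini_batch_size=4,
    mini_batch_chunk_size=1,
    max_epochs=20,
    scheduler=OneCycleLR,
    embeddings_storage_mode='none',
    weight_decay=0.,
)
```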
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington went to Washington"}]}
flair/ner-english-large
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:conll2003", "arxiv:2011.06993", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2011.06993" ]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2003 #arxiv-2011.06993 #has_space #region-us
English NER in Flair (large model) ---------------------------------- This is the large 4-class NER model for English that ships with Flair. F1-Score: 94,36 (corrected CoNLL-03) Predicts 4 tags: Based on document-level XLM-R embeddings and FLERT. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington went to Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington went to Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2003 #arxiv-2011.06993 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington went to Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English NER in Flair (Ontonotes fast model) This is the fast version of the 18-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **89.3** (Ontonotes) Predicts 18 tags: | **tag** | **meaning** | |---------------------------------|-----------| | CARDINAL | cardinal value | | DATE | date value | | EVENT | event name | | FAC | building name | | GPE | geo-political entity | | LANGUAGE | language name | | LAW | law name | | LOC | location name | | MONEY | money name | | NORP | affiliation | | ORDINAL | ordinal value | | ORG | organization name | | PERCENT | percent value | | PERSON | person name | | PRODUCT | product name | | QUANTITY | quantity value | | TIME | time value | | WORK_OF_ART | name of work of art | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-english-ontonotes-fast") # make example sentence sentence = Sentence("On September 1st George Washington won 1 dollar.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [2,3]: "September 1st" [− Labels: DATE (0.9655)] Span [4,5]: "George Washington" [− Labels: PERSON (0.8243)] Span [7,8]: "1 dollar" [− Labels: MONEY (0.8022)] ``` So, the entities "*September 1st*" (labeled as a **date**), "*George Washington*" (labeled as a **person**) and "*1 dollar*" (labeled as a **money**) are found in the sentence "*On September 1st George Washington won 1 dollar*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself) corpus: Corpus = ColumnCorpus( "resources/tasks/onto-ner", column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"}, tag_to_bioes="ner", ) # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('en-crawl'), # contextual string embeddings, forward FlairEmbeddings('news-forward-fast'), # contextual string embeddings, backward FlairEmbeddings('news-backward-fast'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-english-ontonotes-fast', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. 
``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
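With 18 entity types, a quick per-type tally is often more readable than the raw span list. A small sketch using only the standard library on top of the demo above (variable names are illustrative):

```python
from collections import Counter

from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/ner-english-ontonotes-fast")

sentence = Sentence("On September 1st George Washington won 1 dollar.")
tagger.predict(sentence)

# count how many spans of each Ontonotes type were predicted
type_counts = Counter(span.labels[0].value for span in sentence.get_spans("ner"))
print(type_counts)
# expected along the lines of: Counter({'DATE': 1, 'PERSON': 1, 'MONEY': 1})
```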
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "On September 1st George Washington won 1 dollar."}]}
flair/ner-english-ontonotes-fast
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #has_space #region-us
English NER in Flair (Ontonotes fast model) ------------------------------------------- This is the fast version of the 18-class NER model for English that ships with Flair. F1-Score: 89.3 (Ontonotes) Predicts 18 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*September 1st*" (labeled as a date), "*George Washington*" (labeled as a person) and "*1 dollar*" (labeled as a money) are found in the sentence "*On September 1st George Washington won 1 dollar*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*September 1st*\" (labeled as a date), \"*George Washington*\" (labeled as a person) and \"*1 dollar*\" (labeled as a money) are found in the sentence \"*On September 1st George Washington won 1 dollar*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*September 1st*\" (labeled as a date), \"*George Washington*\" (labeled as a person) and \"*1 dollar*\" (labeled as a money) are found in the sentence \"*On September 1st George Washington won 1 dollar*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English NER in Flair (Ontonotes large model) This is the large 18-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **90.93** (Ontonotes) Predicts 18 tags: | **tag** | **meaning** | |---------------------------------|-----------| | CARDINAL | cardinal value | | DATE | date value | | EVENT | event name | | FAC | building name | | GPE | geo-political entity | | LANGUAGE | language name | | LAW | law name | | LOC | location name | | MONEY | money name | | NORP | affiliation | | ORDINAL | ordinal value | | ORG | organization name | | PERCENT | percent value | | PERSON | person name | | PRODUCT | product name | | QUANTITY | quantity value | | TIME | time value | | WORK_OF_ART | name of work of art | Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf/). --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-english-ontonotes-large") # make example sentence sentence = Sentence("On September 1st George won 1 dollar while watching Game of Thrones.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [2,3]: "September 1st" [− Labels: DATE (1.0)] Span [4]: "George" [− Labels: PERSON (1.0)] Span [6,7]: "1 dollar" [− Labels: MONEY (1.0)] Span [10,11,12]: "Game of Thrones" [− Labels: WORK_OF_ART (1.0)] ``` So, the entities "*September 1st*" (labeled as a **date**), "*George*" (labeled as a **person**), "*1 dollar*" (labeled as a **money**) and "Game of Thrones" (labeled as a **work of art**) are found in the sentence "*On September 1st George Washington won 1 dollar while watching Game of Thrones*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself) corpus: Corpus = ColumnCorpus( "resources/tasks/onto-ner", column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"}, tag_to_bioes="ner", ) # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize fine-tuneable transformer embeddings WITH document context from flair.embeddings import TransformerWordEmbeddings embeddings = TransformerWordEmbeddings( model='xlm-roberta-large', layers="-1", subtoken_pooling="first", fine_tune=True, use_context=True, ) # 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection) from flair.models import SequenceTagger tagger = SequenceTagger( hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type='ner', use_crf=False, use_rnn=False, reproject_embeddings=False, ) # 6. initialize trainer with AdamW optimizer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW) # 7. 
run training with XLM parameters (20 epochs, small LR) from torch.optim.lr_scheduler import OneCycleLR trainer.train('resources/taggers/ner-english-ontonotes-large', learning_rate=5.0e-6, mini_batch_size=4, mini_batch_chunk_size=1, max_epochs=20, scheduler=OneCycleLR, embeddings_storage_mode='none', weight_decay=0., ) ``` --- ### Cite Please cite the following paper when using this model. ``` @misc{schweter2020flert, title={FLERT: Document-Level Features for Named Entity Recognition}, author={Stefan Schweter and Alan Akbik}, year={2020}, eprint={2011.06993}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
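Because this model runs a large XLM-R transformer under the hood, tagging one `Sentence` at a time is noticeably slower than batching. `SequenceTagger.predict` also accepts a list of sentences together with a `mini_batch_size`; a sketch of that usage (the second example sentence and the batch size are our assumptions, to be tuned for the available memory):

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/ner-english-ontonotes-large")

texts = [
    "On September 1st George won 1 dollar while watching Game of Thrones.",
    "The meeting is scheduled for 3 pm on Friday in Berlin.",
]

# wrap every text in a Sentence and predict them in mini-batches
sentences = [Sentence(text) for text in texts]
tagger.predict(sentences, mini_batch_size=32)

for sentence in sentences:
    for span in sentence.get_spans("ner"):
        print(span)
```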
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "On September 1st George won 1 dollar while watching Game of Thrones."}]}
flair/ner-english-ontonotes-large
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "arxiv:2011.06993", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2011.06993" ]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #arxiv-2011.06993 #has_space #region-us
English NER in Flair (Ontonotes large model) -------------------------------------------- This is the large 18-class NER model for English that ships with Flair. F1-Score: 90.93 (Ontonotes) Predicts 18 tags: Based on document-level XLM-R embeddings and FLERT. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*September 1st*" (labeled as a date), "*George*" (labeled as a person), "*1 dollar*" (labeled as a money) and "Game of Thrones" (labeled as a work of art) are found in the sentence "*On September 1st George Washington won 1 dollar while watching Game of Thrones*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*September 1st*\" (labeled as a date), \"*George*\" (labeled as a person), \"*1 dollar*\" (labeled as a money) and \"Game of Thrones\" (labeled as a work of art) are found in the sentence \"*On September 1st George Washington won 1 dollar while watching Game of Thrones*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #arxiv-2011.06993 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*September 1st*\" (labeled as a date), \"*George*\" (labeled as a person), \"*1 dollar*\" (labeled as a money) and \"Game of Thrones\" (labeled as a work of art) are found in the sentence \"*On September 1st George Washington won 1 dollar while watching Game of Thrones*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English NER in Flair (Ontonotes default model) This is the 18-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **89.27** (Ontonotes) Predicts 18 tags: | **tag** | **meaning** | |---------------------------------|-----------| | CARDINAL | cardinal value | | DATE | date value | | EVENT | event name | | FAC | building name | | GPE | geo-political entity | | LANGUAGE | language name | | LAW | law name | | LOC | location name | | MONEY | money name | | NORP | affiliation | | ORDINAL | ordinal value | | ORG | organization name | | PERCENT | percent value | | PERSON | person name | | PRODUCT | product name | | QUANTITY | quantity value | | TIME | time value | | WORK_OF_ART | name of work of art | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-english-ontonotes") # make example sentence sentence = Sentence("On September 1st George Washington won 1 dollar.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [2,3]: "September 1st" [− Labels: DATE (0.8824)] Span [4,5]: "George Washington" [− Labels: PERSON (0.9604)] Span [7,8]: "1 dollar" [− Labels: MONEY (0.9837)] ``` So, the entities "*September 1st*" (labeled as a **date**), "*George Washington*" (labeled as a **person**) and "*1 dollar*" (labeled as a **money**) are found in the sentence "*On September 1st George Washington won 1 dollar*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself) corpus: Corpus = ColumnCorpus( "resources/tasks/onto-ner", column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"}, tag_to_bioes="ner", ) # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('en-crawl'), # contextual string embeddings, forward FlairEmbeddings('news-forward'), # contextual string embeddings, backward FlairEmbeddings('news-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-english-ontonotes', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. 
``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
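If only a few of the 18 types matter for a downstream task, the predicted spans can simply be filtered by label after tagging. A short sketch that keeps only persons, dates and monetary amounts (the kept-label set is an example choice):

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/ner-english-ontonotes")

KEEP = {"PERSON", "DATE", "MONEY"}  # example subset of the 18 Ontonotes types

sentence = Sentence("On September 1st George Washington won 1 dollar.")
tagger.predict(sentence)

wanted = [span for span in sentence.get_spans("ner") if span.labels[0].value in KEEP]
for span in wanted:
    print(span.text, "->", span.labels[0].value)
```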
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "On September 1st George Washington won 1 dollar."}]}
flair/ner-english-ontonotes
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #has_space #region-us
English NER in Flair (Ontonotes default model) ---------------------------------------------- This is the 18-class NER model for English that ships with Flair. F1-Score: 89.27 (Ontonotes) Predicts 18 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*September 1st*" (labeled as a date), "*George Washington*" (labeled as a person) and "*1 dollar*" (labeled as a money) are found in the sentence "*On September 1st George Washington won 1 dollar*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*September 1st*\" (labeled as a date), \"*George Washington*\" (labeled as a person) and \"*1 dollar*\" (labeled as a money) are found in the sentence \"*On September 1st George Washington won 1 dollar*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*September 1st*\" (labeled as a date), \"*George Washington*\" (labeled as a person) and \"*1 dollar*\" (labeled as a money) are found in the sentence \"*On September 1st George Washington won 1 dollar*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English NER in Flair (default model) This is the standard 4-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **93,06** (corrected CoNLL-03) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-english") # make example sentence sentence = Sentence("George Washington went to Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (0.9968)] Span [5]: "Washington" [− Labels: LOC (0.9994)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington went to Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import CONLL_03 from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = CONLL_03() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('glove'), # contextual string embeddings, forward FlairEmbeddings('news-forward'), # contextual string embeddings, backward FlairEmbeddings('news-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-english', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
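For corpus-scale use it is handy to persist predictions in a line-based format. The sketch below tags a list of documents and writes one JSON object per input line using only the standard library (the example documents, file name and record layout are our choices, not a Flair convention):

```python
import json

from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/ner-english")

documents = [
    "George Washington went to Washington",
    "Angela Merkel visited Paris last year",
]

with open("ner_predictions.jsonl", "w", encoding="utf-8") as out:
    for text in documents:
        sentence = Sentence(text)
        tagger.predict(sentence)
        record = {
            "text": text,
            "entities": [
                {"text": span.text, "label": span.labels[0].value, "score": span.labels[0].score}
                for span in sentence.get_spans("ner")
            ],
        }
        out.write(json.dumps(record, ensure_ascii=False) + "\n")
```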
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington went to Washington"}]}
flair/ner-english
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:conll2003", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2003 #has_space #region-us
English NER in Flair (default model) ------------------------------------ This is the standard 4-class NER model for English that ships with Flair. F1-Score: 93,06 (corrected CoNLL-03) Predicts 4 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington went to Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington went to Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-conll2003 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington went to Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## French NER in Flair (default model) This is the standard 4-class NER model for French that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **90,61** (WikiNER) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-french") # make example sentence sentence = Sentence("George Washington est allé à Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (0.7394)] Span [6]: "Washington" [− Labels: LOC (0.9161)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington est allé à Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import WIKINER_FRENCH from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = WIKINER_FRENCH() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('fr'), # contextual string embeddings, forward FlairEmbeddings('fr-forward'), # contextual string embeddings, backward FlairEmbeddings('fr-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-french', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
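Besides iterating over spans, Flair can render a tagged sentence as a single string, which is convenient for quick inspection. A short sketch with the French model (the exact inline-tag format of the output depends on the installed Flair version):

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/ner-french")

sentence = Sentence("George Washington est allé à Washington")
tagger.predict(sentence)

# one-line view of the sentence with inline NER tags
print(sentence.to_tagged_string())
```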
{"language": "fr", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington est all\u00e9 \u00e0 Washington"}]}
flair/ner-french
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "fr", "dataset:conll2003", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "fr" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #fr #dataset-conll2003 #has_space #region-us
French NER in Flair (default model) ----------------------------------- This is the standard 4-class NER model for French that ships with Flair. F1-Score: 90,61 (WikiNER) Predicts 4 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington est allé à Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington est allé à Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #fr #dataset-conll2003 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington est allé à Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## German NER in Flair (large model) This is the large 4-class NER model for German that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **92,31** (CoNLL-03 German revised) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf). --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-german-large") # make example sentence sentence = Sentence("George Washington ging nach Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (1.0)] Span [5]: "Washington" [− Labels: LOC (1.0)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging nach Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python import torch # 1. get the corpus from flair.datasets import CONLL_03_GERMAN corpus = CONLL_03_GERMAN() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize fine-tuneable transformer embeddings WITH document context from flair.embeddings import TransformerWordEmbeddings embeddings = TransformerWordEmbeddings( model='xlm-roberta-large', layers="-1", subtoken_pooling="first", fine_tune=True, use_context=True, ) # 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection) from flair.models import SequenceTagger tagger = SequenceTagger( hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type='ner', use_crf=False, use_rnn=False, reproject_embeddings=False, ) # 6. initialize trainer with AdamW optimizer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW) # 7. run training with XLM parameters (20 epochs, small LR) from torch.optim.lr_scheduler import OneCycleLR trainer.train('resources/taggers/ner-german-large', learning_rate=5.0e-6, mini_batch_size=4, mini_batch_chunk_size=1, max_epochs=20, scheduler=OneCycleLR, embeddings_storage_mode='none', weight_decay=0., ) ) ``` --- ### Cite Please cite the following paper when using this model. ``` @misc{schweter2020flert, title={FLERT: Document-Level Features for Named Entity Recognition}, author={Stefan Schweter and Alan Akbik}, year={2020}, eprint={2011.06993}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
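Like the other large FLERT models, this tagger is much faster on a GPU. Flair routes all computation through the module-level `flair.device`, which can be set before the model is loaded; a sketch (the device string assumes at most one CUDA GPU is available):

```python
import torch
import flair
from flair.data import Sentence
from flair.models import SequenceTagger

# send all Flair computation to the first GPU, falling back to CPU if none is available
flair.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

tagger = SequenceTagger.load("flair/ner-german-large")

sentence = Sentence("George Washington ging nach Washington")
tagger.predict(sentence)
print(sentence.get_spans("ner"))
```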
{"language": "de", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington ging nach Washington"}]}
flair/ner-german-large
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "de", "dataset:conll2003", "arxiv:2011.06993", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2011.06993" ]
[ "de" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #de #dataset-conll2003 #arxiv-2011.06993 #has_space #region-us
German NER in Flair (large model) --------------------------------- This is the large 4-class NER model for German that ships with Flair. F1-Score: 92,31 (CoNLL-03 German revised) Predicts 4 tags: Based on document-level XLM-R embeddings and FLERT. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington ging nach Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging nach Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #de #dataset-conll2003 #arxiv-2011.06993 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging nach Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## NER for German Legal Text in Flair (default model) This is the legal NER model for German that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **96,35** (LER German dataset) Predicts 19 tags: | **tag** | **meaning** | |---------------------------------|-----------| | AN | Anwalt | | EUN | Europäische Norm | | GS | Gesetz | | GRT | Gericht | | INN | Institution | | LD | Land | | LDS | Landschaft | | LIT | Literatur | | MRK | Marke | | ORG | Organisation | | PER | Person | | RR | Richter | | RS | Rechtssprechung | | ST | Stadt | | STR | Straße | | UN | Unternehmen | | VO | Verordnung | | VS | Vorschrift | | VT | Vertrag | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. More details on the Legal NER dataset [here](https://github.com/elenanereiss/Legal-Entity-Recognition) --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-german-legal") # make example sentence (don't use tokenizer since Rechtstexte are badly handled) sentence = Sentence("Herr W. verstieß gegen § 36 Abs. 7 IfSG.", use_tokenizer=False) # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [2]: "W." [− Labels: PER (0.9911)] Span [5,6,7,8,9]: "§ 36 Abs. 7 IfSG." [− Labels: GS (0.5353)] ``` So, the entities "*W.*" (labeled as a **person**) and "*§ 36 Abs. 7 IfSG*" (labeled as a **Gesetz**) are found in the sentence "*Herr W. verstieß gegen § 36 Abs. 7 IfSG.*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import LER_GERMAN from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = LER_GERMAN() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('de'), # contextual string embeddings, forward FlairEmbeddings('de-forward'), # contextual string embeddings, backward FlairEmbeddings('de-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-german-legal', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following papers when using this model. ``` @inproceedings{leitner2019fine, author = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider}, title = {{Fine-grained Named Entity Recognition in Legal Documents}}, booktitle = {Semantic Systems. The Power of AI and Knowledge Graphs. 
Proceedings of the 15th International Conference (SEMANTiCS 2019)}, year = 2019, pages = {272--287}, pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}} ``` ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
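A common downstream use of this model is pulling out all statute citations (tag `GS`) from a batch of decisions. A small sketch of that, reusing the `use_tokenizer=False` convention from the demo above (the second example snippet is invented):

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/ner-german-legal")

snippets = [
    "Herr W. verstieß gegen § 36 Abs. 7 IfSG.",
    "Die Klage stützt sich auf § 823 Abs. 1 BGB.",
]

statutes = []
for text in snippets:
    sentence = Sentence(text, use_tokenizer=False)
    tagger.predict(sentence)
    # keep only spans tagged as Gesetz (GS)
    statutes.extend(
        span.text for span in sentence.get_spans("ner") if span.labels[0].value == "GS"
    )

print(statutes)
```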
{"language": "de", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["legal"], "widget": [{"text": "Herr W. verstie\u00df gegen \u00a7 36 Abs. 7 IfSG."}]}
flair/ner-german-legal
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "de", "dataset:legal", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "de" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #de #dataset-legal #region-us
NER for German Legal Text in Flair (default model) -------------------------------------------------- This is the legal NER model for German that ships with Flair. F1-Score: 96,35 (LER German dataset) Predicts 19 tags: Based on Flair embeddings and LSTM-CRF. More details on the Legal NER dataset here --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*W.*" (labeled as a person) and "*§ 36 Abs. 7 IfSG*" (labeled as a Gesetz) are found in the sentence "*Herr W. verstieß gegen § 36 Abs. 7 IfSG.*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following papers when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*W.*\" (labeled as a person) and \"*§ 36 Abs. 7 IfSG*\" (labeled as a Gesetz) are found in the sentence \"*Herr W. verstieß gegen § 36 Abs. 7 IfSG.*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following papers when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #de #dataset-legal #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*W.*\" (labeled as a person) and \"*§ 36 Abs. 7 IfSG*\" (labeled as a Gesetz) are found in the sentence \"*Herr W. verstieß gegen § 36 Abs. 7 IfSG.*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following papers when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## German NER in Flair (default model) This is the standard 4-class NER model for German that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **87,94** (CoNLL-03 German revised) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-german") # make example sentence sentence = Sentence("George Washington ging nach Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (0.9977)] Span [5]: "Washington" [− Labels: LOC (0.9895)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging nach Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import CONLL_03_GERMAN from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the corpus corpus: Corpus = CONLL_03_GERMAN() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('de'), # contextual string embeddings, forward FlairEmbeddings('de-forward'), # contextual string embeddings, backward FlairEmbeddings('de-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-german', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
{"language": "de", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington ging nach Washington"}]}
flair/ner-german
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "de", "dataset:conll2003", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "de" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #de #dataset-conll2003 #has_space #region-us
German NER in Flair (default model) ----------------------------------- This is the standard 4-class NER model for German that ships with Flair. F1-Score: 87,94 (CoNLL-03 German revised) Predicts 4 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington ging nach Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging nach Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #de #dataset-conll2003 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging nach Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## 4-Language NER in Flair (English, German, Dutch and Spanish) This is the fast 4-class NER model for 4 CoNLL-03 languages that ships with [Flair](https://github.com/flairNLP/flair/). Also kind of works for related languages like French. F1-Score: **91,51** (CoNLL-03 English), **85,72** (CoNLL-03 German revised), **86,22** (CoNLL-03 Dutch), **85,78** (CoNLL-03 Spanish) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-multi-fast") # make example sentence in any of the four languages sentence = Sentence("George Washington ging nach Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (0.9977)] Span [5]: "Washington" [− Labels: LOC (0.9895)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging nach Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus, MultiCorpus from flair.datasets import CONLL_03, CONLL_03_GERMAN, CONLL_03_DUTCH, CONLL_03_SPANISH from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the multi-language corpus corpus: Corpus = MultiCorpus([ CONLL_03(), # English corpus CONLL_03_GERMAN(), # German corpus CONLL_03_DUTCH(), # Dutch corpus CONLL_03_SPANISH(), # Spanish corpus ]) # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('glove'), # FastText embeddings WordEmbeddings('de'), # contextual string embeddings, forward FlairEmbeddings('multi-forward-fast'), # contextual string embeddings, backward FlairEmbeddings('multi-backward-fast'), ] # embedding stack consists of GloVe, FastText and Flair embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-multi-fast', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following papers when using this model.
``` @misc{akbik2019multilingual, title={Multilingual sequence labeling with one model}, author={Akbik, Alan and Bergmann, Tanja and Vollgraf, Roland} booktitle = {{NLDL} 2019, Northern Lights Deep Learning Workshop}, year = {2019} } ``` ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ```
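Since one fast model covers several languages, mixed-language batches can be tagged in a single call. A minimal sketch, assuming `predict` accepts a list of sentences and a `mini_batch_size` argument as in recent Flair releases:

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/ner-multi-fast")

# one sentence per language; the same tagger handles all of them
sentences = [
    Sentence("George Washington went to Washington"),    # English
    Sentence("George Washington ging nach Washington"),  # German
    Sentence("George Washington ging naar Washington"),  # Dutch
    Sentence("George Washington fue a Washington"),      # Spanish
]

# predict on the whole batch in one call
tagger.predict(sentences, mini_batch_size=8)

for sentence in sentences:
    print(sentence)
    for span in sentence.get_spans("ner"):
        print("  ", span)
```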
{"language": ["en", "de", "nl", "es"], "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington ging nach Washington"}]}
flair/ner-multi-fast
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "de", "nl", "es", "dataset:conll2003", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en", "de", "nl", "es" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #de #nl #es #dataset-conll2003 #has_space #region-us
4-Language NER in Flair (English, German, Dutch and Spanish) ------------------------------------------------------------ This is the fast 4-class NER model for 4 CoNLL-03 languages that ships with Flair. Also kind of works for related languages like French. F1-Score: 91,51 (CoNLL-03 English), 85,72 (CoNLL-03 German revised), 86,22 (CoNLL-03 Dutch), 85,78 (CoNLL-03 Spanish) Predicts 4 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington ging nach Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following papers when using this model.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging nach Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following papers when using this model." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #de #nl #es #dataset-conll2003 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging nach Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following papers when using this model." ]
token-classification
flair
## 4-Language NER in Flair (English, German, Dutch and Spanish) This is the standard 4-class NER model for 4 CoNLL-03 languages that ships with [Flair](https://github.com/flairNLP/flair/). Also kind of works for related languages like French. F1-Score: **92,16** (CoNLL-03 English), **87,33** (CoNLL-03 German revised), **88,96** (CoNLL-03 Dutch), **86,65** (CoNLL-03 Spanish) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-multi") # make example sentence in any of the four languages sentence = Sentence("George Washington ging nach Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (0.9977)] Span [5]: "Washington" [− Labels: LOC (0.9895)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging nach Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus, MultiCorpus from flair.datasets import CONLL_03, CONLL_03_GERMAN, CONLL_03_DUTCH, CONLL_03_SPANISH from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the multi-language corpus corpus: Corpus = MultiCorpus([ CONLL_03(), # English corpus CONLL_03_GERMAN(), # German corpus CONLL_03_DUTCH(), # Dutch corpus CONLL_03_SPANISH(), # Spanish corpus ]) # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('glove'), # FastText embeddings WordEmbeddings('de'), # contextual string embeddings, forward FlairEmbeddings('multi-forward'), # contextual string embeddings, backward FlairEmbeddings('multi-backward'), ] # embedding stack consists of GloVe, FastText and Flair embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-multi', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model.
``` @misc{akbik2019multilingual, title={Multilingual sequence labeling with one model}, author={Akbik, Alan and Bergmann, Tanja and Vollgraf, Roland} booktitle = {{NLDL} 2019, Northern Lights Deep Learning Workshop}, year = {2019} } ``` ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ```
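For longer input, the text is usually split into sentences before tagging. A minimal sketch, assuming Flair's `SegtokSentenceSplitter` from `flair.splitter` is available (it ships with recent Flair releases):

```python
from flair.models import SequenceTagger
from flair.splitter import SegtokSentenceSplitter

tagger = SequenceTagger.load("flair/ner-multi")
splitter = SegtokSentenceSplitter()

text = (
    "George Washington ging nach Washington. "
    "Later he returned to Mount Vernon in Virginia."
)

# split the raw text into Sentence objects, then tag them all at once
sentences = splitter.split(text)
tagger.predict(sentences)

for sentence in sentences:
    for span in sentence.get_spans("ner"):
        print(span.text, "->", span.tag)
```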
{"language": ["en", "de", "nl", "es", "multilingual"], "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington ging nach Washington"}]}
flair/ner-multi
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "de", "nl", "es", "multilingual", "dataset:conll2003", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en", "de", "nl", "es", "multilingual" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #de #nl #es #multilingual #dataset-conll2003 #region-us
4-Language NER in Flair (English, German, Dutch and Spanish) ------------------------------------------------------------ This is the standard 4-class NER model for 4 CoNLL-03 languages that ships with Flair. Also kind of works for related languages like French. F1-Score: 92,16 (CoNLL-03 English), 87,33 (CoNLL-03 German revised), 88,96 (CoNLL-03 Dutch), 86,65 (CoNLL-03 Spanish) Predicts 4 tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington ging nach Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging nach Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #de #nl #es #multilingual #dataset-conll2003 #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington ging nach Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model." ]
token-classification
flair
## Spanish NER in Flair (large model) This is the large 4-class NER model for Spanish that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **90,54** (CoNLL-03 Spanish) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf/). --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-spanish-large") # make example sentence sentence = Sentence("George Washington fue a Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (1.0)] Span [5]: "Washington" [− Labels: LOC (1.0)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington fue a Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python import torch # 1. get the corpus from flair.datasets import CONLL_03_SPANISH corpus = CONLL_03_SPANISH() # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize fine-tuneable transformer embeddings WITH document context from flair.embeddings import TransformerWordEmbeddings embeddings = TransformerWordEmbeddings( model='xlm-roberta-large', layers="-1", subtoken_pooling="first", fine_tune=True, use_context=True, ) # 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection) from flair.models import SequenceTagger tagger = SequenceTagger( hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type='ner', use_crf=False, use_rnn=False, reproject_embeddings=False, ) # 6. initialize trainer with AdamW optimizer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW) # 7. run training with XLM parameters (20 epochs, small LR) from torch.optim.lr_scheduler import OneCycleLR trainer.train('resources/taggers/ner-spanish-large', learning_rate=5.0e-6, mini_batch_size=4, mini_batch_chunk_size=1, max_epochs=20, scheduler=OneCycleLR, embeddings_storage_mode='none', weight_decay=0., ) ``` --- ### Cite Please cite the following paper when using this model. ``` @misc{schweter2020flert, title={FLERT: Document-Level Features for Named Entity Recognition}, author={Stefan Schweter and Alan Akbik}, year={2020}, eprint={2011.06993}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
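Because this model fine-tunes XLM-R large, inference is considerably faster on a GPU. A minimal sketch, assuming the module-level `flair.device` switch and list input to `predict` as in recent Flair releases:

```python
import torch
import flair
from flair.data import Sentence
from flair.models import SequenceTagger

# place all Flair computation on the first GPU if one is available
flair.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

tagger = SequenceTagger.load("flair/ner-spanish-large")

sentences = [
    Sentence("George Washington fue a Washington"),
    Sentence("Miguel de Cervantes nació en Alcalá de Henares"),
]

# small mini-batches keep memory usage modest for the large transformer
tagger.predict(sentences, mini_batch_size=4)

for sentence in sentences:
    print(sentence.get_spans("ner"))
```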
{"language": "es", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["conll2003"], "widget": [{"text": "George Washington fue a Washington"}]}
flair/ner-spanish-large
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "es", "dataset:conll2003", "arxiv:2011.06993", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "2011.06993" ]
[ "es" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #es #dataset-conll2003 #arxiv-2011.06993 #has_space #region-us
Spanish NER in Flair (large model) ---------------------------------- This is the large 4-class NER model for Spanish that ships with Flair. F1-Score: 90,54 (CoNLL-03 Spanish) Predicts 4 tags: Based on document-level XLM-R embeddings and FLERT. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the entities "*George Washington*" (labeled as a person) and "*Washington*" (labeled as a location) are found in the sentence "*George Washington fue a Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington fue a Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #es #dataset-conll2003 #arxiv-2011.06993 #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the entities \"*George Washington*\" (labeled as a person) and \"*Washington*\" (labeled as a location) are found in the sentence \"*George Washington fue a Washington*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English Part-of-Speech Tagging in Flair (fast model) This is the fast part-of-speech tagging model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **98,10** (Ontonotes) Predicts fine-grained POS tags: | **tag** | **meaning** | |---------------------------------|-----------| |ADD | Email | |AFX | Affix | |CC | Coordinating conjunction | |CD | Cardinal number | |DT | Determiner | |EX | Existential there | |FW | Foreign word | |HYPH | Hyphen | |IN | Preposition or subordinating conjunction | |JJ | Adjective | |JJR |Adjective, comparative | |JJS | Adjective, superlative | |LS | List item marker | |MD | Modal | |NFP | Superfluous punctuation | |NN | Noun, singular or mass | |NNP |Proper noun, singular | |NNPS | Proper noun, plural | |NNS |Noun, plural | |PDT | Predeterminer | |POS | Possessive ending | |PRP | Personal pronoun | |PRP$ | Possessive pronoun | |RB | Adverb | |RBR | Adverb, comparative | |RBS | Adverb, superlative | |RP | Particle | |SYM | Symbol | |TO | to | |UH | Interjection | |VB | Verb, base form | |VBD | Verb, past tense | |VBG | Verb, gerund or present participle | |VBN | Verb, past participle | |VBP | Verb, non-3rd person singular present | |VBZ | Verb, 3rd person singular present | |WDT | Wh-determiner | |WP | Wh-pronoun | |WP$ | Possessive wh-pronoun | |WRB | Wh-adverb | |XX | Unknown | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/pos-english-fast") # make example sentence sentence = Sentence("I love Berlin.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('pos'): print(entity) ``` This yields the following output: ``` Span [1]: "I" [− Labels: PRP (1.0)] Span [2]: "love" [− Labels: VBP (0.9998)] Span [3]: "Berlin" [− Labels: NNP (0.9999)] Span [4]: "." [− Labels: . (0.9998)] ``` So, the word "*I*" is labeled as a **pronoun** (PRP), "*love*" is labeled as a **verb** (VBP) and "*Berlin*" is labeled as a **proper noun** (NNP) in the sentence "*I love Berlin*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself) corpus: Corpus = ColumnCorpus( "resources/tasks/onto-ner", column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"}, tag_to_bioes="ner", ) # 2. what tag do we want to predict? tag_type = 'pos' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # contextual string embeddings, forward FlairEmbeddings('news-forward'), # contextual string embeddings, backward FlairEmbeddings('news-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. 
initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/pos-english-fast', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
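A minimal sketch of one common follow-up step, assuming the same `get_spans('pos')` access pattern shown above: counting how often each fine-grained tag occurs in a sentence.

```python
from collections import Counter

from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/pos-english-fast")

sentence = Sentence("The quick brown fox jumps over the lazy dog .")
tagger.predict(sentence)

# count how often each fine-grained POS tag occurs
tag_counts = Counter(span.tag for span in sentence.get_spans("pos"))
print(tag_counts)
# e.g. Counter({'JJ': 3, 'DT': 2, 'NN': 2, ...})
```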
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "I love Berlin."}]}
flair/pos-english-fast
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #has_space #region-us
English Part-of-Speech Tagging in Flair (fast model) ---------------------------------------------------- This is the fast part-of-speech tagging model for English that ships with Flair. F1-Score: 98,10 (Ontonotes) Predicts fine-grained POS tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the word "*I*" is labeled as a pronoun (PRP), "*love*" is labeled as a verb (VBP) and "*Berlin*" is labeled as a proper noun (NNP) in the sentence "*I love Berlin*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the word \"*I*\" is labeled as a pronoun (PRP), \"*love*\" is labeled as a verb (VBP) and \"*Berlin*\" is labeled as a proper noun (NNP) in the sentence \"*I love Berlin*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the word \"*I*\" is labeled as a pronoun (PRP), \"*love*\" is labeled as a verb (VBP) and \"*Berlin*\" is labeled as a proper noun (NNP) in the sentence \"*I love Berlin*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English Part-of-Speech Tagging in Flair (default model) This is the standard part-of-speech tagging model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **98,19** (Ontonotes) Predicts fine-grained POS tags: | **tag** | **meaning** | |---------------------------------|-----------| |ADD | Email | |AFX | Affix | |CC | Coordinating conjunction | |CD | Cardinal number | |DT | Determiner | |EX | Existential there | |FW | Foreign word | |HYPH | Hyphen | |IN | Preposition or subordinating conjunction | |JJ | Adjective | |JJR |Adjective, comparative | |JJS | Adjective, superlative | |LS | List item marker | |MD | Modal | |NFP | Superfluous punctuation | |NN | Noun, singular or mass | |NNP |Proper noun, singular | |NNPS | Proper noun, plural | |NNS |Noun, plural | |PDT | Predeterminer | |POS | Possessive ending | |PRP | Personal pronoun | |PRP$ | Possessive pronoun | |RB | Adverb | |RBR | Adverb, comparative | |RBS | Adverb, superlative | |RP | Particle | |SYM | Symbol | |TO | to | |UH | Interjection | |VB | Verb, base form | |VBD | Verb, past tense | |VBG | Verb, gerund or present participle | |VBN | Verb, past participle | |VBP | Verb, non-3rd person singular present | |VBZ | Verb, 3rd person singular present | |WDT | Wh-determiner | |WP | Wh-pronoun | |WP$ | Possessive wh-pronoun | |WRB | Wh-adverb | |XX | Unknown | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/pos-english") # make example sentence sentence = Sentence("I love Berlin.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('pos'): print(entity) ``` This yields the following output: ``` Span [1]: "I" [− Labels: PRP (1.0)] Span [2]: "love" [− Labels: VBP (1.0)] Span [3]: "Berlin" [− Labels: NNP (0.9999)] Span [4]: "." [− Labels: . (1.0)] ``` So, the word "*I*" is labeled as a **pronoun** (PRP), "*love*" is labeled as a **verb** (VBP) and "*Berlin*" is labeled as a **proper noun** (NNP) in the sentence "*I love Berlin*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself) corpus: Corpus = ColumnCorpus( "resources/tasks/onto-ner", column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"}, tag_to_bioes="ner", ) # 2. what tag do we want to predict? tag_type = 'pos' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # contextual string embeddings, forward FlairEmbeddings('news-forward'), # contextual string embeddings, backward FlairEmbeddings('news-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. 
initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/pos-english', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
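A minimal sketch, assuming spans expose `text` and `tag` as in the demo output above: filtering the prediction down to noun tokens (any tag starting with `NN`).

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/pos-english")

sentence = Sentence("Berlin is the capital of Germany and a major cultural hub .")
tagger.predict(sentence)

# keep only tokens tagged as some kind of noun (NN, NNS, NNP, NNPS)
nouns = [span.text for span in sentence.get_spans("pos") if span.tag.startswith("NN")]
print(nouns)
# e.g. ['Berlin', 'capital', 'Germany', 'hub']
```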
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "inference": false}
flair/pos-english
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #has_space #region-us
English Part-of-Speech Tagging in Flair (default model) ------------------------------------------------------- This is the standard part-of-speech tagging model for English that ships with Flair. F1-Score: 98,19 (Ontonotes) Predicts fine-grained POS tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the word "*I*" is labeled as a pronoun (PRP), "*love*" is labeled as a verb (VBP) and "*Berlin*" is labeled as a proper noun (NNP) in the sentence "*I love Berlin*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the word \"*I*\" is labeled as a pronoun (PRP), \"*love*\" is labeled as a verb (VBP) and \"*Berlin*\" is labeled as a proper noun (NNP) in the sentence \"*I love Berlin*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #has_space #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the word \"*I*\" is labeled as a pronoun (PRP), \"*love*\" is labeled as a verb (VBP) and \"*Berlin*\" is labeled as a proper noun (NNP) in the sentence \"*I love Berlin*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English Universal Part-of-Speech Tagging in Flair (fast model) This is the fast universal part-of-speech tagging model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **98,47** (Ontonotes) Predicts universal POS tags: | **tag** | **meaning** | |---------------------------------|-----------| |ADJ | adjective | | ADP | adposition | | ADV | adverb | | AUX | auxiliary | | CCONJ | coordinating conjunction | | DET | determiner | | INTJ | interjection | | NOUN | noun | | NUM | numeral | | PART | particle | | PRON | pronoun | | PROPN | proper noun | | PUNCT | punctuation | | SCONJ | subordinating conjunction | | SYM | symbol | | VERB | verb | | X | other | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/upos-english-fast") # make example sentence sentence = Sentence("I love Berlin.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('pos'): print(entity) ``` This yields the following output: ``` Span [1]: "I" [− Labels: PRON (0.9996)] Span [2]: "love" [− Labels: VERB (1.0)] Span [3]: "Berlin" [− Labels: PROPN (0.9986)] Span [4]: "." [− Labels: PUNCT (1.0)] ``` So, the word "*I*" is labeled as a **pronoun** (PRON), "*love*" is labeled as a **verb** (VERB) and "*Berlin*" is labeled as a **proper noun** (PROPN) in the sentence "*I love Berlin*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself) corpus: Corpus = ColumnCorpus( "resources/tasks/onto-ner", column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"}, tag_to_bioes="ner", ) # 2. what tag do we want to predict? tag_type = 'upos' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # contextual string embeddings, forward FlairEmbeddings('news-forward-fast'), # contextual string embeddings, backward FlairEmbeddings('news-backward-fast'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/upos-english-fast', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? 
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
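A minimal sketch, assuming the same span attributes as in the demo above: separating content words from function words with the universal tag set.

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/upos-english-fast")

sentence = Sentence("I quickly read the new report about renewable energy .")
tagger.predict(sentence)

CONTENT_TAGS = {"NOUN", "PROPN", "VERB", "ADJ", "ADV"}

# split tokens into content words and everything else
content = [s.text for s in sentence.get_spans("pos") if s.tag in CONTENT_TAGS]
function = [s.text for s in sentence.get_spans("pos") if s.tag not in CONTENT_TAGS]

print("content words:", content)
print("function words:", function)
```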
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "I love Berlin."}]}
flair/upos-english-fast
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #region-us
English Universal Part-of-Speech Tagging in Flair (fast model) -------------------------------------------------------------- This is the fast universal part-of-speech tagging model for English that ships with Flair. F1-Score: 98,47 (Ontonotes) Predicts universal POS tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the word "*I*" is labeled as a pronoun (PRON), "*love*" is labeled as a verb (VERB) and "*Berlin*" is labeled as a proper noun (PROPN) in the sentence "*I love Berlin*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the word \"*I*\" is labeled as a pronoun (PRON), \"*love*\" is labeled as a verb (VERB) and \"*Berlin*\" is labeled as a proper noun (PROPN) in the sentence \"*I love Berlin*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the word \"*I*\" is labeled as a pronoun (PRON), \"*love*\" is labeled as a verb (VERB) and \"*Berlin*\" is labeled as a proper noun (PROPN) in the sentence \"*I love Berlin*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## English Universal Part-of-Speech Tagging in Flair (default model) This is the standard universal part-of-speech tagging model for English that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **98,6** (Ontonotes) Predicts universal POS tags: | **tag** | **meaning** | |---------------------------------|-----------| |ADJ | adjective | | ADP | adposition | | ADV | adverb | | AUX | auxiliary | | CCONJ | coordinating conjunction | | DET | determiner | | INTJ | interjection | | NOUN | noun | | NUM | numeral | | PART | particle | | PRON | pronoun | | PROPN | proper noun | | PUNCT | punctuation | | SCONJ | subordinating conjunction | | SYM | symbol | | VERB | verb | | X | other | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/upos-english") # make example sentence sentence = Sentence("I love Berlin.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('pos'): print(entity) ``` This yields the following output: ``` Span [1]: "I" [− Labels: PRON (0.9996)] Span [2]: "love" [− Labels: VERB (1.0)] Span [3]: "Berlin" [− Labels: PROPN (0.9986)] Span [4]: "." [− Labels: PUNCT (1.0)] ``` So, the word "*I*" is labeled as a **pronoun** (PRON), "*love*" is labeled as a **verb** (VERB) and "*Berlin*" is labeled as a **proper noun** (PROPN) in the sentence "*I love Berlin*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import ColumnCorpus from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself) corpus: Corpus = ColumnCorpus( "resources/tasks/onto-ner", column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"}, tag_to_bioes="ner", ) # 2. what tag do we want to predict? tag_type = 'upos' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # contextual string embeddings, forward FlairEmbeddings('news-forward'), # contextual string embeddings, backward FlairEmbeddings('news-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/upos-english', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? 
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
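A minimal sketch, assuming `span.score` holds the label confidence shown in the demo output (the 0.90 threshold is an arbitrary choice): flagging low-confidence tags for manual review.

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/upos-english")

sentence = Sentence("Time flies like an arrow .")
tagger.predict(sentence)

# flag tokens whose predicted tag falls below an (arbitrary) confidence threshold
THRESHOLD = 0.90
for span in sentence.get_spans("pos"):
    marker = "" if span.score >= THRESHOLD else "  <-- low confidence, review"
    print(f"{span.text}\t{span.tag}\t{span.score:.4f}{marker}")
```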
{"language": "en", "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "I love Berlin."}]}
flair/upos-english
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "dataset:ontonotes", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #region-us
English Universal Part-of-Speech Tagging in Flair (default model) ----------------------------------------------------------------- This is the standard universal part-of-speech tagging model for English that ships with Flair. F1-Score: 98,6 (Ontonotes) Predicts universal POS tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the word "*I*" is labeled as a pronoun (PRON), "*love*" is labeled as a verb (VERB) and "*Berlin*" is labeled as a proper noun (PROPN) in the sentence "*I love Berlin*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the word \"*I*\" is labeled as a pronoun (PRON), \"*love*\" is labeled as a verb (VERB) and \"*Berlin*\" is labeled as a proper noun (PROPN) in the sentence \"*I love Berlin*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #dataset-ontonotes #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the word \"*I*\" is labeled as a pronoun (PRON), \"*love*\" is labeled as a verb (VERB) and \"*Berlin*\" is labeled as a proper noun (PROPN) in the sentence \"*I love Berlin*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## Multilingual Universal Part-of-Speech Tagging in Flair (fast model) This is the fast multilingual universal part-of-speech tagging model that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **92,88** (12 UD Treebanks covering English, German, French, Italian, Dutch, Polish, Spanish, Swedish, Danish, Norwegian, Finnish and Czech) Predicts universal POS tags: | **tag** | **meaning** | |---------------------------------|-----------| |ADJ | adjective | | ADP | adposition | | ADV | adverb | | AUX | auxiliary | | CCONJ | coordinating conjunction | | DET | determiner | | INTJ | interjection | | NOUN | noun | | NUM | numeral | | PART | particle | | PRON | pronoun | | PROPN | proper noun | | PUNCT | punctuation | | SCONJ | subordinating conjunction | | SYM | symbol | | VERB | verb | | X | other | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/upos-multi-fast") # make example sentence sentence = Sentence("Ich liebe Berlin, as they say. ") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('pos'): print(entity) ``` This yields the following output: ``` Span [1]: "Ich" [− Labels: PRON (0.9999)] Span [2]: "liebe" [− Labels: VERB (0.9999)] Span [3]: "Berlin" [− Labels: PROPN (0.9997)] Span [4]: "," [− Labels: PUNCT (1.0)] Span [5]: "as" [− Labels: SCONJ (0.9991)] Span [6]: "they" [− Labels: PRON (0.9998)] Span [7]: "say" [− Labels: VERB (0.9998)] Span [8]: "." [− Labels: PUNCT (1.0)] ``` So, the words "*Ich*" and "*they*" are labeled as **pronouns** (PRON), while "*liebe*" and "*say*" are labeled as **verbs** (VERB) in the multilingual sentence "*Ich liebe Berlin, as they say*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import MultiCorpus from flair.datasets import UD_ENGLISH, UD_GERMAN, UD_FRENCH, UD_ITALIAN, UD_POLISH, UD_DUTCH, UD_CZECH, \ UD_DANISH, UD_SPANISH, UD_SWEDISH, UD_NORWEGIAN, UD_FINNISH from flair.embeddings import StackedEmbeddings, FlairEmbeddings # 1. make a multi corpus consisting of 12 UD treebanks (in_memory=False here because this corpus becomes large) corpus = MultiCorpus([ UD_ENGLISH(in_memory=False), UD_GERMAN(in_memory=False), UD_DUTCH(in_memory=False), UD_FRENCH(in_memory=False), UD_ITALIAN(in_memory=False), UD_SPANISH(in_memory=False), UD_POLISH(in_memory=False), UD_CZECH(in_memory=False), UD_DANISH(in_memory=False), UD_SWEDISH(in_memory=False), UD_NORWEGIAN(in_memory=False), UD_FINNISH(in_memory=False), ]) # 2. what tag do we want to predict? tag_type = 'upos' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # contextual string embeddings, forward FlairEmbeddings('multi-forward-fast'), # contextual string embeddings, backward FlairEmbeddings('multi-backward-fast'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. 
initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type, use_crf=False) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/upos-multi-fast', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
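A minimal sketch, assuming list input to `predict` as in recent Flair releases: tagging short sentences from several of the covered languages with the one loaded model.

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/upos-multi-fast")

sentences = [
    Sentence("Ich liebe Berlin ."),  # German
    Sentence("J'aime Berlin ."),     # French
    Sentence("Amo Berlino ."),       # Italian
]

tagger.predict(sentences, mini_batch_size=16)

# print one word/TAG line per sentence
for sentence in sentences:
    print(" ".join(f"{span.text}/{span.tag}" for span in sentence.get_spans("pos")))
```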
{"language": ["en", "de", "fr", "it", "nl", "pl", "es", "sv", "da", "no", "fi", "cs"], "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "Ich liebe Berlin, as they say."}]}
flair/upos-multi-fast
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "de", "fr", "it", "nl", "pl", "es", "sv", "da", "no", "fi", "cs", "dataset:ontonotes", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en", "de", "fr", "it", "nl", "pl", "es", "sv", "da", "no", "fi", "cs" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #de #fr #it #nl #pl #es #sv #da #no #fi #cs #dataset-ontonotes #region-us
Multilingual Universal Part-of-Speech Tagging in Flair (fast model) ------------------------------------------------------------------- This is the fast multilingual universal part-of-speech tagging model that ships with Flair. F1-Score: 92,88 (12 UD Treebanks covering English, German, French, Italian, Dutch, Polish, Spanish, Swedish, Danish, Norwegian, Finnish and Czech) Predicts universal POS tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the words "*Ich*" and "*they*" are labeled as pronouns (PRON), while "*liebe*" and "*say*" are labeled as verbs (VERB) in the multilingual sentence "*Ich liebe Berlin, as they say*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the words \"*Ich*\" and \"*they*\" are labeled as pronouns (PRON), while \"*liebe*\" and \"*say*\" are labeled as verbs (VERB) in the multilingual sentence \"*Ich liebe Berlin, as they say*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #de #fr #it #nl #pl #es #sv #da #no #fi #cs #dataset-ontonotes #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the words \"*Ich*\" and \"*they*\" are labeled as pronouns (PRON), while \"*liebe*\" and \"*say*\" are labeled as verbs (VERB) in the multilingual sentence \"*Ich liebe Berlin, as they say*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## Multilingual Universal Part-of-Speech Tagging in Flair (default model) This is the default multilingual universal part-of-speech tagging model that ships with [Flair](https://github.com/flairNLP/flair/). F1-Score: **98,47** (12 UD Treebanks covering English, German, French, Italian, Dutch, Polish, Spanish, Swedish, Danish, Norwegian, Finnish and Czech) Predicts universal POS tags: | **tag** | **meaning** | |---------------------------------|-----------| |ADJ | adjective | | ADP | adposition | | ADV | adverb | | AUX | auxiliary | | CCONJ | coordinating conjunction | | DET | determiner | | INTJ | interjection | | NOUN | noun | | NUM | numeral | | PART | particle | | PRON | pronoun | | PROPN | proper noun | | PUNCT | punctuation | | SCONJ | subordinating conjunction | | SYM | symbol | | VERB | verb | | X | other | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/upos-multi") # make example sentence sentence = Sentence("Ich liebe Berlin, as they say. ") # predict POS tags tagger.predict(sentence) # print sentence print(sentence) # iterate over tokens and print the predicted POS label print("The following POS tags are found:") for token in sentence: print(token.get_label("upos")) ``` This yields the following output: ``` Token[0]: "Ich" → PRON (0.9999) Token[1]: "liebe" → VERB (0.9999) Token[2]: "Berlin" → PROPN (0.9997) Token[3]: "," → PUNCT (1.0) Token[4]: "as" → SCONJ (0.9991) Token[5]: "they" → PRON (0.9998) Token[6]: "say" → VERB (0.9998) Token[7]: "." → PUNCT (1.0) ``` So, the words "*Ich*" and "*they*" are labeled as **pronouns** (PRON), while "*liebe*" and "*say*" are labeled as **verbs** (VERB) in the multilingual sentence "*Ich liebe Berlin, as they say*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import MultiCorpus from flair.datasets import UD_ENGLISH, UD_GERMAN, UD_FRENCH, UD_ITALIAN, UD_POLISH, UD_DUTCH, UD_CZECH, \ UD_DANISH, UD_SPANISH, UD_SWEDISH, UD_NORWEGIAN, UD_FINNISH from flair.embeddings import StackedEmbeddings, FlairEmbeddings # 1. make a multi corpus consisting of 12 UD treebanks (in_memory=False here because this corpus becomes large) corpus = MultiCorpus([ UD_ENGLISH(in_memory=False), UD_GERMAN(in_memory=False), UD_DUTCH(in_memory=False), UD_FRENCH(in_memory=False), UD_ITALIAN(in_memory=False), UD_SPANISH(in_memory=False), UD_POLISH(in_memory=False), UD_CZECH(in_memory=False), UD_DANISH(in_memory=False), UD_SWEDISH(in_memory=False), UD_NORWEGIAN(in_memory=False), UD_FINNISH(in_memory=False), ]) # 2. what tag do we want to predict? tag_type = 'upos' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # contextual string embeddings, forward FlairEmbeddings('multi-forward'), # contextual string embeddings, backward FlairEmbeddings('multi-backward'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type, use_crf=False) # 6. 
initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/upos-multi', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following paper when using this model. ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ``` --- ### Issues? The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
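A minimal sketch mirroring the token-level access used in the demo above (a `Label` object with a `.value` attribute is assumed, per the demo output): rendering the sentence as compact word/TAG pairs.

```python
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("flair/upos-multi")

sentence = Sentence("Ich liebe Berlin, as they say .")
tagger.predict(sentence)

# render the sentence as word/TAG pairs, token by token
tagged = " ".join(f"{token.text}/{token.get_label('upos').value}" for token in sentence)
print(tagged)
# e.g. Ich/PRON liebe/VERB Berlin/PROPN ,/PUNCT as/SCONJ they/PRON say/VERB ./PUNCT
```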
{"language": ["en", "de", "fr", "it", "nl", "pl", "es", "sv", "da", "no", "fi", "cs"], "tags": ["flair", "token-classification", "sequence-tagger-model"], "datasets": ["ontonotes"], "widget": [{"text": "Ich liebe Berlin, as they say"}]}
flair/upos-multi
null
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "de", "fr", "it", "nl", "pl", "es", "sv", "da", "no", "fi", "cs", "dataset:ontonotes", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "en", "de", "fr", "it", "nl", "pl", "es", "sv", "da", "no", "fi", "cs" ]
TAGS #flair #pytorch #token-classification #sequence-tagger-model #en #de #fr #it #nl #pl #es #sv #da #no #fi #cs #dataset-ontonotes #region-us
Multilingual Universal Part-of-Speech Tagging in Flair (default model) ---------------------------------------------------------------------- This is the default multilingual universal part-of-speech tagging model that ships with Flair. F1-Score: 98,47 (12 UD Treebanks covering English, German, French, Italian, Dutch, Polish, Spanish, Swedish, Danish, Norwegian, Finnish and Czech) Predicts universal POS tags: Based on Flair embeddings and LSTM-CRF. --- ### Demo: How to use in Flair Requires: Flair ('pip install flair') This yields the following output: So, the words "*Ich*" and "*they*" are labeled as pronouns (PRON), while "*liebe*" and "*say*" are labeled as verbs (VERB) in the multilingual sentence "*Ich liebe Berlin, as they say*". --- ### Training: Script to train this model The following Flair script was used to train this model: --- ### Cite Please cite the following paper when using this model. --- ### Issues? The Flair issue tracker is available here.
[ "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the words \"*Ich*\" and \"*they*\" are labeled as pronouns (PRON), while \"*liebe*\" and \"*say*\" are labeled as verbs (VERB) in the multilingual sentence \"*Ich liebe Berlin, as they say*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
[ "TAGS\n#flair #pytorch #token-classification #sequence-tagger-model #en #de #fr #it #nl #pl #es #sv #da #no #fi #cs #dataset-ontonotes #region-us \n", "### Demo: How to use in Flair\n\n\nRequires: Flair ('pip install flair')\n\n\nThis yields the following output:\n\n\nSo, the words \"*Ich*\" and \"*they*\" are labeled as pronouns (PRON), while \"*liebe*\" and \"*say*\" are labeled as verbs (VERB) in the multilingual sentence \"*Ich liebe Berlin, as they say*\".\n\n\n\n\n---", "### Training: Script to train this model\n\n\nThe following Flair script was used to train this model:\n\n\n\n\n---", "### Cite\n\n\nPlease cite the following paper when using this model.\n\n\n\n\n---", "### Issues?\n\n\nThe Flair issue tracker is available here." ]
token-classification
flair
## Test model README Some test README description
{"tags": ["flair", "token-classification"], "widget": [{"text": "does this work"}]}
flairbook/flairmodel
null
[ "flair", "pytorch", "token-classification", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #flair #pytorch #token-classification #region-us
## Test model README Some test README description
[ "## Test model README\nSome test README description" ]
[ "TAGS\n#flair #pytorch #token-classification #region-us \n", "## Test model README\nSome test README description" ]
token-classification
flair
## Test model README Some test README description
{"tags": ["flair", "token-classification"], "widget": [{"text": "does this work"}]}
flairbook2/flairmodel
null
[ "flair", "pytorch", "token-classification", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #flair #pytorch #token-classification #region-us
## Test model README Some test README description
[ "## Test model README\nSome test README description" ]
[ "TAGS\n#flair #pytorch #token-classification #region-us \n", "## Test model README\nSome test README description" ]
text-generation
transformers
# Marty DialoGPT Model
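The card above is only a title, so a brief, hedged usage sketch may help. It follows the standard DialoGPT generation pattern from the `transformers` library; the repository id is taken from this row, while the example prompt and generation settings are assumptions for illustration only.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hedged sketch (not part of the original card): standard DialoGPT-style generation.
# The prompt and max_length below are illustrative assumptions.
model_name = "flakje/DialoGPT-small-Marty"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Encode a single user turn, terminated by the EOS token as DialoGPT expects.
input_ids = tokenizer.encode("Hi Marty, how are you?" + tokenizer.eos_token, return_tensors="pt")
reply_ids = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)

# Decode only the newly generated tokens, i.e. the bot's reply.
print(tokenizer.decode(reply_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
```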
{"tags": ["conversational"]}
flakje/DialoGPT-small-Marty
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Marty DialoGPT Model
[ "# Marty DialoGPT Model" ]
[ "TAGS\n#transformers #pytorch #gpt2 #text-generation #conversational #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Marty DialoGPT Model" ]
fill-mask
transformers
# FlauBERT: Unsupervised Language Model Pre-training for French **FlauBERT** is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/eng/jean-zay/ ) supercomputer. Along with FlauBERT comes [**FLUE**](https://github.com/getalp/Flaubert/tree/master/flue): an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language.For more details please refer to the [official website](https://github.com/getalp/Flaubert). ## FlauBERT models | Model name | Number of layers | Attention Heads | Embedding Dimension | Total Parameters | | :------: | :---: | :---: | :---: | :---: | | `flaubert-small-cased` | 6 | 8 | 512 | 54 M | | `flaubert-base-uncased` | 12 | 12 | 768 | 137 M | | `flaubert-base-cased` | 12 | 12 | 768 | 138 M | | `flaubert-large-cased` | 24 | 16 | 1024 | 373 M | **Note:** `flaubert-small-cased` is partially trained so performance is not guaranteed. Consider using it for debugging purpose only. ## Using FlauBERT with Hugging Face's Transformers ```python import torch from transformers import FlaubertModel, FlaubertTokenizer # Choose among ['flaubert/flaubert_small_cased', 'flaubert/flaubert_base_uncased', # 'flaubert/flaubert_base_cased', 'flaubert/flaubert_large_cased'] modelname = 'flaubert/flaubert_base_cased' # Load pretrained model and tokenizer flaubert, log = FlaubertModel.from_pretrained(modelname, output_loading_info=True) flaubert_tokenizer = FlaubertTokenizer.from_pretrained(modelname, do_lowercase=False) # do_lowercase=False if using cased models, True if using uncased ones sentence = "Le chat mange une pomme." 
token_ids = torch.tensor([flaubert_tokenizer.encode(sentence)]) last_layer = flaubert(token_ids)[0] print(last_layer.shape) # torch.Size([1, 8, 768]) -> (batch size x number of tokens x embedding dimension) # The BERT [CLS] token correspond to the first hidden state of the last layer cls_embedding = last_layer[:, 0, :] ``` **Notes:** if your `transformers` version is <=2.10.0, `modelname` should take one of the following values: ``` ['flaubert-small-cased', 'flaubert-base-uncased', 'flaubert-base-cased', 'flaubert-large-cased'] ``` ## References If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers: [LREC paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.302.pdf) ``` @InProceedings{le2020flaubert, author = {Le, Hang and Vial, Lo\"{i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb\'{e}, Beno\^{i}t and Besacier, Laurent and Schwab, Didier}, title = {FlauBERT: Unsupervised Language Model Pre-training for French}, booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference}, month = {May}, year = {2020}, address = {Marseille, France}, publisher = {European Language Resources Association}, pages = {2479--2490}, url = {https://www.aclweb.org/anthology/2020.lrec-1.302} } ``` [TALN paper](https://hal.archives-ouvertes.fr/hal-02784776/) ``` @inproceedings{le2020flaubert, title = {FlauBERT: des mod{\`e}les de langue contextualis{\'e}s pr{\'e}-entra{\^\i}n{\'e}s pour le fran{\c{c}}ais}, author = {Le, Hang and Vial, Lo{\"\i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb{\'e}, Beno{\^\i}t and Besacier, Laurent and Schwab, Didier}, booktitle = {Actes de la 6e conf{\'e}rence conjointe Journ{\'e}es d'{\'E}tudes sur la Parole (JEP, 31e {\'e}dition), Traitement Automatique des Langues Naturelles (TALN, 27e {\'e}dition), Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (R{\'E}CITAL, 22e {\'e}dition). Volume 2: Traitement Automatique des Langues Naturelles}, pages = {268--278}, year = {2020}, organization = {ATALA} } ```
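The card demonstrates feature extraction with `FlaubertModel`, but the repository is tagged for fill-mask, so a minimal, hedged sketch of that pipeline is given below. The French example sentence and the top-3 cut-off are assumptions, and the mask token is read from the tokenizer rather than hard-coded.

```python
from transformers import pipeline

# Hedged sketch, not from the original card: the repository is tagged fill-mask,
# so the standard pipeline should apply. The example sentence is an assumption.
unmasker = pipeline("fill-mask", model="flaubert/flaubert_base_cased")

# Use the tokenizer's own mask token instead of hard-coding FlauBERT's special token.
mask = unmasker.tokenizer.mask_token
for prediction in unmasker(f"Le chat mange une {mask}.")[:3]:
    print(prediction["token_str"], round(prediction["score"], 4))
```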
{"language": "fr", "license": "mit", "tags": ["bert", "language-model", "flaubert", "flue", "french", "bert-base", "flaubert-base", "cased"], "datasets": ["flaubert"], "metrics": ["flue"]}
flaubert/flaubert_base_cased
null
[ "transformers", "pytorch", "flaubert", "fill-mask", "bert", "language-model", "flue", "french", "bert-base", "flaubert-base", "cased", "fr", "dataset:flaubert", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "fr" ]
TAGS #transformers #pytorch #flaubert #fill-mask #bert #language-model #flue #french #bert-base #flaubert-base #cased #fr #dataset-flaubert #license-mit #autotrain_compatible #endpoints_compatible #has_space #region-us
FlauBERT: Unsupervised Language Model Pre-training for French ============================================================= FlauBERT is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) Jean Zay supercomputer. Along with FlauBERT comes FLUE: an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language.For more details please refer to the official website. FlauBERT models --------------- Note: 'flaubert-small-cased' is partially trained so performance is not guaranteed. Consider using it for debugging purpose only. Using FlauBERT with Hugging Face's Transformers ----------------------------------------------- Notes: if your 'transformers' version is <=2.10.0, 'modelname' should take one of the following values: References ---------- If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers: LREC paper TALN paper
[]
[ "TAGS\n#transformers #pytorch #flaubert #fill-mask #bert #language-model #flue #french #bert-base #flaubert-base #cased #fr #dataset-flaubert #license-mit #autotrain_compatible #endpoints_compatible #has_space #region-us \n" ]
fill-mask
transformers
# FlauBERT: Unsupervised Language Model Pre-training for French **FlauBERT** is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/eng/jean-zay/ ) supercomputer. Along with FlauBERT comes [**FLUE**](https://github.com/getalp/Flaubert/tree/master/flue): an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language.For more details please refer to the [official website](https://github.com/getalp/Flaubert). ## FlauBERT models | Model name | Number of layers | Attention Heads | Embedding Dimension | Total Parameters | | :------: | :---: | :---: | :---: | :---: | | `flaubert-small-cased` | 6 | 8 | 512 | 54 M | | `flaubert-base-uncased` | 12 | 12 | 768 | 137 M | | `flaubert-base-cased` | 12 | 12 | 768 | 138 M | | `flaubert-large-cased` | 24 | 16 | 1024 | 373 M | **Note:** `flaubert-small-cased` is partially trained so performance is not guaranteed. Consider using it for debugging purpose only. ## Using FlauBERT with Hugging Face's Transformers ```python import torch from transformers import FlaubertModel, FlaubertTokenizer # Choose among ['flaubert/flaubert_small_cased', 'flaubert/flaubert_base_uncased', # 'flaubert/flaubert_base_cased', 'flaubert/flaubert_large_cased'] modelname = 'flaubert/flaubert_base_cased' # Load pretrained model and tokenizer flaubert, log = FlaubertModel.from_pretrained(modelname, output_loading_info=True) flaubert_tokenizer = FlaubertTokenizer.from_pretrained(modelname, do_lowercase=False) # do_lowercase=False if using cased models, True if using uncased ones sentence = "Le chat mange une pomme." 
token_ids = torch.tensor([flaubert_tokenizer.encode(sentence)]) last_layer = flaubert(token_ids)[0] print(last_layer.shape) # torch.Size([1, 8, 768]) -> (batch size x number of tokens x embedding dimension) # The BERT [CLS] token correspond to the first hidden state of the last layer cls_embedding = last_layer[:, 0, :] ``` **Notes:** if your `transformers` version is <=2.10.0, `modelname` should take one of the following values: ``` ['flaubert-small-cased', 'flaubert-base-uncased', 'flaubert-base-cased', 'flaubert-large-cased'] ``` ## References If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers: [LREC paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.302.pdf) ``` @InProceedings{le2020flaubert, author = {Le, Hang and Vial, Lo\"{i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb\'{e}, Beno\^{i}t and Besacier, Laurent and Schwab, Didier}, title = {FlauBERT: Unsupervised Language Model Pre-training for French}, booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference}, month = {May}, year = {2020}, address = {Marseille, France}, publisher = {European Language Resources Association}, pages = {2479--2490}, url = {https://www.aclweb.org/anthology/2020.lrec-1.302} } ``` [TALN paper](https://hal.archives-ouvertes.fr/hal-02784776/) ``` @inproceedings{le2020flaubert, title = {FlauBERT: des mod{\`e}les de langue contextualis{\'e}s pr{\'e}-entra{\^\i}n{\'e}s pour le fran{\c{c}}ais}, author = {Le, Hang and Vial, Lo{\"\i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb{\'e}, Beno{\^\i}t and Besacier, Laurent and Schwab, Didier}, booktitle = {Actes de la 6e conf{\'e}rence conjointe Journ{\'e}es d'{\'E}tudes sur la Parole (JEP, 31e {\'e}dition), Traitement Automatique des Langues Naturelles (TALN, 27e {\'e}dition), Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (R{\'E}CITAL, 22e {\'e}dition). Volume 2: Traitement Automatique des Langues Naturelles}, pages = {268--278}, year = {2020}, organization = {ATALA} } ```
{"language": "fr", "license": "mit", "tags": ["bert", "language-model", "flaubert", "flue", "french", "flaubert-base", "uncased"], "datasets": ["flaubert"], "metrics": ["flue"]}
flaubert/flaubert_base_uncased
null
[ "transformers", "pytorch", "flaubert", "fill-mask", "bert", "language-model", "flue", "french", "flaubert-base", "uncased", "fr", "dataset:flaubert", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "fr" ]
TAGS #transformers #pytorch #flaubert #fill-mask #bert #language-model #flue #french #flaubert-base #uncased #fr #dataset-flaubert #license-mit #autotrain_compatible #endpoints_compatible #has_space #region-us
FlauBERT: Unsupervised Language Model Pre-training for French ============================================================= FlauBERT is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) Jean Zay supercomputer. Along with FlauBERT comes FLUE: an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language.For more details please refer to the official website. FlauBERT models --------------- Note: 'flaubert-small-cased' is partially trained so performance is not guaranteed. Consider using it for debugging purpose only. Using FlauBERT with Hugging Face's Transformers ----------------------------------------------- Notes: if your 'transformers' version is <=2.10.0, 'modelname' should take one of the following values: References ---------- If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers: LREC paper TALN paper
[]
[ "TAGS\n#transformers #pytorch #flaubert #fill-mask #bert #language-model #flue #french #flaubert-base #uncased #fr #dataset-flaubert #license-mit #autotrain_compatible #endpoints_compatible #has_space #region-us \n" ]
fill-mask
transformers
# FlauBERT: Unsupervised Language Model Pre-training for French **FlauBERT** is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/eng/jean-zay/ ) supercomputer. Along with FlauBERT comes [**FLUE**](https://github.com/getalp/Flaubert/tree/master/flue): an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language.For more details please refer to the [official website](https://github.com/getalp/Flaubert). ## FlauBERT models | Model name | Number of layers | Attention Heads | Embedding Dimension | Total Parameters | | :------: | :---: | :---: | :---: | :---: | | `flaubert-small-cased` | 6 | 8 | 512 | 54 M | | `flaubert-base-uncased` | 12 | 12 | 768 | 137 M | | `flaubert-base-cased` | 12 | 12 | 768 | 138 M | | `flaubert-large-cased` | 24 | 16 | 1024 | 373 M | **Note:** `flaubert-small-cased` is partially trained so performance is not guaranteed. Consider using it for debugging purpose only. ## Using FlauBERT with Hugging Face's Transformers ```python import torch from transformers import FlaubertModel, FlaubertTokenizer # Choose among ['flaubert/flaubert_small_cased', 'flaubert/flaubert_base_uncased', # 'flaubert/flaubert_base_cased', 'flaubert/flaubert_large_cased'] modelname = 'flaubert/flaubert_base_cased' # Load pretrained model and tokenizer flaubert, log = FlaubertModel.from_pretrained(modelname, output_loading_info=True) flaubert_tokenizer = FlaubertTokenizer.from_pretrained(modelname, do_lowercase=False) # do_lowercase=False if using cased models, True if using uncased ones sentence = "Le chat mange une pomme." 
token_ids = torch.tensor([flaubert_tokenizer.encode(sentence)]) last_layer = flaubert(token_ids)[0] print(last_layer.shape) # torch.Size([1, 8, 768]) -> (batch size x number of tokens x embedding dimension) # The BERT [CLS] token correspond to the first hidden state of the last layer cls_embedding = last_layer[:, 0, :] ``` **Notes:** if your `transformers` version is <=2.10.0, `modelname` should take one of the following values: ``` ['flaubert-small-cased', 'flaubert-base-uncased', 'flaubert-base-cased', 'flaubert-large-cased'] ``` ## References If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers: [LREC paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.302.pdf) ``` @InProceedings{le2020flaubert, author = {Le, Hang and Vial, Lo\"{i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb\'{e}, Beno\^{i}t and Besacier, Laurent and Schwab, Didier}, title = {FlauBERT: Unsupervised Language Model Pre-training for French}, booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference}, month = {May}, year = {2020}, address = {Marseille, France}, publisher = {European Language Resources Association}, pages = {2479--2490}, url = {https://www.aclweb.org/anthology/2020.lrec-1.302} } ``` [TALN paper](https://hal.archives-ouvertes.fr/hal-02784776/) ``` @inproceedings{le2020flaubert, title = {FlauBERT: des mod{\`e}les de langue contextualis{\'e}s pr{\'e}-entra{\^\i}n{\'e}s pour le fran{\c{c}}ais}, author = {Le, Hang and Vial, Lo{\"\i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb{\'e}, Beno{\^\i}t and Besacier, Laurent and Schwab, Didier}, booktitle = {Actes de la 6e conf{\'e}rence conjointe Journ{\'e}es d'{\'E}tudes sur la Parole (JEP, 31e {\'e}dition), Traitement Automatique des Langues Naturelles (TALN, 27e {\'e}dition), Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (R{\'E}CITAL, 22e {\'e}dition). Volume 2: Traitement Automatique des Langues Naturelles}, pages = {268--278}, year = {2020}, organization = {ATALA} } ```
{"language": "fr", "license": "mit", "tags": ["bert", "language-model", "flaubert", "flue", "french", "bert-large", "flaubert-large", "cased"], "datasets": ["flaubert"], "metrics": ["flue"]}
flaubert/flaubert_large_cased
null
[ "transformers", "pytorch", "flaubert", "fill-mask", "bert", "language-model", "flue", "french", "bert-large", "flaubert-large", "cased", "fr", "dataset:flaubert", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "fr" ]
TAGS #transformers #pytorch #flaubert #fill-mask #bert #language-model #flue #french #bert-large #flaubert-large #cased #fr #dataset-flaubert #license-mit #autotrain_compatible #endpoints_compatible #has_space #region-us
FlauBERT: Unsupervised Language Model Pre-training for French ============================================================= FlauBERT is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) Jean Zay supercomputer. Along with FlauBERT comes FLUE: an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language.For more details please refer to the official website. FlauBERT models --------------- Note: 'flaubert-small-cased' is partially trained so performance is not guaranteed. Consider using it for debugging purpose only. Using FlauBERT with Hugging Face's Transformers ----------------------------------------------- Notes: if your 'transformers' version is <=2.10.0, 'modelname' should take one of the following values: References ---------- If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers: LREC paper TALN paper
[]
[ "TAGS\n#transformers #pytorch #flaubert #fill-mask #bert #language-model #flue #french #bert-large #flaubert-large #cased #fr #dataset-flaubert #license-mit #autotrain_compatible #endpoints_compatible #has_space #region-us \n" ]
fill-mask
transformers
# FlauBERT: Unsupervised Language Model Pre-training for French **FlauBERT** is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/eng/jean-zay/ ) supercomputer. Along with FlauBERT comes [**FLUE**](https://github.com/getalp/Flaubert/tree/master/flue): an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language.For more details please refer to the [official website](https://github.com/getalp/Flaubert). ## FlauBERT models | Model name | Number of layers | Attention Heads | Embedding Dimension | Total Parameters | | :------: | :---: | :---: | :---: | :---: | | `flaubert-small-cased` | 6 | 8 | 512 | 54 M | | `flaubert-base-uncased` | 12 | 12 | 768 | 137 M | | `flaubert-base-cased` | 12 | 12 | 768 | 138 M | | `flaubert-large-cased` | 24 | 16 | 1024 | 373 M | **Note:** `flaubert-small-cased` is partially trained so performance is not guaranteed. Consider using it for debugging purpose only. ## Using FlauBERT with Hugging Face's Transformers ```python import torch from transformers import FlaubertModel, FlaubertTokenizer # Choose among ['flaubert/flaubert_small_cased', 'flaubert/flaubert_base_uncased', # 'flaubert/flaubert_base_cased', 'flaubert/flaubert_large_cased'] modelname = 'flaubert/flaubert_base_cased' # Load pretrained model and tokenizer flaubert, log = FlaubertModel.from_pretrained(modelname, output_loading_info=True) flaubert_tokenizer = FlaubertTokenizer.from_pretrained(modelname, do_lowercase=False) # do_lowercase=False if using cased models, True if using uncased ones sentence = "Le chat mange une pomme." 
token_ids = torch.tensor([flaubert_tokenizer.encode(sentence)]) last_layer = flaubert(token_ids)[0] print(last_layer.shape) # torch.Size([1, 8, 768]) -> (batch size x number of tokens x embedding dimension) # The BERT [CLS] token correspond to the first hidden state of the last layer cls_embedding = last_layer[:, 0, :] ``` **Notes:** if your `transformers` version is <=2.10.0, `modelname` should take one of the following values: ``` ['flaubert-small-cased', 'flaubert-base-uncased', 'flaubert-base-cased', 'flaubert-large-cased'] ``` ## References If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers: [LREC paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.302.pdf) ``` @InProceedings{le2020flaubert, author = {Le, Hang and Vial, Lo\"{i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb\'{e}, Beno\^{i}t and Besacier, Laurent and Schwab, Didier}, title = {FlauBERT: Unsupervised Language Model Pre-training for French}, booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference}, month = {May}, year = {2020}, address = {Marseille, France}, publisher = {European Language Resources Association}, pages = {2479--2490}, url = {https://www.aclweb.org/anthology/2020.lrec-1.302} } ``` [TALN paper](https://hal.archives-ouvertes.fr/hal-02784776/) ``` @inproceedings{le2020flaubert, title = {FlauBERT: des mod{\`e}les de langue contextualis{\'e}s pr{\'e}-entra{\^\i}n{\'e}s pour le fran{\c{c}}ais}, author = {Le, Hang and Vial, Lo{\"\i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb{\'e}, Beno{\^\i}t and Besacier, Laurent and Schwab, Didier}, booktitle = {Actes de la 6e conf{\'e}rence conjointe Journ{\'e}es d'{\'E}tudes sur la Parole (JEP, 31e {\'e}dition), Traitement Automatique des Langues Naturelles (TALN, 27e {\'e}dition), Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (R{\'E}CITAL, 22e {\'e}dition). Volume 2: Traitement Automatique des Langues Naturelles}, pages = {268--278}, year = {2020}, organization = {ATALA} } ```
{"language": "fr", "license": "mit", "tags": ["bert", "language-model", "flaubert", "flue", "french", "flaubert-small", "cased"], "datasets": ["flaubert"], "metrics": ["flue"]}
flaubert/flaubert_small_cased
null
[ "transformers", "pytorch", "flaubert", "fill-mask", "bert", "language-model", "flue", "french", "flaubert-small", "cased", "fr", "dataset:flaubert", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "fr" ]
TAGS #transformers #pytorch #flaubert #fill-mask #bert #language-model #flue #french #flaubert-small #cased #fr #dataset-flaubert #license-mit #autotrain_compatible #endpoints_compatible #has_space #region-us
FlauBERT: Unsupervised Language Model Pre-training for French ============================================================= FlauBERT is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) Jean Zay supercomputer. Along with FlauBERT comes FLUE: an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language.For more details please refer to the official website. FlauBERT models --------------- Note: 'flaubert-small-cased' is partially trained so performance is not guaranteed. Consider using it for debugging purpose only. Using FlauBERT with Hugging Face's Transformers ----------------------------------------------- Notes: if your 'transformers' version is <=2.10.0, 'modelname' should take one of the following values: References ---------- If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers: LREC paper TALN paper
[]
[ "TAGS\n#transformers #pytorch #flaubert #fill-mask #bert #language-model #flue #french #flaubert-small #cased #fr #dataset-flaubert #license-mit #autotrain_compatible #endpoints_compatible #has_space #region-us \n" ]
fill-mask
transformers
MLM fine-tuned from Bertimbau-Base model on the Brazilian Federal Official Gazette (200k instances)
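As a hedged illustration of how such a fill-mask checkpoint is typically queried (the Portuguese example sentence is an assumption, not from the card):

```python
from transformers import pipeline

# Minimal usage sketch (not part of the original card). The model id is this
# repository; the example sentence about the Official Gazette is illustrative only.
fill = pipeline("fill-mask", model="flavio-nakasato/berdou_200k")
mask = fill.tokenizer.mask_token
for pred in fill(f"O decreto foi publicado no {mask} Oficial da União."):
    print(pred["token_str"], round(pred["score"], 4))
```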
{}
flavio-nakasato/berdou_200k
null
[ "transformers", "pytorch", "bert", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #bert #fill-mask #autotrain_compatible #endpoints_compatible #region-us
MLM fine-tuned from Bertimbau-Base model on the Brazilian Federal Official Gazette (200k instances)
[]
[ "TAGS\n#transformers #pytorch #bert #fill-mask #autotrain_compatible #endpoints_compatible #region-us \n" ]
fill-mask
transformers
MLM fine-tuned from Bertimbau-Base model on the Brazilian Federal Official Gazette (500k instances)
{}
flavio-nakasato/berdou_500k
null
[ "transformers", "pytorch", "tensorboard", "bert", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #bert #fill-mask #autotrain_compatible #endpoints_compatible #region-us
MLM fine-tuned from Bertimbau-Base model on the Brazilian Federal Official Gazette (500k instances)
[]
[ "TAGS\n#transformers #pytorch #tensorboard #bert #fill-mask #autotrain_compatible #endpoints_compatible #region-us \n" ]
fill-mask
transformers
RoBERTa model pretrained on the Brazilian Federal Official Gazette (200k instances).
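A hedged sketch of pulling contextual embeddings from this pretrained RoBERTa encoder (the checkpoint id is this repository; the example sentence is an assumption):

```python
import torch
from transformers import AutoTokenizer, AutoModel

# Hedged sketch (not from the card): encode a sentence and inspect the hidden states.
name = "flavio-nakasato/deeppolicytracker_200k"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModel.from_pretrained(name)

inputs = tokenizer("Fica aprovado o regulamento em anexo.", return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**inputs).last_hidden_state  # (batch, seq_len, hidden_size)
print(last_hidden.shape)
```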
{}
flavio-nakasato/deeppolicytracker_200k
null
[ "transformers", "pytorch", "roberta", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #roberta #fill-mask #autotrain_compatible #endpoints_compatible #region-us
RoBERTa model pretrained on the Brazilian Federal Official Gazette (200k instances).
[]
[ "TAGS\n#transformers #pytorch #roberta #fill-mask #autotrain_compatible #endpoints_compatible #region-us \n" ]
fill-mask
transformers
RoBERTa model pretrained on the Brazilian Federal Official Gazette (500k instances).
{}
flavio-nakasato/deeppolicytracker_500k
null
[ "transformers", "pytorch", "tensorboard", "roberta", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #roberta #fill-mask #autotrain_compatible #endpoints_compatible #region-us
RoBERTa model pretrained on the Brazilian Federal Official Gazette (500k instances).
[]
[ "TAGS\n#transformers #pytorch #tensorboard #roberta #fill-mask #autotrain_compatible #endpoints_compatible #region-us \n" ]
fill-mask
transformers
MLM fine-tuned from BR-BERTo model on the Brazilian Federal Official Gazette (100k instances)
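For variety, a hedged sketch of scoring mask-fill candidates by hand instead of via the pipeline (the example sentence is an assumption):

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

# Hedged sketch (not from the card): rank the top-5 fill-in tokens manually.
name = "flavio-nakasato/roberdou_100k"
tok = AutoTokenizer.from_pretrained(name)
mlm = AutoModelForMaskedLM.from_pretrained(name)

enc = tok(f"O ministro assinou a {tok.mask_token} nesta data.", return_tensors="pt")
with torch.no_grad():
    logits = mlm(**enc).logits

# Locate the mask position and take the five highest-probability tokens.
mask_index = (enc["input_ids"][0] == tok.mask_token_id).nonzero(as_tuple=True)[0]
top5 = logits[0, mask_index].softmax(dim=-1).topk(5)
print(tok.convert_ids_to_tokens(top5.indices[0].tolist()))
```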
{}
flavio-nakasato/roberdou_100k
null
[ "transformers", "pytorch", "roberta", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #transformers #pytorch #roberta #fill-mask #autotrain_compatible #endpoints_compatible #region-us
MLM fine-tuned from BR-BERTo model on the Brazilian Federal Official Gazette (100k instances)
[]
[ "TAGS\n#transformers #pytorch #roberta #fill-mask #autotrain_compatible #endpoints_compatible #region-us \n" ]
text2text-generation
transformers
# Image-captioning-Indonesia This is an encoder-decoder image captioning model using [CLIP](https://huggingface.co/transformers/model_doc/clip.html) as the visual encoder and [Marian](https://huggingface.co/transformers/model_doc/marian.html) as the textual decoder on datasets with Indonesian captions. This model was trained using HuggingFace's Flax framework and is part of the [JAX/Flax Community Week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104) organized by [HuggingFace](https://huggingface.co). All training was done on a TPUv3-8 VM sponsored by the Google Cloud team. ## How to use At the time of writing, you will need to install [HuggingFace](https://github.com/huggingface/) from its latest master branch in order to load `FlaxMarian`. You will also need to have the [`flax_clip_vision_marian` folder](https://github.com/indonesian-nlp/Indonesia-Image-Captioning/tree/main/flax_clip_vision_marian) in your project directory to load the model using the `FlaxCLIPVisionMarianForConditionalGeneration` class. ```python from torchvision.io import ImageReadMode, read_image from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize from torchvision.transforms.functional import InterpolationMode import torch import numpy as np from transformers import MarianTokenizer from flax_clip_vision_marian.modeling_clip_vision_marian import FlaxCLIPVisionMarianForConditionalGeneration clip_marian_model_name = 'flax-community/Image-captioning-Indonesia' model = FlaxCLIPVisionMarianForConditionalGeneration.from_pretrained(clip_marian_model_name) marian_model_name = 'Helsinki-NLP/opus-mt-en-id' tokenizer = MarianTokenizer.from_pretrained(marian_model_name) config = model.config image_size = config.clip_vision_config.image_size # Image transformation transforms = torch.nn.Sequential( Resize([image_size], interpolation=InterpolationMode.BICUBIC), CenterCrop(image_size), ConvertImageDtype(torch.float), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ) # Hyperparameters max_length = 8 num_beams = 4 gen_kwargs = {"max_length": max_length, "num_beams": num_beams} def generate_step(batch): output_ids = model.generate(batch, **gen_kwargs) token_ids = np.array(output_ids.sequences)[0] caption = tokenizer.decode(token_ids) return caption image_file_path = image_file_path image = read_image(image_file_path, mode=ImageReadMode.RGB) image = transforms(image) pixel_values = torch.stack([image]).permute(0, 2, 3, 1).numpy() generated_ids = generate_step(pixel_values) print(generated_ids) ``` ## Training data The model was trained on translated COCO, Flickr and VizWiz captions; each dataset was translated using Google Translate and Marian MT, and only 2 randomly selected captions per image were kept from each dataset. ## Training procedure The model was trained on a TPUv3-8 VM provided by the Google Cloud team. ## Team members - Cahya Wirawan ([@cahya](https://huggingface.co/cahya)) - Galuh Sahid ([@Galuh](https://huggingface.co/Galuh)) - Muhammad Agung Hambali ([@AyameRushia](https://huggingface.co/AyameRushia)) - Samsul Rahmadani ([@munggok](https://huggingface.co/munggok))
{"language": "id"}
flax-community/Image-captioning-Indonesia
null
[ "transformers", "jax", "clip-vision-marian", "text2text-generation", "id", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "id" ]
TAGS #transformers #jax #clip-vision-marian #text2text-generation #id #autotrain_compatible #endpoints_compatible #has_space #region-us
# Image-captioning-Indonesia This is an encoder-decoder image captioning model using CLIP as the visual encoder and Marian as the textual decoder on datasets with Indonesian captions. This model was trained using HuggingFace's Flax framework and is part of the JAX/Flax Community Week organized by HuggingFace. All training was done on a TPUv3-8 VM sponsored by the Google Cloud team. ## How to use At time of writing, you will need to install HuggingFace from its latest master branch in order to load 'FlaxMarian'. You will also need to have the 'flax_clip_vision_marian' folder in your project directory to load the model using the 'FlaxCLIPVisionMarianForConditionalGeneration' class. ## Training data The Model was trained on translated Coco,Flickr and ViZWiz, each of them were translated using google translate and marian mt. we took only random 2 captions per image for each datasets ## Training procedure The model was trained on a TPUv3-8 VM provided by the Google Cloud team. ## Team members - Cahya Wirawan (@cahya) - Galuh Sahid (@Galuh) - Muhammad Agung Hambali (@AyameRushia) - Samsul Rahmadani (@munggok)
[ "# Image-captioning-Indonesia\n\nThis is an encoder-decoder image captioning model using CLIP as the visual encoder and Marian as the textual decoder on datasets with Indonesian captions.\n\nThis model was trained using HuggingFace's Flax framework and is part of the JAX/Flax Community Week organized by HuggingFace. All training was done on a TPUv3-8 VM sponsored by the Google Cloud team.", "## How to use\nAt time of writing, you will need to install HuggingFace from its latest master branch in order to load 'FlaxMarian'.\n\nYou will also need to have the 'flax_clip_vision_marian' folder in your project directory to load the model using the 'FlaxCLIPVisionMarianForConditionalGeneration' class.", "## Training data\nThe Model was trained on translated Coco,Flickr and ViZWiz, each of them were translated using google translate and marian mt. we took only random 2 captions per image for each datasets", "## Training procedure \nThe model was trained on a TPUv3-8 VM provided by the Google Cloud team.", "## Team members\n- Cahya Wirawan (@cahya)\n- Galuh Sahid (@Galuh)\n- Muhammad Agung Hambali (@AyameRushia)\n- Samsul Rahmadani (@munggok)" ]
[ "TAGS\n#transformers #jax #clip-vision-marian #text2text-generation #id #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# Image-captioning-Indonesia\n\nThis is an encoder-decoder image captioning model using CLIP as the visual encoder and Marian as the textual decoder on datasets with Indonesian captions.\n\nThis model was trained using HuggingFace's Flax framework and is part of the JAX/Flax Community Week organized by HuggingFace. All training was done on a TPUv3-8 VM sponsored by the Google Cloud team.", "## How to use\nAt time of writing, you will need to install HuggingFace from its latest master branch in order to load 'FlaxMarian'.\n\nYou will also need to have the 'flax_clip_vision_marian' folder in your project directory to load the model using the 'FlaxCLIPVisionMarianForConditionalGeneration' class.", "## Training data\nThe Model was trained on translated Coco,Flickr and ViZWiz, each of them were translated using google translate and marian mt. we took only random 2 captions per image for each datasets", "## Training procedure \nThe model was trained on a TPUv3-8 VM provided by the Google Cloud team.", "## Team members\n- Cahya Wirawan (@cahya)\n- Galuh Sahid (@Galuh)\n- Muhammad Agung Hambali (@AyameRushia)\n- Samsul Rahmadani (@munggok)" ]
null
null
# Neural ODE with Flax This is the result of project ["Reproduce Neural ODE and SDE"][projectlink] in [HuggingFace Flax/JAX community week][comweeklink]. <code>main.py</code> will execute training of ResNet or OdeNet for MNIST dataset. [projectlink]: https://discuss.huggingface.co/t/reproduce-neural-ode-and-neural-sde/7590 [comweeklink]: https://github.com/huggingface/transformers/tree/master/examples/research_projects/jax-projects#projects ## Dependency ### JAX and Flax For JAX installation, please follow [here][jaxinstalllink]. or simply, type ```bash pip install jax jaxlib ``` For Flax installation, ```bash pip install flax ``` [jaxinstalllink]: https://github.com/google/jax#installation Tensorflow-datasets will download MNIST dataset to environment. ## How to run training For (small) ResNet training, ```bash python main.py --model=resnet --lr=1e-4 --n_epoch=20 --batch_size=64 ``` For Neural ODE training, ```bash python main.py --model=odenet --lr=1e-4 --n_epoch=20 --batch_size=64 ``` For Continuous Normalizing Flow, ```bash python main.py --model=cnf --sample_dataset=circles ``` Sample datasets can be chosen as circles, moons, or scurve. # Sample Results ![cnf-viz](https://user-images.githubusercontent.com/72425253/126124351-44e00438-055e-4b1c-90ee-758a545dd602.gif) ![cnf-viz](https://user-images.githubusercontent.com/72425253/126124648-dcb3f8f4-396a-447c-96cf-f9304377fa48.gif) ![cnf-viz](https://user-images.githubusercontent.com/72425253/126127269-4c02ee6a-a9a3-4b9f-b380-f8669f58872b.gif) # Bird Call generation Score SDE These are the codes for the bird call generation score sde model. <code>core-sde-sampler.py</code> will execute the sampler. The sampler uses pretrained weight to generate bird calls. The weight can be found [here](https://github.com/mandelbrot-walker/Birdcall-score-sde/blob/main/ckpt.flax) For using different sample generation parameters change the argument values. For example, ```bash python main.py --sigma=25 --num_steps=500 --signal_to_noise_ratio=0.10 --etol=1e-5 --sample_batch_size = 128 --sample_no = 47 ``` In order to generate the audios, these dependencies are required, ```bash pip install librosa pip install soundfile ``` In order to train the model from scratch, please generate the dataset using this [link](www.kaggle.com/ibraheemmoosa/birdsong-spectogram-generation). The dataset is generated in kaggle. Therefore, during training your username and api key is required in the specified section inside the script. ```bash python main.py --sigma=35 --n_epochs=1000 --batch_size=512 --lr=1e-3 --num_steps=500 --signal_to_noise_ratio=0.15 --etol=1e-5 --sample_batch_size = 64 --sample_no = 23 ``` Generated samples can be found [here](https://github.com/mandelbrot-walker/Birdcall-score-sde/tree/main/generated_samples) and [here](https://colab.research.google.com/drive/1AbF4aIMkSfNs-G__MXzqY7JSrz6qvLYN)
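Since the README only shows CLI invocations, here is a tiny, hedged illustration (not code from this repository) of the JAX ODE-solver primitive that a Flax Neural ODE block typically wraps:

```python
import jax.numpy as jnp
from jax.experimental.ode import odeint

# Illustration only, not from this repository: solve dy/dt = -y with y(0) = 1,
# the kind of continuous-time dynamics a Neural ODE layer integrates.
def dynamics(y, t):
    return -y

y0 = jnp.array([1.0])
ts = jnp.linspace(0.0, 5.0, 50)
ys = odeint(dynamics, y0, ts)
print(ys[-1])  # close to exp(-5) ≈ 0.0067
```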
{}
flax-community/NeuralODE_SDE
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[]
TAGS #region-us
# Neural ODE with Flax This is the result of project ["Reproduce Neural ODE and SDE"][projectlink] in [HuggingFace Flax/JAX community week][comweeklink]. <code>URL</code> will execute training of ResNet or OdeNet for MNIST dataset. [projectlink]: URL [comweeklink]: URL ## Dependency ### JAX and Flax For JAX installation, please follow [here][jaxinstalllink]. or simply, type For Flax installation, [jaxinstalllink]: URL Tensorflow-datasets will download MNIST dataset to environment. ## How to run training For (small) ResNet training, For Neural ODE training, For Continuous Normalizing Flow, Sample datasets can be chosen as circles, moons, or scurve. # Sample Results !cnf-viz !cnf-viz !cnf-viz # Bird Call generation Score SDE These are the codes for the bird call generation score sde model. <code>URL</code> will execute the sampler. The sampler uses pretrained weight to generate bird calls. The weight can be found here For using different sample generation parameters change the argument values. For example, In order to generate the audios, these dependencies are required, In order to train the model from scratch, please generate the dataset using this link. The dataset is generated in kaggle. Therefore, during training your username and api key is required in the specified section inside the script. Generated samples can be found here and here
[ "# Neural ODE with Flax\nThis is the result of project [\"Reproduce Neural ODE and SDE\"][projectlink] in [HuggingFace Flax/JAX community week][comweeklink].\n\n<code>URL</code> will execute training of ResNet or OdeNet for MNIST dataset.\n\n[projectlink]: URL\n\n[comweeklink]: URL", "## Dependency", "### JAX and Flax\n\nFor JAX installation, please follow [here][jaxinstalllink].\n\nor simply, type\n\n\nFor Flax installation,\n\n\n[jaxinstalllink]: URL\n\n\nTensorflow-datasets will download MNIST dataset to environment.", "## How to run training\n\nFor (small) ResNet training,\n\n\nFor Neural ODE training, \n\n\nFor Continuous Normalizing Flow,\n\nSample datasets can be chosen as circles, moons, or scurve.", "# Sample Results\n\n!cnf-viz\n!cnf-viz\n!cnf-viz", "# Bird Call generation Score SDE \n\nThese are the codes for the bird call generation score sde model. \n\n<code>URL</code> will execute the sampler. The sampler uses pretrained weight to generate bird calls. The weight can be found here\n\nFor using different sample generation parameters change the argument values. For example,\n \nIn order to generate the audios, these dependencies are required,\n\n\nIn order to train the model from scratch, please generate the dataset using this link. The dataset is generated in kaggle. Therefore, during training your username and api key is required in the specified section inside the script. \n \nGenerated samples can be found here\nand here" ]
[ "TAGS\n#region-us \n", "# Neural ODE with Flax\nThis is the result of project [\"Reproduce Neural ODE and SDE\"][projectlink] in [HuggingFace Flax/JAX community week][comweeklink].\n\n<code>URL</code> will execute training of ResNet or OdeNet for MNIST dataset.\n\n[projectlink]: URL\n\n[comweeklink]: URL", "## Dependency", "### JAX and Flax\n\nFor JAX installation, please follow [here][jaxinstalllink].\n\nor simply, type\n\n\nFor Flax installation,\n\n\n[jaxinstalllink]: URL\n\n\nTensorflow-datasets will download MNIST dataset to environment.", "## How to run training\n\nFor (small) ResNet training,\n\n\nFor Neural ODE training, \n\n\nFor Continuous Normalizing Flow,\n\nSample datasets can be chosen as circles, moons, or scurve.", "# Sample Results\n\n!cnf-viz\n!cnf-viz\n!cnf-viz", "# Bird Call generation Score SDE \n\nThese are the codes for the bird call generation score sde model. \n\n<code>URL</code> will execute the sampler. The sampler uses pretrained weight to generate bird calls. The weight can be found here\n\nFor using different sample generation parameters change the argument values. For example,\n \nIn order to generate the audios, these dependencies are required,\n\n\nIn order to train the model from scratch, please generate the dataset using this link. The dataset is generated in kaggle. Therefore, during training your username and api key is required in the specified section inside the script. \n \nGenerated samples can be found here\nand here" ]
fill-mask
transformers
# NOTE: We have trained newer and better Finnish RoBERTa large model which can be found from different repository: [https://huggingface.co/Finnish-NLP/roberta-large-finnish](https://huggingface.co/Finnish-NLP/roberta-large-finnish). Our future Finnish models will be available at the [Finnish-NLP](https://huggingface.co/Finnish-NLP) Hugging Face organization # RoBERTa large model for Finnish Pretrained model on Finnish language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/abs/1907.11692) and first released in [this repository](https://github.com/pytorch/fairseq/tree/master/examples/roberta). This model is case-sensitive: it makes a difference between finnish and Finnish. ## Model description RoBERTa is a transformers model pretrained on a large corpus of Finnish data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with the Masked language modeling (MLM) objective. Taking a sentence, the model randomly masks 15% of the words in the input then run the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence. This way, the model learns an inner representation of the Finnish language that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences for instance, you can train a standard classifier using the features produced by the RoBERTa model as inputs. ## Intended uses & limitations You can use the raw model for masked language modeling, but it's mostly intended to be fine-tuned on a downstream task. Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation you should look at model like GPT2. 
### How to use You can use this model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='flax-community/RoBERTa-large-finnish') >>> unmasker("Moikka olen <mask> kielimalli.") [{'sequence': 'Moikka olen uusi kielimalli.', 'score': 0.05129234120249748, 'token': 1825, 'token_str': ' uusi'}, {'sequence': 'Moikka olen toinen kielimalli.', 'score': 0.03112379088997841, 'token': 2194, 'token_str': ' toinen'}, {'sequence': 'Moikka olen myös kielimalli.', 'score': 0.025534993037581444, 'token': 491, 'token_str': ' myös'}, {'sequence': 'Moikka olen ensimmäinen kielimalli.', 'score': 0.020146571099758148, 'token': 2832, 'token_str': ' ensimmäinen'}, {'sequence': 'Moikka olen vapaa kielimalli.', 'score': 0.018089469522237778, 'token': 2257, 'token_str': ' vapaa'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import RobertaTokenizer, RobertaModel tokenizer = RobertaTokenizer.from_pretrained('flax-community/RoBERTa-large-finnish') model = RobertaModel.from_pretrained('flax-community/RoBERTa-large-finnish') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import RobertaTokenizer, TFRobertaModel tokenizer = RobertaTokenizer.from_pretrained('flax-community/RoBERTa-large-finnish') model = TFRobertaModel.from_pretrained('flax-community/RoBERTa-large-finnish', from_pt=True) text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias The training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral. Therefore, the model can have biased predictions. ## Training data This Finnish RoBERTa model was pretrained on the combination of two datasets: - [mc4](https://huggingface.co/datasets/mc4), the dataset mC4 is a multilingual colossal, cleaned version of Common Crawl's web crawl corpus. We used the Finnish subset of the mC4 dataset - [Yle Finnish News Archive](http://urn.fi/urn:nbn:fi:lb-2017070501) Raw datasets were cleaned to filter out bad quality and non-Finnish examples. Together these cleaned datasets were around 51GB of text. ## Training procedure ### Preprocessing The texts are tokenized using a byte version of Byte-Pair Encoding (BPE) and a vocabulary size of 50265. The inputs of the model take pieces of 512 contiguous token that may span over documents. The beginning of a new document is marked with `<s>` and the end of one by `</s>` The details of the masking procedure for each sentence are the following: - 15% of the tokens are masked. - In 80% of the cases, the masked tokens are replaced by `<mask>`. - In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace. - In the 10% remaining cases, the masked tokens are left as is. Contrary to BERT, the masking is done dynamically during pretraining (e.g., it changes at each epoch and is not fixed). ### Pretraining The model was trained on TPUv3-8 VM, sponsored by the Hugging Face JAX/Flax community week event, for 2 epochs with a sequence length of 128 and continuing for one more epoch with a sequence length of 512. 
The optimizer used is Adafactor with a learning rate of 2e-4, \\(\beta_{1} = 0.9\\), \\(\beta_{2} = 0.98\\) and \\(\epsilon = 1e-6\\), learning rate warmup for 1500 steps and linear decay of the learning rate after. ## Evaluation results Evaluation was done by fine-tuning the model on downstream text classification task with two different labeled datasets: [Yle News](https://github.com/spyysalo/yle-corpus) and [Eduskunta](https://github.com/aajanki/eduskunta-vkk). Yle News classification fine-tuning was done with two different sequence lengths: 128 and 512 but Eduskunta only with 128 sequence length. When fine-tuned on those datasets, this model (the first row of the table) achieves the following accuracy results compared to the [FinBERT (Finnish BERT)](https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1) and to our newer [Finnish RoBERTa-large](https://huggingface.co/Finnish-NLP/roberta-large-finnish) trained with larger dataset: | | Average | Yle News 128 length | Yle News 512 length | Eduskunta 128 length | |----------------------------------------|----------|---------------------|---------------------|----------------------| |flax-community/RoBERTa-large-finnish |87.72 |94.42 |95.06 |73.67 | |Finnish-NLP/roberta-large-finnish |88.02 |94.53 |95.23 |74.30 | |TurkuNLP/bert-base-finnish-cased-v1 |**88.82** |**94.90** |**95.49** |**76.07** | To conclude, this model slightly loses to our newer [Finnish RoBERTa-large](https://huggingface.co/Finnish-NLP/roberta-large-finnish) model trained with larger dataset and also slightly loses to the [FinBERT (Finnish BERT)](https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1) model. ## Team Members - Aapo Tanskanen, [Hugging Face profile](https://huggingface.co/aapot), [LinkedIn profile](https://www.linkedin.com/in/aapotanskanen/) - Rasmus Toivanen [Hugging Face profile](https://huggingface.co/RASMUS), [LinkedIn profile](https://www.linkedin.com/in/rasmustoivanen/) - Tommi Vehviläinen [Hugging Face profile](https://huggingface.co/Tommi) Feel free to contact us for more details 🤗
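The evaluation above relies on fine-tuning for text classification. As a hedged sketch of how such a run could be set up (the tiny inline dataset, label count and hyperparameters are placeholders, not the authors' actual Yle News or Eduskunta configuration):

```python
from datasets import Dataset
from transformers import (AutoTokenizer, AutoModelForSequenceClassification,
                          Trainer, TrainingArguments)

# Hedged sketch of downstream text-classification fine-tuning. The two-example
# dataset, num_labels and hyperparameters are placeholders for illustration.
name = "flax-community/RoBERTa-large-finnish"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name, num_labels=2)

raw = Dataset.from_dict({"text": ["Urheilu-uutinen esimerkkinä.", "Talousuutinen esimerkkinä."],
                         "label": [0, 1]})

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=128)

encoded = raw.map(tokenize, batched=True)

args = TrainingArguments(output_dir="finetune-demo", num_train_epochs=1,
                         per_device_train_batch_size=2, learning_rate=2e-5)
Trainer(model=model, args=args, train_dataset=encoded).train()
```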
{"language": ["fi"], "license": "apache-2.0", "tags": ["finnish", "roberta"], "datasets": ["mc4"], "widget": [{"text": "Moikka olen <mask> kielimalli."}]}
flax-community/RoBERTa-large-finnish
null
[ "transformers", "pytorch", "jax", "tensorboard", "roberta", "fill-mask", "finnish", "fi", "dataset:mc4", "arxiv:1907.11692", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "1907.11692" ]
[ "fi" ]
TAGS #transformers #pytorch #jax #tensorboard #roberta #fill-mask #finnish #fi #dataset-mc4 #arxiv-1907.11692 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
NOTE: We have trained newer and better Finnish RoBERTa large model which can be found from different repository: URL Our future Finnish models will be available at the Finnish-NLP Hugging Face organization ============================================================================================================================================================================================================= RoBERTa large model for Finnish =============================== Pretrained model on Finnish language using a masked language modeling (MLM) objective. It was introduced in this paper and first released in this repository. This model is case-sensitive: it makes a difference between finnish and Finnish. Model description ----------------- RoBERTa is a transformers model pretrained on a large corpus of Finnish data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with the Masked language modeling (MLM) objective. Taking a sentence, the model randomly masks 15% of the words in the input then run the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence. This way, the model learns an inner representation of the Finnish language that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences for instance, you can train a standard classifier using the features produced by the RoBERTa model as inputs. Intended uses & limitations --------------------------- You can use the raw model for masked language modeling, but it's mostly intended to be fine-tuned on a downstream task. Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation you should look at model like GPT2. ### How to use You can use this model directly with a pipeline for masked language modeling: Here is how to use this model to get the features of a given text in PyTorch: and in TensorFlow: ### Limitations and bias The training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral. Therefore, the model can have biased predictions. Training data ------------- This Finnish RoBERTa model was pretrained on the combination of two datasets: * mc4, the dataset mC4 is a multilingual colossal, cleaned version of Common Crawl's web crawl corpus. We used the Finnish subset of the mC4 dataset * Yle Finnish News Archive Raw datasets were cleaned to filter out bad quality and non-Finnish examples. Together these cleaned datasets were around 51GB of text. Training procedure ------------------ ### Preprocessing The texts are tokenized using a byte version of Byte-Pair Encoding (BPE) and a vocabulary size of 50265. The inputs of the model take pieces of 512 contiguous token that may span over documents. 
The beginning of a new document is marked with '<s>' and the end of one by '</s>'. The details of the masking procedure for each sentence are the following: * 15% of the tokens are masked. * In 80% of the cases, the masked tokens are replaced by '<mask>'. * In 10% of the cases, the masked tokens are replaced by a random token (different from the one they replace). * In the 10% remaining cases, the masked tokens are left as is. Contrary to BERT, the masking is done dynamically during pretraining (e.g., it changes at each epoch and is not fixed). ### Pretraining The model was trained on a TPUv3-8 VM, sponsored by the Hugging Face JAX/Flax community week event, for 2 epochs with a sequence length of 128 and continuing for one more epoch with a sequence length of 512. The optimizer used is Adafactor with a learning rate of 2e-4, \(\beta_{1} = 0.9\), \(\beta_{2} = 0.98\) and \(\epsilon = 1e-6\), learning rate warmup for 1500 steps and linear decay of the learning rate after. Evaluation results ------------------ Evaluation was done by fine-tuning the model on a downstream text classification task with two different labeled datasets: Yle News and Eduskunta. Yle News classification fine-tuning was done with two different sequence lengths: 128 and 512, but Eduskunta only with a sequence length of 128. When fine-tuned on those datasets, this model (the first row of the table) achieves the following accuracy results compared to the FinBERT (Finnish BERT) and to our newer Finnish RoBERTa-large trained with a larger dataset: To conclude, this model slightly loses to our newer Finnish RoBERTa-large model trained with a larger dataset and also slightly loses to the FinBERT (Finnish BERT) model. Team Members ------------ * Aapo Tanskanen, Hugging Face profile, LinkedIn profile * Rasmus Toivanen, Hugging Face profile, LinkedIn profile * Tommi Vehviläinen, Hugging Face profile Feel free to contact us for more details
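The "How to use" section above refers to a fill-mask pipeline and to feature extraction, but the code snippets were stripped from this processed copy. A minimal sketch follows; the repository id is not given in this record, so `"<finnish-roberta-large-checkpoint>"` is a placeholder to be replaced with the actual checkpoint name, and the Finnish prompt is only an illustrative example.

```python
from transformers import pipeline, AutoTokenizer, RobertaModel

# Placeholder: substitute the actual repository id of this Finnish RoBERTa model.
checkpoint = "<finnish-roberta-large-checkpoint>"

# Masked language modeling with a fill-mask pipeline.
unmasker = pipeline("fill-mask", model=checkpoint)
print(unmasker("Moikka, olen <mask> kielimalli."))  # illustrative Finnish prompt

# Extracting features for a given text in PyTorch.
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = RobertaModel.from_pretrained(checkpoint)
inputs = tokenizer("Tämä on esimerkkilause.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
```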
[ "### How to use\n\n\nYou can use this model directly with a pipeline for masked language modeling:\n\n\nHere is how to use this model to get the features of a given text in PyTorch:\n\n\nand in TensorFlow:", "### Limitations and bias\n\n\nThe training data used for this model contains a lot of unfiltered content from the internet, which is far from\nneutral. Therefore, the model can have biased predictions.\n\n\nTraining data\n-------------\n\n\nThis Finnish RoBERTa model was pretrained on the combination of two datasets:\n\n\n* mc4, the dataset mC4 is a multilingual colossal, cleaned version of Common Crawl's web crawl corpus. We used the Finnish subset of the mC4 dataset\n* Yle Finnish News Archive\n\n\nRaw datasets were cleaned to filter out bad quality and non-Finnish examples. Together these cleaned datasets were around 51GB of text.\n\n\nTraining procedure\n------------------", "### Preprocessing\n\n\nThe texts are tokenized using a byte version of Byte-Pair Encoding (BPE) and a vocabulary size of 50265. The inputs of\nthe model take pieces of 512 contiguous token that may span over documents. The beginning of a new document is marked\nwith '~~' and the end of one by '~~'\n\n\nThe details of the masking procedure for each sentence are the following:\n\n\n* 15% of the tokens are masked.\n* In 80% of the cases, the masked tokens are replaced by ''.\n* In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace.\n* In the 10% remaining cases, the masked tokens are left as is.\n\n\nContrary to BERT, the masking is done dynamically during pretraining (e.g., it changes at each epoch and is not fixed).", "### Pretraining\n\n\nThe model was trained on TPUv3-8 VM, sponsored by the Hugging Face JAX/Flax community week event, for 2 epochs with a sequence length of 128 and continuing for one more epoch with a sequence length of 512. The optimizer used is Adafactor with a learning rate of 2e-4, \\(\\beta\\_{1} = 0.9\\), \\(\\beta\\_{2} = 0.98\\) and \\(\\epsilon = 1e-6\\), learning rate warmup for 1500 steps and linear decay of the learning rate after.\n\n\nEvaluation results\n------------------\n\n\nEvaluation was done by fine-tuning the model on downstream text classification task with two different labeled datasets: Yle News and Eduskunta. Yle News classification fine-tuning was done with two different sequence lengths: 128 and 512 but Eduskunta only with 128 sequence length.\nWhen fine-tuned on those datasets, this model (the first row of the table) achieves the following accuracy results compared to the FinBERT (Finnish BERT) and to our newer Finnish RoBERTa-large trained with larger dataset:\n\n\n\nTo conclude, this model slightly loses to our newer Finnish RoBERTa-large model trained with larger dataset and also slightly loses to the FinBERT (Finnish BERT) model.\n\n\nTeam Members\n------------\n\n\n* Aapo Tanskanen, Hugging Face profile, LinkedIn profile\n* Rasmus Toivanen Hugging Face profile, LinkedIn profile\n* Tommi Vehviläinen Hugging Face profile\n\n\nFeel free to contact us for more details" ]
[ "TAGS\n#transformers #pytorch #jax #tensorboard #roberta #fill-mask #finnish #fi #dataset-mc4 #arxiv-1907.11692 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### How to use\n\n\nYou can use this model directly with a pipeline for masked language modeling:\n\n\nHere is how to use this model to get the features of a given text in PyTorch:\n\n\nand in TensorFlow:", "### Limitations and bias\n\n\nThe training data used for this model contains a lot of unfiltered content from the internet, which is far from\nneutral. Therefore, the model can have biased predictions.\n\n\nTraining data\n-------------\n\n\nThis Finnish RoBERTa model was pretrained on the combination of two datasets:\n\n\n* mc4, the dataset mC4 is a multilingual colossal, cleaned version of Common Crawl's web crawl corpus. We used the Finnish subset of the mC4 dataset\n* Yle Finnish News Archive\n\n\nRaw datasets were cleaned to filter out bad quality and non-Finnish examples. Together these cleaned datasets were around 51GB of text.\n\n\nTraining procedure\n------------------", "### Preprocessing\n\n\nThe texts are tokenized using a byte version of Byte-Pair Encoding (BPE) and a vocabulary size of 50265. The inputs of\nthe model take pieces of 512 contiguous token that may span over documents. The beginning of a new document is marked\nwith '~~' and the end of one by '~~'\n\n\nThe details of the masking procedure for each sentence are the following:\n\n\n* 15% of the tokens are masked.\n* In 80% of the cases, the masked tokens are replaced by ''.\n* In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace.\n* In the 10% remaining cases, the masked tokens are left as is.\n\n\nContrary to BERT, the masking is done dynamically during pretraining (e.g., it changes at each epoch and is not fixed).", "### Pretraining\n\n\nThe model was trained on TPUv3-8 VM, sponsored by the Hugging Face JAX/Flax community week event, for 2 epochs with a sequence length of 128 and continuing for one more epoch with a sequence length of 512. The optimizer used is Adafactor with a learning rate of 2e-4, \\(\\beta\\_{1} = 0.9\\), \\(\\beta\\_{2} = 0.98\\) and \\(\\epsilon = 1e-6\\), learning rate warmup for 1500 steps and linear decay of the learning rate after.\n\n\nEvaluation results\n------------------\n\n\nEvaluation was done by fine-tuning the model on downstream text classification task with two different labeled datasets: Yle News and Eduskunta. Yle News classification fine-tuning was done with two different sequence lengths: 128 and 512 but Eduskunta only with 128 sequence length.\nWhen fine-tuned on those datasets, this model (the first row of the table) achieves the following accuracy results compared to the FinBERT (Finnish BERT) and to our newer Finnish RoBERTa-large trained with larger dataset:\n\n\n\nTo conclude, this model slightly loses to our newer Finnish RoBERTa-large model trained with larger dataset and also slightly loses to the FinBERT (Finnish BERT) model.\n\n\nTeam Members\n------------\n\n\n* Aapo Tanskanen, Hugging Face profile, LinkedIn profile\n* Rasmus Toivanen Hugging Face profile, LinkedIn profile\n* Tommi Vehviläinen Hugging Face profile\n\n\nFeel free to contact us for more details" ]
text-generation
transformers
# Sinhala GPT2 trained on MC4 (manually cleaned) ### Overview This is a smaller GPT2 model trained on the [MC4](https://github.com/allenai/allennlp/discussions/5056) Sinhala dataset. As Sinhala is a low-resource language, only a handful of models have been trained for it, so this would be a great place to start training for more downstream tasks. This model uses a manually cleaned version of the MC4 dataset, which can be found [here](https://huggingface.co/datasets/keshan/clean-si-mc4). Although the dataset is relatively small (~3GB), the model finetuned on [news articles](https://huggingface.co/keshan/sinhala-gpt2-newswire) generates good and acceptable results. ## Model Specification The model chosen for training is GPT2 with the following specifications: 1. vocab_size=50257 2. n_embd=768 3. n_head=12 4. n_layer=12 5. n_positions=1024 ## How to Use You can use this model directly with a pipeline for causal language modeling: ```py from transformers import pipeline generator = pipeline('text-generation', model='flax-community/Sinhala-gpt2') generator("මම", max_length=50, num_return_sequences=5) ```
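For illustration only (this block is not taken from the card itself), the "Model Specification" list above corresponds one-to-one to a `transformers` GPT-2 configuration:

```python
from transformers import GPT2Config

# Illustrative: these values mirror the specification list in the card above.
config = GPT2Config(
    vocab_size=50257,   # tokenizer vocabulary size
    n_embd=768,         # hidden / embedding dimension
    n_head=12,          # attention heads per layer
    n_layer=12,         # transformer blocks
    n_positions=1024,   # maximum sequence length
)
print(config)
```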
{"language": "si", "tags": ["Sinhala", "text-generation", "gpt2"], "datasets": ["mc4"]}
flax-community/Sinhala-gpt2
null
[ "transformers", "pytorch", "tf", "jax", "tensorboard", "gpt2", "feature-extraction", "Sinhala", "text-generation", "si", "dataset:mc4", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "si" ]
TAGS #transformers #pytorch #tf #jax #tensorboard #gpt2 #feature-extraction #Sinhala #text-generation #si #dataset-mc4 #endpoints_compatible #has_space #text-generation-inference #region-us
# Sinhala GPT2 trained on MC4 (manually cleaned) ### Overview This is a smaller GPT2 model trained on MC4 Sinhala dataset. As Sinhala is one of those low resource languages, there are only a handful of models been trained. So, this would be a great place to start training for more downstream tasks. This model uses a manually cleaned version of MC4 dataset which can be found here. Although the dataset is relatively small ~3GB. The finetuned model on news articles generates good and acceptable results. ## Model Specification The model chosen for training is GPT2 with the following specifications: 1. vocab_size=50257 2. n_embd=768 3. n_head=12 4. n_layer=12 5. n_positions=1024 ## How to Use You can use this model directly with a pipeline for causal language modeling:
[ "# Sinhala GPT2 trained on MC4 (manually cleaned)", "### Overview\n\nThis is a smaller GPT2 model trained on MC4 Sinhala dataset. As Sinhala is one of those low resource languages, there are only a handful of models been trained. So, this would be a great place to start training for more downstream tasks.\n\nThis model uses a manually cleaned version of MC4 dataset which can be found here. Although the dataset is relatively small ~3GB. The finetuned model on news articles generates good and acceptable results.", "## Model Specification\n\n\nThe model chosen for training is GPT2 with the following specifications:\n 1. vocab_size=50257\n 2. n_embd=768\n 3. n_head=12\n 4. n_layer=12\n 5. n_positions=1024", "## How to Use\nYou can use this model directly with a pipeline for causal language modeling:" ]
[ "TAGS\n#transformers #pytorch #tf #jax #tensorboard #gpt2 #feature-extraction #Sinhala #text-generation #si #dataset-mc4 #endpoints_compatible #has_space #text-generation-inference #region-us \n", "# Sinhala GPT2 trained on MC4 (manually cleaned)", "### Overview\n\nThis is a smaller GPT2 model trained on MC4 Sinhala dataset. As Sinhala is one of those low resource languages, there are only a handful of models been trained. So, this would be a great place to start training for more downstream tasks.\n\nThis model uses a manually cleaned version of MC4 dataset which can be found here. Although the dataset is relatively small ~3GB. The finetuned model on news articles generates good and acceptable results.", "## Model Specification\n\n\nThe model chosen for training is GPT2 with the following specifications:\n 1. vocab_size=50257\n 2. n_embd=768\n 3. n_head=12\n 4. n_layer=12\n 5. n_positions=1024", "## How to Use\nYou can use this model directly with a pipeline for causal language modeling:" ]
fill-mask
transformers
## Sinhala Roberta model trained on MC4 Sinhala dataset (manually cleaned)
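The card itself gives no usage snippet. A minimal sketch follows, assuming the repository id `flax-community/Sinhala-roberta` shown in this record and that the checkpoint exposes a masked-LM head; the Sinhala sentence is a made-up example.

```python
from transformers import pipeline

# Assumes the checkpoint has a masked-LM head; if it only provides encoder weights,
# load it with AutoModel.from_pretrained for feature extraction instead.
unmasker = pipeline("fill-mask", model="flax-community/Sinhala-roberta")
print(unmasker("මම <mask> කෑවා."))  # hypothetical example sentence
```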
{"language": "si", "tags": ["fill-mask", "sinhala", "roberta"]}
flax-community/Sinhala-roberta
null
[ "transformers", "pytorch", "jax", "tensorboard", "roberta", "feature-extraction", "fill-mask", "sinhala", "si", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "si" ]
TAGS #transformers #pytorch #jax #tensorboard #roberta #feature-extraction #fill-mask #sinhala #si #endpoints_compatible #region-us
## Sinhala Roberta model trained on MC4 Sinhala dataset (manually cleaned)
[ "## Sinhala Roberta model trained on MC4 Sinhala dataset (manually cleaned)" ]
[ "TAGS\n#transformers #pytorch #jax #tensorboard #roberta #feature-extraction #fill-mask #sinhala #si #endpoints_compatible #region-us \n", "## Sinhala Roberta model trained on MC4 Sinhala dataset (manually cleaned)" ]
fill-mask
transformers
<div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><b>Update:</b> This model has been moved to <a href="https://huggingface.co/linhd-postdata/alberti-bert-base-multilingual-cased">linhd-postdata/alberti-bert-base-multilingual-cased</a>, where it will be maintained and updated. </p> </div> # ALBERTI ALBERTI is a set of two BERT-based multilingual models for poetry: one for verses and another for stanzas. This model has been further trained with the PULPO corpus for verses using [Flax](https://github.com/google/flax), including training scripts. This is part of the [Flax/Jax Community Week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104), organised by [HuggingFace](https://huggingface.co/), with TPU usage sponsored by Google. ## PULPO PULPO, the Prodigious Unannotated Literary Poetry Corpus, is a set of multilingual corpora of verses and stanzas with over 95M words. The following corpora have been downloaded using the [Averell](https://github.com/linhd-postdata/averell/) tool, developed by the [POSTDATA](https://postdata.linhd.uned.es/) team: ### Spanish - [Disco v3](https://github.com/pruizf/disco) - [Corpus of Spanish Golden-Age Sonnets](https://github.com/bncolorado/CorpusSonetosSigloDeOro) - [Corpus general de poesía lírica castellana del Siglo de Oro](https://github.com/bncolorado/CorpusGeneralPoesiaLiricaCastellanaDelSigloDeOro) - [Gongocorpus](https://github.com/linhd-postdata/gongocorpus) - [source](http://obvil.sorbonne-universite.site/corpus/gongora/gongora_obra-poetica) ### English - [Eighteenth-Century Poetry Archive (ECPA)](https://github.com/alhuber1502/ECPA) - [For better for verse](https://github.com/waynegraham/for_better_for_verse) ### French - [Métrique en Ligne](https://crisco2.unicaen.fr/verlaine/index.php?navigation=accueil) - [source](https://github.com/linhd-postdata/metrique-en-ligne) ### Italian - [Biblioteca italiana](https://github.com/linhd-postdata/biblioteca_italiana) - [source](http://www.bibliotecaitaliana.it/) ### Czech - [Corpus of Czech Verse](https://github.com/versotym/corpusCzechVerse) ### Portuguese - [Stichotheque](https://gitlab.com/stichotheque/stichotheque-pt) Also, we obtained the following corpora from these sources: ### Spanish - [Poesi.as](https://github.com/linhd-postdata/poesi.as) - [source](http://www.poesi.as/) ### English - [A Gutenberg Poetry Corpus](https://github.com/aparrish/gutenberg-poetry-corpus) ### Arabic - [Arabic Poetry dataset](https://www.kaggle.com/ahmedabelal/arabic-poetry) ### Chinese - [THU Chinese Classical Poetry Corpus](https://github.com/THUNLP-AIPoet/Datasets/tree/master/CCPC) ### Finnish - [SKVR](https://github.com/sks190/SKVR) ### German - [TextGrid Poetry Corpus](https://github.com/linhd-postdata/textgrid-poetry) - [source](https://textgrid.de/en/digitale-bibliothek) - [German Rhyme Corpus](https://github.com/tnhaider/german-rhyme-corpus) ### Hungarian - [verskorpusz](https://github.com/ELTE-DH/verskorpusz) ### Portuguese - [Poems in Portuguese](https://www.kaggle.com/oliveirasp6/poems-in-portuguese) ### Russian - [19 000 Russian poems](https://www.kaggle.com/grafstor/19-000-russian-poems) ## Team members - Álvaro Pérez ([alvp](https://huggingface.co/alvp)) - Javier de la Rosa ([versae](https://huggingface.co/versae)) - Aitor Díaz 
([aitordiaz](https://huggingface.co/aitordiaz)) - Elena González-Blanco - Salvador Ros ([salva](https://huggingface.co/salva)) ## Useful links - [Community Week timeline](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104#summary-timeline-calendar-6) - [Community Week README](https://github.com/huggingface/transformers/blob/master/examples/research_projects/jax-projects/README.md) - [Community Week thread](https://discuss.huggingface.co/t/bertin-pretrain-roberta-large-from-scratch-in-spanish/7125) - [Community Week channel](https://discord.com/channels/858019234139602994/859113060068229190) - [Masked Language Modelling example scripts](https://github.com/huggingface/transformers/tree/master/examples/flax/language-modeling) - [Model Repository](https://huggingface.co/flax-community/alberti-bert-base-multilingual-cased/) ## Acknowledgments This project would not have been possible without the infrastructure and resources provided by HuggingFace and Google Cloud. Moreover, we want to thank POSTDATA Project (ERC-StG-679528) and the Computational Literary Studies Infrastructure (CLS INFRA No. 101004984) of the European Union's Horizon 2020 research and innovation programme for their support and time allowance.
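The card describes a BERT-based fill-mask model but includes no usage snippet. A minimal sketch follows, using the repository ids given in this record (the update notice above points to the `linhd-postdata` copy as the maintained one) and the example prompt from this record's widget metadata.

```python
from transformers import pipeline

# Either repository id from this record should work; the linhd-postdata one is
# the maintained copy according to the update notice in the card.
unmasker = pipeline(
    "fill-mask",
    model="linhd-postdata/alberti-bert-base-multilingual-cased",
)

# Widget example from this record's metadata.
print(unmasker("¿Qué es la vida? Un [MASK]."))
```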
{"language": "es", "license": "cc-by-4.0", "tags": ["multilingual", "bert"], "pipeline_tag": "fill-mask", "widget": [{"text": "\u00bfQu\u00e9 es la vida? Un [MASK]."}]}
flax-community/alberti-bert-base-multilingual-cased
null
[ "transformers", "pytorch", "jax", "joblib", "safetensors", "bert", "fill-mask", "multilingual", "es", "license:cc-by-4.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "es" ]
TAGS #transformers #pytorch #jax #joblib #safetensors #bert #fill-mask #multilingual #es #license-cc-by-4.0 #autotrain_compatible #endpoints_compatible #has_space #region-us
<div class="course-tip course-tip-orange bg-gradient-to-br dark:bg-gradient-to-r before:border-orange-500 dark:before:border-orange-800 from-orange-50 dark:from-gray-900 to-white dark:to-gray-950 border border-orange-50 text-orange-700 dark:text-gray-400"> <p><b>Update:</b> This model has been moved to <a href="URL where it will be maintained and updated. </p> </div> # ALBERTI ALBERTI is a set of two BERT-based multilingual model for poetry. One for verses and another one for stanzas. This model has been further trained with the PULPO corpus for verses using Flax, including training scripts. This is part of the Flax/Jax Community Week, organised by HuggingFace and TPU usage sponsored by Google. ## PULPO PULPO, the Prodigious Unannotated Literary Poetry Corpus, is a set of multilingual corpora of verses and stanzas with over 95M words. The following corpora has been downloaded using the Averell tool, developed by the POSTDATA team: ### Spanish - Disco v3 - Corpus of Spanish Golden-Age Sonnets - Corpus general de poesía lírica castellana del Siglo de Oro - Gongocorpus - source ### English - Eighteenth-Century Poetry Archive (ECPA) - For better for verse ### French - Métrique en Ligne - source ### Italian - Biblioteca italiana - source ### Czech - Corpus of Czech Verse ### Portuguese - Stichotheque Also, we obtained the following corpora from these sources: ### Spanish - URL - source ### English - A Gutenberg Poetry Corpus ### Arabic - Arabic Poetry dataset ### Chinese - THU Chinese Classical Poetry Corpus ### Finnish - SKVR ### German - TextGrid Poetry Corpus - source - German Rhyme Corpus ### Hungarian - verskorpusz ### Portuguese - Poems in Portuguese ### Russian - 19 000 Russian poems ## Team members - Álvaro Pérez (alvp) - Javier de la Rosa (versae) - Aitor Díaz (aitordiaz) - Elena González-Blanco - Salvador Ros (salva) ## Useful links - Community Week timeline - Community Week README - Community Week thread - Community Week channel - Masked Language Modelling example scripts - Model Repository ## Acknowledgments This project would not have been possible without the infrastructure and resources provided by HuggingFace and Google Cloud. Moreover, we want to thank POSTDATA Project (ERC-StG-679528) and the Computational Literary Studies Infrastructure (CLS INFRA No. 101004984) of the European Union's Horizon 2020 research and innovation programme for their support and time allowance.
[ "# ALBERTI\n\nALBERTI is a set of two BERT-based multilingual model for poetry. One for verses and another one for stanzas. This model has been further trained with the PULPO corpus for verses using Flax, including training scripts.\n\nThis is part of the\nFlax/Jax Community Week, organised by HuggingFace and TPU usage sponsored by Google.", "## PULPO\n\nPULPO, the Prodigious Unannotated Literary Poetry Corpus, is a set of multilingual corpora of verses and stanzas with over 95M words.\n\nThe following corpora has been downloaded using the Averell tool, developed by the POSTDATA team:", "### Spanish\n- Disco v3\n- Corpus of Spanish Golden-Age Sonnets\n- Corpus general de poesía lírica castellana del Siglo de Oro\n- Gongocorpus - source", "### English\n- Eighteenth-Century Poetry Archive (ECPA)\n- For better for verse", "### French\n- Métrique en Ligne - source", "### Italian\n- Biblioteca italiana - source", "### Czech\n- Corpus of Czech Verse", "### Portuguese\n- Stichotheque\n\nAlso, we obtained the following corpora from these sources:", "### Spanish \n- URL - source", "### English\n- A Gutenberg Poetry Corpus", "### Arabic\n- Arabic Poetry dataset", "### Chinese\n- THU Chinese Classical Poetry Corpus", "### Finnish\n- SKVR", "### German\n- TextGrid Poetry Corpus - source\n- German Rhyme Corpus", "### Hungarian\n- verskorpusz", "### Portuguese\n- Poems in Portuguese", "### Russian\n- 19 000 Russian poems", "## Team members\n\n- Álvaro Pérez (alvp)\n- Javier de la Rosa (versae)\n- Aitor Díaz (aitordiaz)\n- Elena González-Blanco\n- Salvador Ros (salva)", "## Useful links\n\n- Community Week timeline\n- Community Week README\n- Community Week thread\n- Community Week channel\n- Masked Language Modelling example scripts\n- Model Repository", "## Acknowledgments\n\nThis project would not have been possible without the infrastructure and resources provided by HuggingFace and Google Cloud. Moreover, we want to thank POSTDATA Project (ERC-StG-679528) and the Computational Literary Studies Infrastructure (CLS INFRA No. 101004984) of the European Union's Horizon 2020 research and innovation programme for their support and time allowance." ]
[ "TAGS\n#transformers #pytorch #jax #joblib #safetensors #bert #fill-mask #multilingual #es #license-cc-by-4.0 #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# ALBERTI\n\nALBERTI is a set of two BERT-based multilingual model for poetry. One for verses and another one for stanzas. This model has been further trained with the PULPO corpus for verses using Flax, including training scripts.\n\nThis is part of the\nFlax/Jax Community Week, organised by HuggingFace and TPU usage sponsored by Google.", "## PULPO\n\nPULPO, the Prodigious Unannotated Literary Poetry Corpus, is a set of multilingual corpora of verses and stanzas with over 95M words.\n\nThe following corpora has been downloaded using the Averell tool, developed by the POSTDATA team:", "### Spanish\n- Disco v3\n- Corpus of Spanish Golden-Age Sonnets\n- Corpus general de poesía lírica castellana del Siglo de Oro\n- Gongocorpus - source", "### English\n- Eighteenth-Century Poetry Archive (ECPA)\n- For better for verse", "### French\n- Métrique en Ligne - source", "### Italian\n- Biblioteca italiana - source", "### Czech\n- Corpus of Czech Verse", "### Portuguese\n- Stichotheque\n\nAlso, we obtained the following corpora from these sources:", "### Spanish \n- URL - source", "### English\n- A Gutenberg Poetry Corpus", "### Arabic\n- Arabic Poetry dataset", "### Chinese\n- THU Chinese Classical Poetry Corpus", "### Finnish\n- SKVR", "### German\n- TextGrid Poetry Corpus - source\n- German Rhyme Corpus", "### Hungarian\n- verskorpusz", "### Portuguese\n- Poems in Portuguese", "### Russian\n- 19 000 Russian poems", "## Team members\n\n- Álvaro Pérez (alvp)\n- Javier de la Rosa (versae)\n- Aitor Díaz (aitordiaz)\n- Elena González-Blanco\n- Salvador Ros (salva)", "## Useful links\n\n- Community Week timeline\n- Community Week README\n- Community Week thread\n- Community Week channel\n- Masked Language Modelling example scripts\n- Model Repository", "## Acknowledgments\n\nThis project would not have been possible without the infrastructure and resources provided by HuggingFace and Google Cloud. Moreover, we want to thank POSTDATA Project (ERC-StG-679528) and the Computational Literary Studies Infrastructure (CLS INFRA No. 101004984) of the European Union's Horizon 2020 research and innovation programme for their support and time allowance." ]
text2text-generation
transformers
# arabic-t5-small This is a T5v1.1 (small) model trained on the concatenation of the Arabic Billion Words corpus and the Arabic subsets of the mC4 and Oscar datasets. The model could only be trained on about `10%` of the whole dataset due to time limitations. This is equivalent to `22'000` steps or about `4.3` billion tokens. ## Training parameters | | | | :-------------------: | :-----------: | | Training batch size | `384` | | Evaluation batch size | `768` | | learning rate | `1e-2` | | dtype | `jnp.float32` | ## Preprocessing and the tokenizer We tried to keep the preprocessing to a bare minimum. We only replaced URLs, emails and social media user mentions with fixed tokens. Contrary to other pretrained Arabic LMs, we decided not to strip the Arabic diacritics and to keep them part of the vocabulary. The tokenizer was trained on `5%` of the training set, with a vocabulary size of `64'000`. For more details about preprocessing, check the [tokenizer code](https://huggingface.co/flax-community/arabic-t5-small/blob/main/t5_tokenizer_model.py). ## Data The model was trained on the concatenation of the Arabic Billion Words corpus and the Arabic subsets of the mC4 and Oscar datasets. A random `0.1%` subset of the data was reserved for evaluation and the rest for training. ## Results | | | | :-----------------: | :-----------: | | Evaluation accuracy | `56.84%` | | Evaluation Loss | `2.423` | | Training Loss | `2.392` | | Training Time | `22h 23m 51s` | ## Note for finetuning This model was pretrained with dropout turned off, so the default `dropout_rate` in the model config is `0`. To finetune the model, dropout should be turned back on, like this: ```python model = T5ForConditionalGeneration.from_pretrained("flax-community/arabic-t5-small", dropout_rate=0.1) ``` or, ```python model = AutoModelForSeq2SeqLM.from_pretrained("flax-community/arabic-t5-small", dropout_rate=0.1) ```
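To complement the dropout note above, here is a minimal sketch of what the start of a finetuning step could look like with dropout re-enabled. The Arabic input/target pair is made up for illustration and is not from the card; in a real setup it would come from your downstream task's data.

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("flax-community/arabic-t5-small")
model = T5ForConditionalGeneration.from_pretrained(
    "flax-community/arabic-t5-small",
    dropout_rate=0.1,  # re-enable dropout for finetuning, as the card recommends
)

# Hypothetical input/target pair; replace with task-specific data.
inputs = tokenizer("مرحبا بالعالم", return_tensors="pt")
labels = tokenizer("أهلا", return_tensors="pt").input_ids

outputs = model(**inputs, labels=labels)
print(outputs.loss)  # in a real training loop, backpropagate this loss
```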
{"language": ["ar"], "datasets": ["mc4", "oscar", "arabic_billion_words"]}
flax-community/arabic-t5-small
null
[ "transformers", "pytorch", "tf", "jax", "tensorboard", "safetensors", "t5", "text2text-generation", "ar", "dataset:mc4", "dataset:oscar", "dataset:arabic_billion_words", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "ar" ]
TAGS #transformers #pytorch #tf #jax #tensorboard #safetensors #t5 #text2text-generation #ar #dataset-mc4 #dataset-oscar #dataset-arabic_billion_words #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
arabic-t5-small =============== This is a T5v1.1 (small) trained on the concatenation of the Arabic Billion Words corpus and the Arabic subsets of the mC4 and Oscar datasets. The model could only be trained for about '10%' of the whole dataset due to time limitations. This is equivalent to '22'000' steps or about '4.3' Billion tokens. Training parameters ------------------- Preprocessing and the tokenizer ------------------------------- We tried to keep the preprocessing to a bare minimum. We only replaced URLs, emails and social media user mentions with fixed tokens. Contrary to other pretrained Arabic LMs, we decided to not strip the Arabic diacritics and to keep them part of the vocabulary. The tokenizer was trained on '5%' of the training set, with a vocabulary size of '64'000'. For more details about preprocessing, check the tokenizer code Data ---- The model was trained on the concatenation of the Arabic Billion Words corpus and the Arabic subsets of the mC4 and Oscar datasets. A random '0.1%' subset of the data was reserved for evaluation and the rest for training. Results ------- Note for finetuning ------------------- This model was pretrained with dropout turned off, so the default 'dropout\_rate' in the model config is '0'. To finetune the model dropout should be turned be back on, like this: or,
[]
[ "TAGS\n#transformers #pytorch #tf #jax #tensorboard #safetensors #t5 #text2text-generation #ar #dataset-mc4 #dataset-oscar #dataset-arabic_billion_words #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
text2text-generation
transformers
# bengali-t5-base **bengali-t5-base** is a model trained on the Bengali portion of the mT5 dataset. We used the `T5-base` configuration for this model. This work was part of the [Flax/Jax Community Week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104), organized by [HuggingFace](https://huggingface.co/), with TPU usage sponsored by Google. The model was trained on around ~11B tokens (batch size 64, 512 tokens, 350k steps). ## load tokenizer ``` >>> tokenizer = transformers.AutoTokenizer.from_pretrained("flax-community/bengali-t5-base") >>> tokenizer.encode("আমি বাংলার গান গাই") >>> tokenizer.decode([93, 1912, 814, 5995, 3, 1]) ``` ``` [93, 1912, 814, 5995, 3, 1] 'আমি বাংলার গান গাই </s>' ``` ## load model ``` >>> config = T5Config.from_pretrained("flax-community/bengali-t5-base") >>> model = FlaxT5ForConditionalGeneration.from_pretrained("flax-community/bengali-t5-base", config=config) ``` The model was trained on a `de-noising` objective following the scripts [here](https://huggingface.co/flax-community/bengali-t5-base/blob/main/run_t5_mlm_flax.py) and [here](https://huggingface.co/flax-community/bengali-t5-base/blob/main/run.sh). Currently, this model doesn't have any generation capability. If you want this model to have generation capability, please finetune it on the `prefix-LM` objective mentioned in the [paper](https://arxiv.org/abs/1910.10683). See the tensorboard log in the `Training metrics` tab. Please note that we haven't finetuned the model on any downstream task. ## Proposal - [Project Proposal](https://discuss.huggingface.co/t/pretrain-t5-from-scratch-in-bengali/7121) ## Participants - [Ibraheem Muhammad Moosa](https://huggingface.co/ibraheemmoosa) - [Tasnim Mohiuddin](https://huggingface.co/tasnim) - [Khalid Saifullah](https://huggingface.co/khalidsaifullaah) - [Tahsin Mayeesha](https://tahsin-mayeesha.github.io/) - [M Saiful Bari](https://huggingface.co/sbmaruf) ## Useful links - [Community Week timeline](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104#summary-timeline-calendar-6) - [Community Week README](https://github.com/huggingface/transformers/blob/master/examples/research_projects/jax-projects/README.md) - [Masked Language Modelling example scripts](https://github.com/huggingface/transformers/tree/master/examples/flax/language-modeling) - [Model Repository](https://huggingface.co/flax-community/roberta-base-als-demo)
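Since the card recommends prefix-LM finetuning to obtain generation capability, a rough sketch of how one such training pair could be formed is shown below. The split point is arbitrary and this is not the card authors' recipe; only the example sentence is taken from the card itself.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("flax-community/bengali-t5-base")

# Prefix-LM style pair (illustrative): the first part of a sentence becomes the
# encoder input (prefix) and the continuation becomes the decoder target.
sentence = "আমি বাংলার গান গাই"  # example sentence from the card
words = sentence.split()
prefix, continuation = " ".join(words[:2]), " ".join(words[2:])

inputs = tokenizer(prefix, return_tensors="np")
targets = tokenizer(continuation, return_tensors="np")
print(inputs.input_ids, targets.input_ids)
```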
{}
flax-community/bengali-t5-base
null
[ "transformers", "pytorch", "jax", "tensorboard", "safetensors", "mt5", "text2text-generation", "arxiv:1910.10683", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
[ "1910.10683" ]
[]
TAGS #transformers #pytorch #jax #tensorboard #safetensors #mt5 #text2text-generation #arxiv-1910.10683 #autotrain_compatible #endpoints_compatible #has_space #text-generation-inference #region-us
# bengali-t5-base bengali-t5-base is a model trained on the Bengali portion of MT5 dataset. We used the 'T5-base' model for this model. Flax/Jax Community Week, organized by HuggingFace and TPU usage sponsored by Google. The model is trained on around ~11B tokens (64 size batch, 512 tokens, 350k steps). ## load tokenizer ## load model The model is trained on 'de-noising' objectives followed by the script here and here. Currently This model doesn't have any generation capability. If you want this model to have generation capability, please do a finetuning on 'prefix-LM' objective mentioned in the paper. See the tensorboard log in 'Training metrics' tab. Please note that we haven't finetuned the model in any downstream task. ## Proposal - Project Proposal ## Participants - Ibraheem Muhammad Moosa - Tasnim Mohiuddin - Khalid Saifullah - Tahsin Mayeesha - M Saiful Bari ## Useful links - Community Week timeline - Community Week README - Masked Language Modelling example scripts - Model Repository
[ "# bengali-t5-base\n\nbengali-t5-base is a model trained on the Bengali portion of MT5 dataset. We used the 'T5-base' model for this model.\n\nFlax/Jax Community Week, organized by HuggingFace and TPU usage sponsored by Google.\n\nThe model is trained on around ~11B tokens (64 size batch, 512 tokens, 350k steps).", "## load tokenizer", "## load model\n\n\n\nThe model is trained on 'de-noising' objectives followed by the script here and here. Currently This model doesn't have any generation capability. If you want this model to have generation capability, please do a finetuning on 'prefix-LM' objective mentioned in the paper. \n\nSee the tensorboard log in 'Training metrics' tab.\n\nPlease note that we haven't finetuned the model in any downstream task.", "## Proposal\n- Project Proposal", "## Participants\n- Ibraheem Muhammad Moosa\n- Tasnim Mohiuddin\n- Khalid Saifullah\n- Tahsin Mayeesha\n- M Saiful Bari", "## Useful links\n- Community Week timeline\n- Community Week README\n- Masked Language Modelling example scripts\n- Model Repository" ]
[ "TAGS\n#transformers #pytorch #jax #tensorboard #safetensors #mt5 #text2text-generation #arxiv-1910.10683 #autotrain_compatible #endpoints_compatible #has_space #text-generation-inference #region-us \n", "# bengali-t5-base\n\nbengali-t5-base is a model trained on the Bengali portion of MT5 dataset. We used the 'T5-base' model for this model.\n\nFlax/Jax Community Week, organized by HuggingFace and TPU usage sponsored by Google.\n\nThe model is trained on around ~11B tokens (64 size batch, 512 tokens, 350k steps).", "## load tokenizer", "## load model\n\n\n\nThe model is trained on 'de-noising' objectives followed by the script here and here. Currently This model doesn't have any generation capability. If you want this model to have generation capability, please do a finetuning on 'prefix-LM' objective mentioned in the paper. \n\nSee the tensorboard log in 'Training metrics' tab.\n\nPlease note that we haven't finetuned the model in any downstream task.", "## Proposal\n- Project Proposal", "## Participants\n- Ibraheem Muhammad Moosa\n- Tasnim Mohiuddin\n- Khalid Saifullah\n- Tahsin Mayeesha\n- M Saiful Bari", "## Useful links\n- Community Week timeline\n- Community Week README\n- Masked Language Modelling example scripts\n- Model Repository" ]
fill-mask
transformers
## BERT base-uncased for Swahili This model was trained using HuggingFace's Flax framework and is part of the [JAX/Flax Community Week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104) organized by [HuggingFace](https://huggingface.co). All training was done on a TPUv3-8 VM sponsored by the Google Cloud team. ## How to use ```python from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained("flax-community/bert-base-uncased-swahili") model = AutoModelForMaskedLM.from_pretrained("flax-community/bert-base-uncased-swahili") print(round((model.num_parameters())/(1000*1000)),"Million Parameters") # prints: 110 Million Parameters ``` #### **Training Data**: This model was trained on [Swahili Safi](https://huggingface.co/datasets/flax-community/swahili-safi) #### **More Details**: For more details and a demo, please check the [HF Swahili Space](https://huggingface.co/spaces/flax-community/Swahili)
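The loading snippet above stops at instantiating the model. A small follow-on sketch for masked-word prediction, using the example sentence from this record's widget metadata:

```python
from transformers import pipeline

unmasker = pipeline("fill-mask", model="flax-community/bert-base-uncased-swahili")
# Widget example sentence from this record's metadata.
print(unmasker("Si kila mwenye makucha [MASK] simba."))
```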
{"language": "sw", "datasets": ["flax-community/swahili-safi"], "widget": [{"text": "Si kila mwenye makucha [MASK] simba."}]}
flax-community/bert-base-uncased-swahili
null
[ "transformers", "pytorch", "jax", "tensorboard", "bert", "fill-mask", "sw", "dataset:flax-community/swahili-safi", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "sw" ]
TAGS #transformers #pytorch #jax #tensorboard #bert #fill-mask #sw #dataset-flax-community/swahili-safi #autotrain_compatible #endpoints_compatible #has_space #region-us
## BERT base-uncased for in Swahili This model was trained using HuggingFace's Flax framework and is part of the JAX/Flax Community Week organized by HuggingFace. All training was done on a TPUv3-8 VM sponsored by the Google Cloud team. ## How to use #### Training Data: This model was trained on Swahili Safi #### More Details: For more details and Demo please check HF Swahili Space
[ "## BERT base-uncased for in Swahili\n\nThis model was trained using HuggingFace's Flax framework and is part of the JAX/Flax Community Week organized by HuggingFace. All training was done on a TPUv3-8 VM sponsored by the Google Cloud team.", "## How to use", "#### Training Data:\nThis model was trained on Swahili Safi", "#### More Details:\nFor more details and Demo please check HF Swahili Space" ]
[ "TAGS\n#transformers #pytorch #jax #tensorboard #bert #fill-mask #sw #dataset-flax-community/swahili-safi #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "## BERT base-uncased for in Swahili\n\nThis model was trained using HuggingFace's Flax framework and is part of the JAX/Flax Community Week organized by HuggingFace. All training was done on a TPUv3-8 VM sponsored by the Google Cloud team.", "## How to use", "#### Training Data:\nThis model was trained on Swahili Safi", "#### More Details:\nFor more details and Demo please check HF Swahili Space" ]
text-classification
transformers
## Swahili News Classification with BERT This model was trained using HuggingFace's Flax framework and is part of the [JAX/Flax Community Week](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104) organized by [HuggingFace](https://huggingface.co). All training was done on a TPUv3-8 VM sponsored by the Google Cloud team. This [model](https://huggingface.co/flax-community/bert-base-uncased-swahili) was used as the base and fine-tuned for this task. ## How to use ```python from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("flax-community/bert-swahili-news-classification") model = AutoModelForSequenceClassification.from_pretrained("flax-community/bert-swahili-news-classification") ``` ``` Eval metrics (10% valid set): {'accuracy': 0.9114740008594757} ```
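The snippet above only loads the tokenizer and model. A minimal inference sketch follows, using (a shortened form of) the example text from this record's widget metadata; the label names are not listed in the card, so the sketch reads them from the model config rather than assuming them.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("flax-community/bert-swahili-news-classification")
model = AutoModelForSequenceClassification.from_pretrained(
    "flax-community/bert-swahili-news-classification"
)

text = "Idris ameandika kwenye ukurasa wake wa Instagram akimkumbusha Diamond kutekeleza ahadi yake."
inputs = tokenizer(text, return_tensors="pt", truncation=True)

with torch.no_grad():
    logits = model(**inputs).logits

predicted = logits.argmax(dim=-1).item()
print(model.config.id2label[predicted])  # label names come from the model config
```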
{"language": "sw", "datasets": ["flax-community/swahili-safi"], "widget": [{"text": "Idris ameandika kwenye ukurasa wake wa Instagram akimkumbusha Diamond kutekeleza ahadi yake kumpigia Zari magoti kumuomba msamaha kama alivyowahi kueleza awali.Idris ameandika;"}]}
flax-community/bert-swahili-news-classification
null
[ "transformers", "pytorch", "jax", "tensorboard", "safetensors", "bert", "text-classification", "sw", "dataset:flax-community/swahili-safi", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
[]
[ "sw" ]
TAGS #transformers #pytorch #jax #tensorboard #safetensors #bert #text-classification #sw #dataset-flax-community/swahili-safi #autotrain_compatible #endpoints_compatible #has_space #region-us
## Swahili News Classification with BERT This model was trained using HuggingFace's Flax framework and is part of the JAX/Flax Community Week organized by HuggingFace. All training was done on a TPUv3-8 VM sponsored by the Google Cloud team. This model was used as the base and fine-tuned for this task. ## How to use
[ "## Swahili News Classification with BERT\n\nThis model was trained using HuggingFace's Flax framework and is part of the JAX/Flax Community Week organized by HuggingFace. All training was done on a TPUv3-8 VM sponsored by the Google Cloud team.\n\nThis model was used as the base and fine-tuned for this task.", "## How to use" ]
[ "TAGS\n#transformers #pytorch #jax #tensorboard #safetensors #bert #text-classification #sw #dataset-flax-community/swahili-safi #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "## Swahili News Classification with BERT\n\nThis model was trained using HuggingFace's Flax framework and is part of the JAX/Flax Community Week organized by HuggingFace. All training was done on a TPUv3-8 VM sponsored by the Google Cloud team.\n\nThis model was used as the base and fine-tuned for this task.", "## How to use" ]