modelId (string, 4-81 chars) | tags (list) | pipeline_tag (string, 17 classes) | config (dict) | downloads (int64, 0-59.7M) | first_commit (timestamp[ns, tz=UTC]) | card (string, 51-438k chars)
---|---|---|---|---|---|---
bert-base-cased-finetuned-mrpc
|
[
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible",
"has_space"
] |
fill-mask
|
{
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 11,644 | 2023-05-03T09:48:37Z |
---
tags:
- autotrain
- vision
- image-classification
datasets:
- davanstrien/autotrain-data-imagein-hand
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
example_title: Palace
co2_eq_emissions:
emissions: 0.0029977062839944267
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 55028128547
- CO2 Emissions (in grams): 0.0030
## Validation Metrics
- Loss: 0.071
- Accuracy: 0.979
- Precision: 0.986
- Recall: 0.986
- AUC: 0.986
- F1: 0.986
|
bert-base-cased
|
[
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"en",
"dataset:bookcorpus",
"dataset:wikipedia",
"arxiv:1810.04805",
"transformers",
"exbert",
"license:apache-2.0",
"autotrain_compatible",
"has_space"
] |
fill-mask
|
{
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8,621,271 | 2023-05-03T09:48:47Z |
---
tags:
- autotrain
- vision
- image-classification
datasets:
- davanstrien/autotrain-data-imagein-hand
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
example_title: Palace
co2_eq_emissions:
emissions: 0.0028690409445669206
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 55028128549
- CO2 Emissions (in grams): 0.0029
## Validation Metrics
- Loss: 0.066
- Accuracy: 0.979
- Precision: 0.979
- Recall: 0.993
- AUC: 0.996
- F1: 0.986
|
bert-base-chinese
|
[
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"zh",
"arxiv:1810.04805",
"transformers",
"autotrain_compatible",
"has_space"
] |
fill-mask
|
{
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 3,377,486 | 2023-05-03T09:48:53Z |
---
tags:
- autotrain
- vision
- image-classification
datasets:
- davanstrien/autotrain-data-imagein-hand
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
example_title: Palace
co2_eq_emissions:
emissions: 0.9776478201913659
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 55028128550
- CO2 Emissions (in grams): 0.9776
## Validation Metrics
- Loss: 0.093
- Accuracy: 0.974
- Precision: 0.986
- Recall: 0.979
- AUC: 0.990
- F1: 0.982
|
bert-base-german-cased
|
[
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"de",
"transformers",
"exbert",
"license:mit",
"autotrain_compatible",
"has_space"
] |
fill-mask
|
{
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 175,983 | 2023-05-03T09:48:59Z |
---
tags:
- autotrain
- vision
- image-classification
datasets:
- davanstrien/autotrain-data-imagein-hand
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
example_title: Palace
co2_eq_emissions:
emissions: 1.0929097724608432
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 55028128551
- CO2 Emissions (in grams): 1.0929
## Validation Metrics
- Loss: 0.069
- Accuracy: 0.979
- Precision: 0.983
- Recall: 0.990
- AUC: 0.989
- F1: 0.986
|
bert-base-german-dbmdz-cased
|
[
"pytorch",
"jax",
"bert",
"fill-mask",
"de",
"transformers",
"license:mit",
"autotrain_compatible",
"has_space"
] |
fill-mask
|
{
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 1,814 | 2023-05-03T09:49:04Z |
---
tags:
- autotrain
- vision
- image-classification
datasets:
- ImageIN/ImageIn_annotations
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
example_title: Palace
co2_eq_emissions:
emissions: 0.7314698226256887
---
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 55028128552
- CO2 Emissions (in grams): 0.7315
## Validation Metrics
- Loss: 0.040
- Accuracy: 0.989
- Precision: 0.993
- Recall: 0.993
- AUC: 0.993
- F1: 0.993
|
xlm-mlm-ende-1024
|
[
"pytorch",
"tf",
"xlm",
"fill-mask",
"multilingual",
"en",
"de",
"arxiv:1901.07291",
"arxiv:1910.09700",
"transformers",
"license:cc-by-nc-4.0",
"autotrain_compatible",
"has_space"
] |
fill-mask
|
{
"architectures": [
"XLMWithLMHeadModel"
],
"model_type": "xlm",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 287 | null |
---
language: en
tags:
- multivae
license: apache-2.0
---
### Downloading this model from the Hub
This model was trained with multivae. It can be downloaded or reloaded using the method `load_from_hf_hub`
```python
>>> from multivae.models import AutoModel
>>> model = AutoModel.load_from_hf_hub(hf_hub_path="your_hf_username/repo_name")
```
|
AAli/distilbert-base-uncased-finetuned-cola
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-03T12:49:28Z |
---
language:
- es
license: gpl-3.0
tags:
- generated_from_trainer
model-index:
- name: xi-ciai-cba-martin-fierro
results: []
widget:
- text: "Aqui me pongo a cantar"
example_title: "Martin Fierro"
---
Hugging Face: IA Colaborativa
=============================
This repository contains the code and model I trained for the talk
"Hugging Face: IA Colaborativa" at the [XI Congreso de Innovación, Ambiente e
Ingeniería](https://fcefyn.unc.edu.ar/facultad/secretarias/extension/prosecretaria-de-desarrollo-sostenible/xi-congreso-de-innovacion-ambiente-e-ingenieria/),
Facultad de Ciencias Exactas, Físicas y Naturales, Universidad Nacional de Córdoba, Argentina, 2023.
To set everything up you need [`git-lfs`](https://git-lfs.com/) installed and enabled.
You can clone the repository with:
$ git clone https://huggingface.co/crscardellino/xi-ciai-cba-martin-fierro
Then create the environment and install the requirements:
$ python -m venv venv
$ source ./venv/bin/activate
(venv) $ pip install -r requirements.txt
The code was tested with Python 3.10 but should work with Python >= 3.8. The
requirements file installs the CPU build of [PyTorch](https://pytorch.org/)
v2.0.0, but you can adjust it to use GPUs provided the CUDA requirements are met.
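Once the environment is ready, a quick way to try the model is through the 🤗 Transformers `pipeline` API. This is only a minimal sketch: the prompt is the widget example from this card, and the generation parameters are arbitrary.
```python
from transformers import pipeline

# Load the fine-tuned Spanish GPT-2 model from the Hub
generator = pipeline("text-generation", model="crscardellino/xi-ciai-cba-martin-fierro")

# Prompt taken from the model card's widget example
print(generator("Aqui me pongo a cantar", max_length=50, do_sample=True)[0]["generated_text"])
```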
## Model Specifications (Auto Generated)
This model is a fine-tuned version of
[DeepESP/gpt2-spanish](https://huggingface.co/DeepESP/gpt2-spanish) on the
`./data/martin-fierro_train.txt` dataset. It achieves the following results on
the evaluation set:
- Loss: 3.9067
## Model description
GPT-2 model finetuned on the poem ["El Gaucho Martin
Fierro"](https://es.wikipedia.org/wiki/El_Gaucho_Mart%C3%ADn_Fierro)
## Intended uses & limitations
This was trained for the talk "Hugging Face: IA Colaborativa" @ [XI Congreso de
Innovación, Ambiente e
Ingeniería](https://fcefyn.unc.edu.ar/facultad/secretarias/extension/prosecretaria-de-desarrollo-sostenible/xi-congreso-de-innovacion-ambiente-e-ingenieria/),
Argentina, 2023.
## Training and evaluation data
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 4.3864 | 1.0 | 18 | 4.2025 |
| 3.948 | 2.0 | 36 | 4.0440 |
| 3.7962 | 3.0 | 54 | 3.9804 |
| 3.6105 | 4.0 | 72 | 3.9458 |
| 3.4444 | 5.0 | 90 | 3.9280 |
| 3.3855 | 6.0 | 108 | 3.9192 |
| 3.3142 | 7.0 | 126 | 3.9091 |
| 3.2192 | 8.0 | 144 | 3.9074 |
| 3.1615 | 9.0 | 162 | 3.9070 |
| 3.1637 | 10.0 | 180 | 3.9067 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cpu
- Datasets 2.11.0
- Tokenizers 0.13.3
|
AJ/rick-discord-bot
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational",
"humor"
] |
conversational
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 10 | 2023-05-03T13:22:40Z |
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---
# gszabo/distiluse-base-multilingual-cased-v2-epoch30
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 512 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('gszabo/distiluse-base-multilingual-cased-v2-epoch30')
embeddings = model.encode(sentences)
print(embeddings)
```
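Since the card mentions clustering and semantic search, a small follow-up sketch (assuming the same installed `sentence-transformers` package, which provides `util.cos_sim`) shows how the resulting embeddings could be compared:
```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('gszabo/distiluse-base-multilingual-cased-v2-epoch30')
sentences = ["This is an example sentence", "Each sentence is converted"]
embeddings = model.encode(sentences)

# Pairwise cosine similarities between the embedded sentences
similarities = util.cos_sim(embeddings, embeddings)
print(similarities)
```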
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=gszabo/distiluse-base-multilingual-cased-v2-epoch30)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 939 with parameters:
```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.MegaBatchMarginLoss.MegaBatchMarginLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": null,
"warmup_steps": 10000,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False})
(2): Dense({'in_features': 768, 'out_features': 512, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
)
```
## Citing & Authors
<!--- Describe where people can find more information -->
|
AbidHasan95/movieHunt2
|
[
"pytorch",
"distilbert",
"token-classification",
"transformers",
"autotrain_compatible"
] |
token-classification
|
{
"architectures": [
"DistilBertForTokenClassification"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 6 | null |
---
license: mit
---
Experimental proof of concept made for the [Huggingface JAX/Diffusers community sprint](https://github.com/huggingface/community-events/tree/main/jax-controlnet-sprint)
[Demo available here](https://huggingface.co/spaces/Cognomen/CatCon-Controlnet-WD-1-5-b2)
This is a controlnet for the Stable Diffusion checkpoint [Waifu Diffusion 1.5 beta 2](https://huggingface.co/waifu-diffusion/wd-1-5-beta2) which aims to guide image generation by conditioning outputs with patches of images from a common category of the training target examples. The current checkpoint has been trained for approx. 100k steps on a filtered subset of [Danbooru 2021](https://gwern.net/danbooru2021) using artists as the conditioned category with the aim of learning robust style transfer from an image example.
Major limitations:
- The current checkpoint was trained on 768x768 crops without aspect ratio checkpointing. Loss in coherence for non-square aspect ratios can be expected.
- The training dataset is extremely noisy and used without filtering stylistic outliers from within each category, so performance may be less than ideal. A more diverse dataset with a larger variety of styles and categories would likely have better performance.
- The Waifu Diffusion base model is a hybrid anime/photography model, and can unpredictably jump between those modalities.
- As styling is sensitive to divergences in model checkpoints, the capabilities of this controlnet are not expected to predictably apply to other SD 2.X checkpoints.
Waifu Diffusion 1.5 beta 2 is licensed under [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/). This controlnet imposes no restrictions beyond the MIT license, but it cannot be used independently of a base model.
|
AccurateIsaiah/DialoGPT-small-mozarkv2
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] |
conversational
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 12 | null |
---
license: mit
datasets:
- ccdv/govreport-summarization
- urialon/gov_report_validation
- urialon/gov_report_test
pipeline_tag: text2text-generation
---
Baseline model for the preprint [Unlimiformer: Long-Range Transformers with Unlimited Length Input](https://arxiv.org/abs/2305.01625)
This is a BART-base model finetuned as a baseline. The model was finetuned on GovReport using the data processing pipeline from SLED; to load the validation or test set for use with this model, please use the datasets [urialon/gov_report_validation](https://huggingface.co/datasets/urialon/gov_report_validation) and [urialon/gov_report_test](https://huggingface.co/datasets/urialon/gov_report_test).
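As a minimal sketch (assuming the 🤗 `datasets` library is installed), the referenced evaluation splits could be loaded like this; no split name is passed, so the default dictionary of splits is returned:
```python
from datasets import load_dataset

# Load the SLED-processed GovReport validation and test sets referenced above
validation = load_dataset("urialon/gov_report_validation")
test = load_dataset("urialon/gov_report_test")

print(validation)
print(test)
```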
|
AdapterHub/bert-base-uncased-pf-cq
|
[
"bert",
"en",
"arxiv:2104.08247",
"adapter-transformers",
"question-answering",
"adapterhub:qa/cq"
] |
question-answering
|
{
"architectures": null,
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
library_name: "transformers.js"
---
https://huggingface.co/MBZUAI/LaMini-T5-223M with ONNX weights to be compatible with Transformers.js.
Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
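As a rough sketch of the recommended conversion path (assuming a recent 🤗 Optimum release where `ORTModelForSeq2SeqLM.from_pretrained(..., export=True)` is available), exporting the original checkpoint to ONNX could look like this:
```python
from optimum.onnxruntime import ORTModelForSeq2SeqLM
from transformers import AutoTokenizer

# Convert the PyTorch checkpoint to ONNX on the fly
model = ORTModelForSeq2SeqLM.from_pretrained("MBZUAI/LaMini-T5-223M", export=True)
tokenizer = AutoTokenizer.from_pretrained("MBZUAI/LaMini-T5-223M")

# Save the converted weights locally so they can be arranged like this repo
# (with the ONNX files in an `onnx` subfolder)
model.save_pretrained("LaMini-T5-223M-onnx")
tokenizer.save_pretrained("LaMini-T5-223M-onnx")
```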
|
AdapterHub/roberta-base-pf-quoref
|
[
"roberta",
"en",
"dataset:quoref",
"arxiv:2104.08247",
"adapter-transformers",
"question-answering"
] |
question-answering
|
{
"architectures": null,
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---
# hli/lstm-qqp-scomp-sentence-transformer
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 2048 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('hli/lstm-qqp-scomp-sentence-transformer')
embeddings = model.encode(sentences)
print(embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=hli/lstm-qqp-scomp-sentence-transformer)
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 2813 with parameters:
```
{'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
```
{'scale': 20.0, 'similarity_fct': 'cos_sim'}
```
Parameters of the fit()-Method:
```
{
"epochs": 10,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": null,
"warmup_steps": 2813,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): WordEmbeddings(
(emb_layer): Embedding(400001, 300)
)
(1): LSTM(
(encoder): LSTM(300, 1024, batch_first=True, bidirectional=True)
)
(2): Pooling({'word_embedding_dimension': 2048, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': True, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information -->
|
Adielcane/Adiel
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-03T16:50:53Z |
---
license: creativeml-openrail-m
---
https://civitai.com/models/56751/ibaraki-kasen-touhou-project
|
Ahmadatiya97/Alannah
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: prova
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# prova
This model is a fine-tuned version of [bert-base-multilingual-uncased](https://huggingface.co/bert-base-multilingual-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5444
- F1 Score: 0.8762
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 Score |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log | 1.0 | 369 | 0.4424 | 0.7742 |
| No log | 2.0 | 738 | 0.5326 | 0.7994 |
| No log | 3.0 | 1107 | 0.4969 | 0.8681 |
| No log | 4.0 | 1476 | 0.5444 | 0.8762 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
Amalq/distilroberta-base-finetuned-anxiety-depression
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
license: creativeml-openrail-m
base_model: stabilityai/stable-diffusion-2-1-base
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- controlnet
inference: true
---
# controlnet-rgres/modelout
These are controlnet weights trained on stabilityai/stable-diffusion-2-1-base with a new type of conditioning.
This model was trained for fewer steps than the base AerialDreams ControlNet. This allows for easier stylization, even though the resulting images are less constrained by the segmentation.
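A minimal inference sketch with 🤗 Diffusers follows. The repo id `rgres/modelout`, the conditioning-image URL, and the prompt are placeholders/assumptions; the conditioning image should follow whatever segmentation format was used during training.
```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# Hypothetical repo id for these weights; adjust to the actual repository name.
controlnet = ControlNetModel.from_pretrained("rgres/modelout", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# Segmentation map used as the conditioning image (placeholder URL).
condition = load_image("https://example.com/segmentation.png")
image = pipe("aerial view of a small town", image=condition, num_inference_steps=30).images[0]
image.save("out.png")
```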
|
aisoftware/Loquela
|
[
"onnx"
] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
tags:
- image-classification
- pytorch
- huggingpics
metrics:
- accuracy
model-index:
- name: FishTreeRock_Classifier_v1
results:
- task:
name: Image Classification
type: image-classification
metrics:
- name: Accuracy
type: accuracy
value: 0.9850746393203735
---
# FishTreeRock_Classifier_v1
Autogenerated by HuggingPics🤗🖼️
Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb).
Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics).
## Example Images
#### fish

#### rock

#### tree

|
AmirServi/MyModel
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-03T20:48:14Z |
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: distilgpt2-finetuned-wikitext2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilgpt2-finetuned-wikitext2
This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 3.6421
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.7602 | 1.0 | 2334 | 3.6669 |
| 3.653 | 2.0 | 4668 | 3.6472 |
| 3.6006 | 3.0 | 7002 | 3.6421 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
Amirosein/roberta
|
[
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 6 | null |
---
library_name: stable-baselines3
tags:
- SpaceInvadersNoFrameskip-v4
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: DQN
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: SpaceInvadersNoFrameskip-v4
type: SpaceInvadersNoFrameskip-v4
metrics:
- type: mean_reward
value: 599.50 +/- 212.67
name: mean_reward
verified: false
---
# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4**
This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3)
and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).
The RL Zoo is a training framework for Stable Baselines3
reinforcement learning agents,
with hyperparameter optimization and pre-trained agents included.
## Usage (with SB3 RL Zoo)
RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
Install the RL Zoo (with SB3 and SB3-Contrib):
```bash
pip install rl_zoo3
```
```
# Download model and save it into the logs/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga alesthehuman -f logs/
python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```
If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:
```
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga alesthehuman -f logs/
python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```
## Training (with the RL Zoo)
```
python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
# Upload the model and generate video (when possible)
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga alesthehuman
```
## Hyperparameters
```python
OrderedDict([('batch_size', 32),
('buffer_size', 100000),
('env_wrapper',
['stable_baselines3.common.atari_wrappers.AtariWrapper']),
('exploration_final_eps', 0.01),
('exploration_fraction', 0.1),
('frame_stack', 4),
('gradient_steps', 1),
('learning_rate', 0.0001),
('learning_starts', 100000),
('n_timesteps', 1000000.0),
('optimize_memory_usage', False),
('policy', 'CnnPolicy'),
('target_update_interval', 1000),
('train_freq', 4),
('normalize', False)])
```
|
Amit29/t5-small-finetuned-xsum
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
tags:
- spacy
- token-classification
language:
- es
model-index:
- name: es_pipeline
results:
- task:
name: NER
type: token-classification
metrics:
- name: NER Precision
type: precision
value: 0.998766394
- name: NER Recall
type: recall
value: 0.9988961039
- name: NER F Score
type: f_score
value: 0.9988312447
---
| Feature | Description |
| --- | --- |
| **Name** | `es_pipeline` |
| **Version** | `0.0.0` |
| **spaCy** | `>=3.5.2,<3.6.0` |
| **Default Pipeline** | `transformer`, `ner` |
| **Components** | `transformer`, `ner` |
| **Vectors** | 0 keys, 0 unique vectors (0 dimensions) |
| **Sources** | n/a |
| **License** | n/a |
| **Author** | [n/a]() |
### Label Scheme
<details>
<summary>View label scheme (13 labels for 1 components)</summary>
| Component | Labels |
| --- | --- |
| **`ner`** | `BILLING_PERIOD_END`, `BILLING_PERIOD_START`, `BILL_OWNER`, `COMPANY_NAME`, `CUPS`, `DIRECTION`, `ENERGY_P1_PRICE`, `ENERGY_P2_PRICE`, `ENERGY_P3_PRICE`, `NIF`, `POWER_P1_PRICE`, `POWER_P2_PRICE`, `TOTAL_IMPORTE` |
</details>
### Accuracy
| Type | Score |
| --- | --- |
| `ENTS_F` | 99.88 |
| `ENTS_P` | 99.88 |
| `ENTS_R` | 99.89 |
| `TRANSFORMER_LOSS` | 6425.46 |
| `NER_LOSS` | 41888.91 |
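A small usage sketch follows, assuming the `es_pipeline` package built from this repository has been installed in the environment so that `spacy.load` can find it by name; the example text is an assumption:
```python
import spacy

# Assumes the es_pipeline package from this repo is installed
nlp = spacy.load("es_pipeline")

doc = nlp("Texto de ejemplo de una factura de energía")
for ent in doc.ents:
    print(ent.text, ent.label_)
```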
|
AmitT/test
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SoccerTwos
library_name: ml-agents
---
# **poca** Agent playing **SoccerTwos**
This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:
### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos
2. Step 1: Write your model_id: kreepy/poca-SoccerTwos
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
|
Amitabh/doc-classification
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
# Github Repository
- https://github.com/conceptofmind/PaLM
## Huggingface (WIP)
Huggingface integration is a work in progress. Please check out the Github repository.
## Acknowledgements
- <a href="https://github.com/CarperAI">CarperAI</a>, <a href="https://twitter.com/lcastricato">Louis Castricato</a>, and <a href="https://stability.ai/">Stability.ai</a> for the very generous sponsorship to work on machine learning research.
- <a href="https://github.com/lucidrains">Phil Wang (Lucidrains)</a> for his inspiring work and input on training and architectures.
- <a href="https://twitter.com/dmayhem93">Dakota ("He berk reacted once")</a>, <a href="https://twitter.com/jonbtow">Guac</a>, <a href="https://twitter.com/zach_nussbaum">Zach</a>, and <a href="">Aman</a> for providing information about Huggingface and Slurm. I typically only use Apex and DeepSpeed.
## FAQ
Three different size PaLM models (150m, 410m, 1b) have been trained with 8k context length on all of <a href="https://huggingface.co/datasets/c4">C4</a>. The models are compatible with Lucidrain's <a href="https://github.com/lucidrains/toolformer-pytorch">Toolformer-pytorch</a>, <a href="https://github.com/lucidrains/PaLM-pytorch">PaLM-pytorch</a>, and <a href="https://github.com/lucidrains/PaLM-rlhf-pytorch">PaLM-rlhf-pytorch</a>. A fourth 2b model is currently being trained. These are currently the baseline versions of the models and additional training will be done at a larger scale. All of the models will be further instruction-tuned on FLAN to provide flan-PaLM models.
The models were trained with <a href="https://github.com/HazyResearch/flash-attention">Flash Attention</a>, <a href="https://arxiv.org/abs/2212.10554">Xpos Rotary Embeddings</a> for better length extrapolation, and <a href="https://arxiv.org/abs/1911.02150">multi-query single-key-value attention</a> for more efficient decoding. The models have been uploaded to Torch hub and the files are additionally stored on the Huggingface hub. You can find each of the PyTorch model files here: <a href="https://huggingface.co/conceptofmind/palm-150m">PaLM-150m</a>, <a href="https://huggingface.co/conceptofmind/palm-410m">PaLM-410m</a>, <a href="https://huggingface.co/conceptofmind/palm-1b">PaLM-1b</a>. If the models are not downloading from Torch hub correctly, be sure to clear out the checkpoint and model folders in `.cache/torch/hub/`. If that still does not resolve the issue, you can download the files from the Huggingface repositories.
All of the training data has been pre-tokenized with the GPTNEOX tokenizer and blocked at sequence lengths of 8192. This helps avoid the large cost of preprocessing the data. The datasets are available on Huggingface in parquet format and chunks here: <a href="https://huggingface.co/datasets/conceptofmind/c4_0-to-20_neox_with_eos_8k">C4 Chunk 1</a>, <a href="https://huggingface.co/datasets/conceptofmind/c4_21-to-40_neox_with_eos_8k">C4 Chunk 2</a>, <a href="https://huggingface.co/datasets/conceptofmind/c4_41-to-60_neox_with_eos_8k">C4 Chunk 3</a>, <a href="https://huggingface.co/datasets/conceptofmind/c4_61-to-80_neox_with_eos_8k">C4 Chunk 4</a>, and <a href="https://huggingface.co/datasets/conceptofmind/c4_81-to-100_neox_with_eos_8k">C4 Chunk 5</a>. There is also an option in the distributed training script not to use the provided pre-tokenized C4 dataset and instead load and process another dataset such as openwebtext.
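As a small sketch (assuming the 🤗 `datasets` library; the `train` split name is an assumption), one of the pre-tokenized C4 chunks could be streamed rather than fully downloaded:
```python
from datasets import load_dataset

# Stream the first pre-tokenized C4 chunk (8192-token blocks, GPT-NeoX tokenizer)
c4_chunk = load_dataset(
    "conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train", streaming=True
)

# Inspect the first pre-tokenized example without downloading the whole chunk
first = next(iter(c4_chunk))
print(first.keys())
```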
|
Amro-Kamal/gpt
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
language:
- te
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
model-index:
- name: Whisper Medium Telugu - Polapragada Yashwant
results: []
datasets:
- parambharat/telugu_asr_corpus
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Whisper Medium Telugu - Polapragada Yashwant
This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the telugu-asr-corpus dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 2
- training_steps: 52
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.28.0
- Pytorch 2.0.0
- Datasets 2.12.1.dev0
- Tokenizers 0.13.3
|
Amrrs/south-indian-foods
|
[
"pytorch",
"tensorboard",
"vit",
"image-classification",
"transformers",
"huggingpics",
"model-index",
"autotrain_compatible"
] |
image-classification
|
{
"architectures": [
"ViTForImageClassification"
],
"model_type": "vit",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 21 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: mymodel
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mymodel
This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.3705
- Rouge1: 1.762
- Rouge2: 1.4938
- Rougel: 1.7366
- Rougelsum: 1.7385
- Gen Len: 19.7335
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:|
| 1.446 | 1.0 | 12500 | 1.3705 | 1.762 | 1.4938 | 1.7366 | 1.7385 | 19.7335 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
Ana1315/A
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
tags:
- image-classification
- pytorch
- huggingpics
metrics:
- accuracy
model-index:
- name: SkiingTest
results:
- task:
name: Image Classification
type: image-classification
metrics:
- name: Accuracy
type: accuracy
value: 0.7666666507720947
---
# SkiingTest
Autogenerated by HuggingPics🤗🖼️
Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb).
Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics).
## Example Images
#### Big Mountain

#### Freeride World Tour

#### Freestyle

#### Skiing

|
Ana1315/ana
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: bert-base-uncased-finetuned-cola
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
config: cola
split: validation
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.49971547639767977
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-uncased-finetuned-cola
This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4689
- Matthews Correlation: 0.4997
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.4971 | 1.0 | 535 | 0.4689 | 0.4997 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
Anamika/autonlp-Feedback1-479512837
|
[
"pytorch",
"xlm-roberta",
"text-classification",
"unk",
"dataset:Anamika/autonlp-data-Feedback1",
"transformers",
"autonlp",
"co2_eq_emissions"
] |
text-classification
|
{
"architectures": [
"XLMRobertaForSequenceClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 34 | null |
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: doom_health_gathering_supreme
type: doom_health_gathering_supreme
metrics:
- type: mean_reward
value: 11.46 +/- 4.74
name: mean_reward
verified: false
---
An **APPO** model trained on the **doom_health_gathering_supreme** environment.
This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
## Downloading the model
After installing Sample-Factory, download the model with:
```
python -m sample_factory.huggingface.load_from_hub -r andli28/rl_course_vizdoom_health_gathering_supreme
```
## Using the model
To run the model after download, use the `enjoy` script corresponding to this environment:
```
python -m .usr.local.lib.python3.10.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```
You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details
## Training with this model
To continue training with this model, use the `train` script corresponding to this environment:
```
python -m .usr.local.lib.python3.10.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```
Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
|
Anders/itu-ams-summa
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
library_name: "transformers.js"
---
https://huggingface.co/facebook/detr-resnet-101 with ONNX weights to be compatible with Transformers.js.
Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
|
AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_1_wikiqa_copy
|
[
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] |
feature-extraction
|
{
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 2 | 2023-05-04T04:57:35Z |
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad_v2
model-index:
- name: bert-base-multilingual-cased1
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-multilingual-cased1
This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on the squad_v2 dataset.
It achieves the following results on the evaluation set:
- Loss: 1.3467
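Assuming the checkpoint is pushed to the Hub, it can be exercised with the standard extractive question-answering pipeline (the repository path below is a placeholder; substitute the actual model id):
```python
from transformers import pipeline

qa = pipeline("question-answering", model="<user>/bert-base-multilingual-cased1")  # placeholder repo id

result = qa(
    question="Where is the Eiffel Tower located?",
    context="The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France.",
)
print(result)  # dict with "answer", "score", "start" and "end"
```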
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.1335 | 1.0 | 1547 | 1.0084 |
| 0.7574 | 2.0 | 3094 | 0.9890 |
| 0.5211 | 3.0 | 4641 | 1.0139 |
| 0.335 | 4.0 | 6188 | 1.1702 |
| 0.2465 | 5.0 | 7735 | 1.3467 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1_squad2.0
|
[
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] |
question-answering
|
{
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 2 | null |
---
license: mit
language:
- id
---
# Nondeterministic Context (NDETC) Stemmer
**This is Kaenova's maintained NDETCStemmer, a fork of the [original](https://github.com/alifadwitiyap/NDETCStemmer). I uploaded the model to Hugging Face for model reliability.**
Nondeterministic Context (NDETC) Stemmer is a library that implements a context-based nondeterministic stemming method to solve the problem of morphologically ambiguous words (words with more than one meaning) when stemming Indonesian text.
## Installation
Installing this library requires Python's pip, which you can install by following [this link](https://pip.pypa.io/en/stable/installation/#).
Then run the following command in the terminal you are using:
```bash
pip install NDETCStemmer-kaenova
```
## Usage
After installing the library, you can try it out by creating a new file in the same folder as NDETCStemmer_IO.py, or by modifying NDETCStemmer_IO.py to follow the code below:
```python
#import NDETCStemmer library
from NDETCStemmer import NDETCStemmer
#init stemmer
stemmer=NDETCStemmer()
# stemming process
output=stemmer.stem('boleh saya memerah lembu ini')
print(output)
#boleh saya perah lembu ini
print(stemmer.stem('bibirnya memerah tangannya jadi selengket madu'))
#bibir merah tangan jadi lengket madu
```
## Citation
```
@INPROCEEDINGS{9617514,
author={Bunyamin and Huda, Arief Fatchul and Suryani, Arie Ardiyanti},
booktitle={2021 International Conference on Data Science and Its Applications (ICoDSA)},
title={Indonesian Stemmer for Ambiguous Word based on Context},
year={2021},
volume={},
number={},
pages={1-9},
doi={10.1109/ICoDSA53588.2021.9617514}}
```
## About the Nondeterministic Context Stemmer
This stemmer was developed by <a href="https://ieeexplore.ieee.org/document/9617514">Bunyamin et al.</a> as a continuation of the nondeterministic approach proposed by <a href="https://ieeexplore.ieee.org/document/6021829">Purwarianti</a>. In Purwarianti's work, each word is not checked against morphological rules in a fixed order; instead, all rules are applied and the results are stored one by one in a list of candidate words. The final word is then chosen using several heuristics, namely whether the candidate exists in a dictionary of root words and the length of the word. </br> </br>The problem faced by Purwarianti's nondeterministic method and by deterministic stemmers (<a href="https://dl.acm.org/doi/10.1145/1316457.1316459">Andiani et al.</a>) is the ambiguity of the words produced by the stemmer. For example, the word "memalukan" has two possible root words, "malu" and "palu", depending on context. In the sentences "dia tidak ingin memalukan keluarganya" and "tukang memalukan paku di tembok", the ambiguous word "memalukan" is always stemmed to "malu". Based on context, the result should be "malu" in the first sentence and "palu" in the second. Purwarianti's nondeterministic stemmer produces several candidate root words for such ambiguous inputs, but is weak at choosing the correct one because it lacks context. </br></br>The Nondeterministic Context Stemmer improves on that nondeterministic approach by adding context to the selection of the best word. To solve the problem of picking the best word for each ambiguous input, a word2vec model is used. This makes the stemmer more accurate than the previous approaches.
#### Advantages
The NDETC stemmer is able to stem ambiguous words, reduplicated words, and compound words with affixes. However, the quality of the stemmer depends on the affix rule checker, the word model, the root-word dictionary, and the context. Below are some examples where the nondeterministic context stemmer (NDETC) outperforms a deterministic stemmer (DET):
- Input: kalau pandai <b>menggulai</b>, badar jadi tenggiri, output (NDETC): kalau pandai <b>gulai</b> badar jadi tenggiri. Output (DET): kalau pandai <b>gulai</b> badar jadi tenggiri
- Input: ibu <b>menggulai</b> kopi. Output (NDETC): ibu <b>gula</b> kopi. Output (DET): ibu <b>gulai</b> kopi
- Input: <b>Selangkah</b> lagi, Pedrosa jadi pembalap tes KTM. Output (NDETC): <b>langkah</b> lagi pedrosa jadi balap tes ktm. Output (DET): <b>selang</b> lagi pedrosa jadi balap tes ktm
- Input: Indonesia memiliki <b>beribu-ribu</b> pulau. Output (NDETC): indonesia milik <b>ribu</b> pulau. Output (DET): indonesia milik <b>beribu-ribu</b> pulau
- Input: Kita harus <b>mempertanggungjawabkannya</b>. Output (NDETC): kita harus <b>tanggung jawab</b>. Output (DET): kita harus <b>mempertanggungjawabkannya</b>
- Input: pengampun. Output (NDETC): ampun. Output (DET): kam
- Input: membantah. Output (NDETC): bantah. Output (DET): ban
- Input: pemakalah. Output (NDETC): makalah. Output (DET): maka
- Input: berimanlah. Output (NDETC): iman. Output (DET): rim
- Input: berantai. Output (NDETC): rantai. Output (DET): beranta
- Input: berduri. Output (NDETC): duri. Output (DET): dur
- Input: peperangan. Output (NDETC): perang. Output (DET): peperangan
#### Limitations
- The infix rules -el-, -em-, -er-, and -in- are not used in this stemmer because they have a significant impact on the entire stemming process.
- The context of the words before and after a morphologically ambiguous word often does not support choosing the best word.
#### Important notes
- The quality of the word model produced by word2vec training affects the selection of the best word for ambiguous words. The word model is built by training word2vec with several parameters; these parameters must be chosen carefully and the resulting models compared with one another. This stemmer includes a model trained on an Indonesian-language Wikipedia corpus downloaded on 2 November 2021.
- The quality of the root-word dictionary affects the quality of the stemmer. The root-word dictionary must be free of affixed words.
## License
[MIT](https://choosealicense.com/licenses/mit/)
|
AnonymousSub/rule_based_roberta_hier_triplet_0.1_epochs_1_shard_1
|
[
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] |
feature-extraction
|
{
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 6 | null |
https://civitai.com/models/7706/shirt-tug-pose-lora
LoRA model for a shirt tug pose. Suggested LoRA weights: 0.5 ~ 1.5; the default weight of 1 should be good enough.
If the pose doesn't show up with some checkpoints, try higher weights.
Trigger words: shirt, naked shirt, shirt tug
|
AnonymousSub/rule_based_roberta_hier_triplet_0.1_epochs_1_shard_1_squad2.0
|
[
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] |
question-answering
|
{
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 2 | null |
---
license: mit
tags:
- generated_from_keras_callback
model-index:
- name: gpt_image_clef2
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# gpt_image_clef2
This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 1.2611
- Train Rouge: 0.4475
- Validation Loss: 1.1578
- Validation Rouge: 0.3944
- Epoch: 25
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 0.0005, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 0.0005, 'decay_steps': 2554800, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.99, 'epsilon': 0.2, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: float32
### Training results
| Train Loss | Train Rouge | Validation Loss | Validation Rouge | Epoch |
|:----------:|:-----------:|:---------------:|:----------------:|:-----:|
| 1.5255 | 0.4213 | 1.0251 | 0.4284 | 0 |
| 1.1805 | 0.4673 | 0.9779 | 0.4442 | 1 |
| 1.1394 | 0.4800 | 0.9561 | 0.4509 | 2 |
| 1.1168 | 0.4871 | 0.9369 | 0.4595 | 3 |
| 1.1036 | 0.4915 | 0.9314 | 0.4623 | 4 |
| 1.0971 | 0.4936 | 0.9283 | 0.4624 | 5 |
| 1.0946 | 0.4947 | 0.9315 | 0.4617 | 6 |
| 1.0962 | 0.4947 | 0.9323 | 0.4614 | 7 |
| 1.1001 | 0.4943 | 0.9405 | 0.4586 | 8 |
| 1.1065 | 0.4933 | 0.9501 | 0.4560 | 9 |
| 1.1146 | 0.4913 | 0.9614 | 0.4498 | 10 |
| 1.1240 | 0.4890 | 0.9726 | 0.4471 | 11 |
| 1.1341 | 0.4864 | 0.9852 | 0.4429 | 12 |
| 1.1451 | 0.4836 | 0.9982 | 0.4389 | 13 |
| 1.1564 | 0.4799 | 1.0160 | 0.4319 | 14 |
| 1.1680 | 0.4766 | 1.0273 | 0.4296 | 15 |
| 1.1793 | 0.4732 | 1.0405 | 0.4267 | 16 |
| 1.1901 | 0.4699 | 1.0556 | 0.4235 | 17 |
| 1.2007 | 0.4666 | 1.0692 | 0.4184 | 18 |
| 1.2108 | 0.4632 | 1.0796 | 0.4168 | 19 |
| 1.2207 | 0.4603 | 1.0998 | 0.4093 | 20 |
| 1.2299 | 0.4574 | 1.1135 | 0.4057 | 21 |
| 1.2386 | 0.4547 | 1.1297 | 0.4026 | 22 |
| 1.2469 | 0.4519 | 1.1396 | 0.4013 | 23 |
| 1.2540 | 0.4497 | 1.1467 | 0.3960 | 24 |
| 1.2611 | 0.4475 | 1.1578 | 0.3944 | 25 |
### Framework versions
- Transformers 4.28.1
- TensorFlow 2.10.1
- Datasets 2.11.0
- Tokenizers 0.13.3
|
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_10
|
[
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] |
feature-extraction
|
{
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 6 | null |
Access to model hominpark/donut-base-hangul-handwritten-KMOU-10000 is restricted and you are not in the authorized list. Visit https://huggingface.co/hominpark/donut-base-hangul-handwritten-KMOU-10000 to ask for access.
|
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_squad2.0
|
[
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] |
question-answering
|
{
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 4 | null |
---
license: other
---
Official Repository
Read more about this model here: https://civitai.com/models/53761/goofball-mix. Please support it by giving 5 stars and a heart.
Also consider supporting me on Patreon:
https://www.patreon.com/GoofyAi
I have a huge collection of character LoRAs that I trained myself. Check my profile: https://civitai.com/user/Goofy_Ai/models
|
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_wikiqa_copy
|
[
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] |
feature-extraction
|
{
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 2 | null |
# readme
Saving some models I like. I will collect them here for easy use (downloading).
## why
- [x] sometimes I want to use a model but forget where to download it.
## disclaimer
If anything here infringes on your rights, please notify me and I will delete it.
my email: `ymc-github@gmail.com` or `yemiancheng1993@163.com`
|
AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_1_squad2.0
|
[
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] |
question-answering
|
{
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 4 | null |
## 4-bit quantized weights (q4_0) of TRL-Lib's Stack LLaMa (based on Meta's 7B LLaMa model), to be used with llama.cpp
TRL-Lib's adapter:
https://huggingface.co/trl-lib/llama-7b-se-rl-peft
llama.cpp project:
https://github.com/ggerganov/llama.cpp
These are the full model weights of Meta's original model with TRL-Lib's adapter already applied, not just the LoRA weights.
With this in mind, make sure you are complying with Meta's original licensing by applying for access at the following link.
https://ai.facebook.com/blog/large-language-model-llama-meta-ai/
Original HF LLaMa model:
https://huggingface.co/decapoda-research/llama-7b-hf
The weights bin file should be placed in the models/trl-lib subdirectory of llama.cpp.
This is a ggjt version of the model; the tensors are 32-byte aligned to allow mmap loading.
|
AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1_squad2.0
|
[
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] |
question-answering
|
{
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 2 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: my_awesome_model
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# my_awesome_model
This model is a fine-tuned version of [boldirev-as/my_awesome_model](https://huggingface.co/boldirev-as/my_awesome_model) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1697
- Accuracy: 0.9727
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 0.0883 | 1.0 | 2500 | 0.0926 | 0.971 |
| 0.0737 | 2.0 | 5000 | 0.0857 | 0.9751 |
| 0.0375 | 3.0 | 7500 | 0.1289 | 0.9732 |
| 0.0142 | 4.0 | 10000 | 0.1569 | 0.9731 |
| 0.0002 | 5.0 | 12500 | 0.1697 | 0.9727 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0
- Datasets 2.1.0
- Tokenizers 0.13.3
|
AnonymousSub/unsup-consert-base_copy_wikiqa
|
[
"pytorch",
"bert",
"text-classification",
"transformers"
] |
text-classification
|
{
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 26 | null |
---
tags:
- generated_from_trainer
model-index:
- name: wav2vec2_bert_fusion_iemocap_3
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2_bert_fusion_iemocap_3
This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 4
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu117
- Datasets 2.11.0
- Tokenizers 0.13.2
|
AshtonBenson/DialoGPT-small-quentin
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-04T09:00:38Z |
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: 244.64 +/- 22.66
name: mean_reward
verified: false
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and filename below are placeholders; substitute the ones for this repository):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo id / filename - replace with this repository's values
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
|
Ayham/xlnet_roberta_new_summarization_cnn_dailymail
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: finetuned-bert-mrpc
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
config: mrpc
split: validation
args: mrpc
metrics:
- name: Accuracy
type: accuracy
value: 0.8382352941176471
- name: F1
type: f1
value: 0.8877551020408163
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# finetuned-bert-mrpc
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4588
- Accuracy: 0.8382
- F1: 0.8878
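Since MRPC is a sentence-pair task, the model expects two sentences per example. A minimal usage sketch (the repository path is a placeholder for wherever this checkpoint is hosted):
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "<user>/finetuned-bert-mrpc"  # placeholder repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# Encode both sentences of the pair together
inputs = tokenizer(
    "The company reported strong quarterly earnings.",
    "Quarterly earnings at the company were strong.",
    return_tensors="pt",
)
with torch.no_grad():
    probs = torch.softmax(model(**inputs).logits, dim=-1)
print(probs)  # for MRPC, index 1 is typically the "equivalent" (paraphrase) class
```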
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.579 | 1.0 | 230 | 0.4858 | 0.7745 | 0.8521 |
| 0.4163 | 2.0 | 460 | 0.4477 | 0.8088 | 0.8721 |
| 0.2533 | 3.0 | 690 | 0.4588 | 0.8382 | 0.8878 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
Ayham/xlnet_roberta_summarization_cnn_dailymail
|
[
"pytorch",
"tensorboard",
"encoder-decoder",
"text2text-generation",
"dataset:cnn_dailymail",
"transformers",
"generated_from_trainer",
"autotrain_compatible"
] |
text2text-generation
|
{
"architectures": [
"EncoderDecoderModel"
],
"model_type": "encoder-decoder",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 10 | null |
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: 257.12 +/- 15.74
name: mean_reward
verified: false
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and filename below are placeholders; substitute the ones for this repository):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo id / filename - replace with this repository's values
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
|
Ayham/xlnetgpt2_xsum7
|
[
"pytorch",
"tensorboard",
"encoder-decoder",
"text2text-generation",
"transformers",
"generated_from_trainer",
"autotrain_compatible"
] |
text2text-generation
|
{
"architectures": [
"EncoderDecoderModel"
],
"model_type": "encoder-decoder",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8 | 2023-05-04T10:49:24Z |
---
license: mit
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de-fr
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-de-fr
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1625
- F1: 0.8550
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.291 | 1.0 | 715 | 0.1845 | 0.8249 |
| 0.1456 | 2.0 | 1430 | 0.1594 | 0.8449 |
| 0.0942 | 3.0 | 2145 | 0.1625 | 0.8550 |
### Framework versions
- Transformers 4.27.4
- Pytorch 2.0.0+cpu
- Datasets 2.11.0
- Tokenizers 0.13.3
|
Aymene/opus-mt-en-ro-finetuned-en-to-ro
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
tags:
- generated_from_trainer
model-index:
- name: prabigya_trained
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# prabigya_trained
This model was trained from scratch on the None dataset.
It achieves the following results on the evaluation set:
- eval_loss: 2.1115
- eval_runtime: 113.0
- eval_samples_per_second: 48.274
- eval_steps_per_second: 3.018
- epoch: 0.49
- step: 3000
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
Ayoola/cdial-yoruba-test
|
[
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"transformers",
"has_space"
] |
automatic-speech-recognition
|
{
"architectures": [
"Wav2Vec2ForCTC"
],
"model_type": "wav2vec2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 25 | null |
---
language:
- en
pipeline_tag: conversational
tags:
- psychology
- dialogues
- empathy
- gpt2
---
## Model description
DialoGPT finetuned on empathetic dialogues
## Training data
It was fine-tuned on the "Facebook Empathetic Dialogues" dataset: about 25k conversations grounded in emotional situations, collected to facilitate training and evaluating dialogue systems.
You can find the dataset [here](https://www.kaggle.com/datasets/atharvjairath/empathetic-dialogues-facebook-ai).
### How to use
```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("AliiaR/DialoGPT-medium-empathetic-dialogues")
>>> model = AutoModelForCausalLM.from_pretrained("AliiaR/DialoGPT-medium-empathetic-dialogues")
```
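A single-turn generation sketch in the usual DialoGPT style (the prompt and generation settings are illustrative):
```python
>>> # Encode a user utterance followed by the end-of-sequence token
>>> input_ids = tokenizer.encode("I just lost my job and I feel terrible." + tokenizer.eos_token, return_tensors="pt")
>>> # Generate a reply and decode only the newly generated tokens
>>> output_ids = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
>>> print(tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True))
```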
|
Ayoola/pytorch_model
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- wer
model-index:
- name: whisper_med_ar_aug
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# whisper_med_ar_aug
This model is a fine-tuned version of [Seyfelislem/whisper-medium-arabic-suite-II](https://huggingface.co/Seyfelislem/whisper-medium-arabic-suite-II) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1939
- Wer: 16.5040
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 400
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.1721 | 0.17 | 400 | 0.1939 | 16.5040 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0
- Datasets 2.12.0
- Tokenizers 0.13.3
|
Ayran/DialoGPT-medium-harry-potter-1-through-4-plus-6
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] |
conversational
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 12 | null |
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SoccerTwos
library_name: ml-agents
---
# **poca** Agent playing **SoccerTwos**
This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:
### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos
2. Write your model_id: WilliamADSP/poca-SoccerTwos-v2
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
|
Ayu/Shiriro
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-04T11:00:13Z |
---
tags:
- image-classification
- pytorch
- huggingpics
metrics:
- accuracy
model-index:
- name: cats-vs-dogs
results:
- task:
name: Image Classification
type: image-classification
metrics:
- name: Accuracy
type: accuracy
value: 1.0
---
# cats-vs-dogs
Autogenerated by HuggingPics🤗🖼️
Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb).
Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics).
## Example Images
#### cat

#### dog

|
AyushPJ/ai-club-inductions-21-nlp-roBERTa-base-squad-v2
|
[
"pytorch",
"roberta",
"question-answering",
"transformers",
"generated_from_trainer",
"autotrain_compatible"
] |
question-answering
|
{
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8 | null |
---
license: mit
datasets:
- bigbio/chemdner
- ncbi_disease
- jnlpba
- bigbio/n2c2_2018_track2
- bigbio/bc5cdr
widget:
- text: Drug<SEP>He was given aspirin and paracetamol.
language:
- en
metrics:
- precision
- recall
- f1
pipeline_tag: token-classification
tags:
- token-classification
- biology
- medical
- zero-shot
- few-shot
library_name: transformers
---
# Zero and few shot NER for biomedical texts
## Model description
This model was created during the research collaboration between Bayer Pharma and Serbian Institute for Artificial Intelligence Research and Development.
The model is trained on 25+ biomedical NER classes, can also perform zero-shot inference, and can be further fine-tuned for new classes with just a few examples (few-shot learning).
For more details about our methods please see the paper named ["A transformer-based method for zero and few-shot biomedical named entity recognition"](https://arxiv.org/abs/2305.04928). The model corresponds to the BioBERT-based model, trained with 1 in the first segment (see the paper for more details).
The model takes two strings as input. String1 is the NER label that is being searched for in the second string; it must be a phrase describing the entity. String2 is a short text in which String1 is searched for semantically.
The model outputs a list of zeros and ones marking occurrences of the named entity, one value per token of String2 (tokens as produced by the transformer tokenizer).
## Example of usage
```python
from transformers import AutoTokenizer
from transformers import BertForTokenClassification
modelname = 'ProdicusII/ZeroShotBioNER' # modelpath
tokenizer = AutoTokenizer.from_pretrained(modelname) ## loading the tokenizer of that model
string1 = 'Drug'
string2 = 'No recent antibiotics or other nephrotoxins, and no symptoms of UTI with benign UA.'
encodings = tokenizer(string1, string2, is_split_into_words=False,
padding=True, truncation=True, add_special_tokens=True, return_offsets_mapping=False,
max_length=512, return_tensors='pt')
model0 = BertForTokenClassification.from_pretrained(modelname, num_labels=2)
prediction_logits = model0(**encodings)
print(prediction_logits)
```
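To turn the raw logits into the per-token 0/1 labels described above, take the argmax over the two classes (continuing the example above; a minimal sketch):
```python
import torch

# One prediction (0 or 1) per token of the tokenized input
predictions = torch.argmax(prediction_logits.logits, dim=-1)
tokens = tokenizer.convert_ids_to_tokens(encodings["input_ids"][0])
for token, label in zip(tokens, predictions[0].tolist()):
    print(token, label)  # 1 marks tokens that belong to the requested entity class
```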
## Example of fine-tuning with few-shot learning
In order to fine-tune the model to a new entity using a few shots, the dataset needs to be transformed into a torch.utils.data.Dataset containing BERT tokens and a set of 0s and 1s (1 wherever the class is positive and should be predicted as a member of the given NER class). After the dataset is created, the following can be done (for more details, please have a look at the code on GitHub - https://github.com/br-ai-ns-institute/Zero-ShotNER):
```python
training_args = TrainingArguments(
output_dir=os.path.join('Results', class_unseen, str(j)+'Shot'), # folder for results
num_train_epochs=10, # number of epochs
per_device_train_batch_size=16, # batch size per device during training
per_device_eval_batch_size=16, # batch size for evaluation
weight_decay=0.01, # strength of weight decay
logging_dir=os.path.join('Logs', class_unseen, str(j)+'Shot'), # folder for logs
save_strategy='epoch',
evaluation_strategy='epoch',
load_best_model_at_end=True,
)
model0 = BertForTokenClassification.from_pretrained(model_path, num_labels=2)
trainer = Trainer(
model=model0, # pretrained model
args=training_args, # training arguments
train_dataset=dataset, # Object of class torch.utils.data.Dataset for training
eval_dataset=dataset_valid # Object of class torch.utils.data.Dataset for validation
)
start_time = time.time()
trainer.train()
total_time = time.time()-start_time
model0_path = os.path.join('Results', class_unseen, str(j)+'Shot', 'Model')
os.makedirs(model0_path, exist_ok=True)
trainer.save_model(model0_path)
```
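The `dataset` and `dataset_valid` objects above are not defined in this card. A minimal, hypothetical sketch of how such a dataset could be built (field names and construction are assumptions, not the authors' exact code):
```python
import torch

class FewShotNERDataset(torch.utils.data.Dataset):
    """Pairs of (label phrase, text) encoded together, with one 0/1 label per token."""

    def __init__(self, examples, tokenizer, max_length=512):
        # examples: list of dicts like {"label_phrase": "Drug", "text": "...", "token_labels": [0, 1, ...]}
        # token_labels are assumed to be already aligned and padded to max_length
        self.encodings = tokenizer(
            [e["label_phrase"] for e in examples],
            [e["text"] for e in examples],
            truncation=True, padding="max_length", max_length=max_length,
        )
        self.labels = [e["token_labels"] for e in examples]

    def __getitem__(self, idx):
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        item["labels"] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        return len(self.labels)
```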
## Available classes
The following datasets and entities were used for training, and they can therefore be used as the label in the first segment (as the first string). Note that multi-word strings have been merged.
* NCBI
* Specific Disease
* Composite Mention
* Modifier
* Disease Class
* BIORED
* Sequence Variant
* Gene Or Gene Product
* Disease Or Phenotypic Feature
* Chemical Entity
* Cell Line
* Organism Taxon
* CDR
* Disease
* Chemical
* CHEMDNER
* Chemical
* Chemical Family
* JNLPBA
* Protein
* DNA
* Cell Type
* Cell Line
* RNA
* n2c2
* Drug
* Frequency
* Strength
* Dosage
* Form
* Reason
* Route
* ADE
* Duration
On top of this, one can use the model in zero-shot regime with other classes, and also fine-tune it with few examples of other classes.
## Code availibility
Code used for training and testing the model is available at https://github.com/br-ai-ns-institute/Zero-ShotNER
## Citation
If you use this model, or are inspired by it, please cite in your paper the following paper:
Košprdić M.,Prodanović N., Ljajić A., Bašaragin B., Milošević N., 2023. A transformer-based method for zero and few-shot biomedical named entity recognition. arXiv preprint arXiv:2305.04928. https://arxiv.org/abs/2305.04928
or in bibtex:
```
@misc{kosprdic2023transformerbased,
title={A transformer-based method for zero and few-shot biomedical named entity recognition},
author={Miloš Košprdić and Nikola Prodanović and Adela Ljajić and Bojana Bašaragin and Nikola Milošević},
year={2023},
eprint={2305.04928},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
|
AyushPJ/ai-club-inductions-21-nlp-roBERTa
|
[
"pytorch",
"roberta",
"question-answering",
"transformers",
"generated_from_trainer",
"autotrain_compatible"
] |
question-answering
|
{
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8 | null |
---
tags:
- autotrain
- summarization
language:
- unk
widget:
- text: "I love AutoTrain 🤗"
datasets:
- DmitriyVasiliev/autotrain-data-mbart-rua-par
co2_eq_emissions:
emissions: 16.644739157086175
---
# Model Trained Using AutoTrain
- Problem type: Summarization
- Model ID: 55374129107
- CO2 Emissions (in grams): 16.6447
## Validation Metrics
- Loss: 1.187
- Rouge1: 7.954
- Rouge2: 2.916
- RougeL: 7.944
- RougeLsum: 7.852
- Gen Len: 61.430
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/DmitriyVasiliev/autotrain-mbart-rua-par-55374129107
```
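Alternatively, assuming the checkpoint loads with the standard `transformers` summarization pipeline, it can be run locally (a sketch; the repo id is taken from the endpoint above and not separately verified):
```python
from transformers import pipeline

summarizer = pipeline("summarization", model="DmitriyVasiliev/autotrain-mbart-rua-par-55374129107")
print(summarizer("I love AutoTrain", max_length=64, min_length=5)[0]["summary_text"])
```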
|
Azaghast/DistilBERT-SCP-Class-Classification
|
[
"pytorch",
"distilbert",
"text-classification",
"transformers"
] |
text-classification
|
{
"architectures": [
"DistilBertForSequenceClassification"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 42 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: my_awesome_qa_model
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# my_awesome_qa_model
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BJTK2/model_name
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: bert-large-uncased-finetuned-clinc150
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-large-uncased-finetuned-clinc150
This model is a fine-tuned version of [bert-large-uncased](https://huggingface.co/bert-large-uncased) on the clinc_oos dataset.
It achieves the following results on the evaluation set:
- Loss: 2.6555
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.6666 | 1.0 | 324 | 2.9589 |
| 2.8829 | 2.0 | 648 | 2.7507 |
| 2.6216 | 3.0 | 972 | 2.6256 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BSC-LT/gpt2-large-bne
|
[
"pytorch",
"gpt2",
"text-generation",
"es",
"dataset:bne",
"arxiv:2107.07253",
"transformers",
"national library of spain",
"spanish",
"bne",
"license:apache-2.0"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 11 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: bert-base-uncased-finetuned-cola
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
config: cola
split: validation
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.49971547639767977
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-uncased-finetuned-cola
This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4699
- Matthews Correlation: 0.4997
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5016 | 1.0 | 535 | 0.4699 | 0.4997 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BSC-LT/roberta-base-biomedical-es
|
[
"pytorch",
"roberta",
"fill-mask",
"es",
"arxiv:2109.03570",
"arxiv:2109.07765",
"transformers",
"biomedical",
"spanish",
"license:apache-2.0",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 161 | null |
---
license: cc-by-nc-4.0
datasets:
- databricks/databricks-dolly-15k
language:
- en
tags:
- text-generation-inference
---
# Muwa-OPT - A budget-friendly OPT-based LLM
[Muwa Repository on GitHub](https://github.com/theSLWayne/Muwa-OPT/)

Muwa is a fine-tuned LoRA model based on Facebook's OPT model architecture. Muwa was fine-tuned using the [databricks-dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k) dataset, a set of instruction-following records that belong to multiple categories like brainstorming, classification, closed QA, generation, information extraction, open QA, and summarization. **The specialty of Muwa is that only free resources have been used to fine-tune the model**: no fancy arrays of GPUs or paid GPU instances were used, only the free tier of Google Colaboratory.
Muwa is currently trained using the [OPT 1.3b model](https://huggingface.co/facebook/opt-1.3b), which is available in HuggingFace.
This work is heavily inspired by [Yudhanjaya's Eluwa model](https://github.com/yudhanjaya/Eluwa). Most of the model fine-tuning and benchmarking code is taken from their repository; I made some adjustments to the code and changed some parameters so that the fine-tuning process could be done on the free resources available to me at the time.
## Inference
Make sure you install the following Python packages in the environment where the model is intended to be run.
```shell
pip install torch peft datasets evaluate transformers accelerate bitsandbytes
```
First, the OPT 1.3b base model should be loaded, and then Muwa should be loaded on top of it from their respective Hugging Face repositories. After the models are loaded, they can be used for inference.
```python
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Define model names to be loaded
peft_model_id = 'theSLWayne/Muwa-1.3b'
base_model = 'facebook/opt-1.3b'
# Load base model
model = AutoModelForCausalLM.from_pretrained(
base_model,
device_map='auto',
torch_dtype=torch.float16,
)
# Load Muwa
model = PeftModel.from_pretrained(
model,
peft_model_id,
device_map='auto',
torch_dtype=torch.float16,
)
# Initiate tokenizer of the base model
tokenizer = AutoTokenizer.from_pretrained(base_model)
# Create batches of inputs
batch = tokenizer("What is a deep learning model?", return_tensors='pt')
# Take predictions
with torch.cuda.amp.autocast():
output_tokens = model.generate(**batch, max_new_tokens=50)
print(tokenizer.decode(output_tokens[0], skip_special_tokens=True))
```
If you intend to use CPU (which is not recommended), you can load the models as follows:
```python
model = AutoModelForCausalLM.from_pretrained(
base_model, device_map='auto', low_cpu_mem_usage=True
)
model = PeftModel.from_pretrained(
model,
peft_model_id,
device_map='auto',
)
```
## Training Muwa
This model was fine-tuned for 2 Epochs using the aforementioned Databricks Dolly 15K dataset. This model and its base model (OPT 1.3b) can be loaded in 8-bit. The notebook that was used for training this model can be found on the [GitHub repo](https://github.com/theSLWayne/Muwa-OPT/), including my notes on each code block.
The model was trained only using T4 GPU provided by Google Colab. **In order to fit the whole model and the dataset into it, the dataset had an input limit of 1024 tokens per each query**. **This was done because with the default value, the GPU RAM was not enough to fine-tune the model**.
With the limit in input tokens, the model training took ~12 GB of GPU RAM.
### PEFT and LoRA
PEFT(Parameter-Efficient Fine-tuning) is a set of approaches that are meant to reduce the cost of fine-tuning, storing, and deploying large models. According to [this HuggingFace article on PEFT](https://huggingface.co/blog/peft),
*`PEFT approaches only fine-tune a small number of (extra) model parameters while freezing most parameters of the pretrained LLMs, thereby greatly decreasing the computational and storage costs. This also overcomes the issues of catastrophic forgetting, a behaviour observed during the full finetuning of LLMs. PEFT approaches have also shown to be better than fine-tuning in the low-data regimes and generalize better to out-of-domain scenarios. It can be applied to various modalities, e.g., image classification and stable diffusion dreambooth.`*
HuggingFace has launched a Python package with the same name and according to the documentation it implements a number of PEFT methods:
1. LoRA
2. Prefix Tuning
3. P-Tuning
4. Prompt Tuning
5. AdaLoRA
This package is used in fine-tuning and in the inference of Muwa. More details about this package can be discovered [here](https://github.com/huggingface/peft).
LoRA (Low-Rank Adaptation) is a method proposed for adapting large pre-trained language models to specific tasks or domains. It involves freezing the pre-trained model weights and adding trainable rank decomposition matrices to each layer of the Transformer architecture, which significantly reduces the number of trainable parameters for downstream tasks. This approach allows for efficient adaptation of language models with fewer trainable parameters and reduced GPU memory requirements. More information on LoRA can be found on the paper that introduced the method, which can be accessed [here](https://arxiv.org/abs/2106.09685). Also, I found [this video](https://www.youtube.com/watch?v=_K3HgjnRHCY&lc=Ugyqpr8yVUW2DHlvsoZ4AaABAg) that explains the paper in simple terms, which I found to be very useful.
## Testing and Evaluating
Muwa was tested and evaluated using SQuAD mini, wikitext, and piqa datasets. Both Muwa and its base model, OPT 1.3b were evaluated seperately using all mentioned datasets and the results can be summarized as follows:
| Dataset | OPT 1.3b | Muwa |
|---------|----------|------|
| SQuAD Mini (*avg. f1 score*) | 24.587 | **26.234** |
| wikitext (*perplexity*) | 13.91406 | **13.96875** |
| piqa (*accuracy*) | 0.495 | **0.532** |
As shown, Muwa has been able to outperform its base model by fine tuning using a rather smaller dataset (compared to others like [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca) available for these tasks) for all the evaluation datasets.
This shows that LLMs that have Billions of parameters can be fine-tuned using resources which are available for free and you can actually improve the model's performance by doing so.
Code used for evaluating Muwa can be found in the notebook which is included in the [GitHub repo](https://github.com/theSLWayne/Muwa-OPT/).
## The Story Behind Muwa
As mentioned above, Muwa was heavily inspired by Eluwa model developed by Yudhanjaya et al. "Eluwa" means goat in Sinhalese. Continuing the trend of naming LLMs after even-toed ungulates, this model is named "Muwa".
Deers aren't as fearsome as Goats, or even Llamas and alpacas but they are still an impressive species. They are graceful, agile, and known for their antlers, which they shed and regrow every year. In some cultures, deers are considered a symbol of gentleness and kindness. All the more reasons to name this model after them.
About the graphic located at the beginning of this document, that is the work of someone(me) with zero knowledge and experience in design, and it shows. The initial image was taken from [freepngimg.com](https://www.freepngimg.com/png/22758-deer-head-free-download) and is protected under [Creative Commons (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/) license. Then that image was colorized using [Colorizer Models HuggingFace space](https://huggingface.co/spaces/trysem/Colorizer_Models). Then the text was added after loading the colorized image into [Canva](canva.com), which provided the final output.
## License
The base model used for this work, Facebook's OPT has its own license, which can be found [here](https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/MODEL_LICENSE.md).
Databricks Dolly 15k model is protected under [CC BY-SA 3.0](https://creativecommons.org/licenses/by-sa/3.0/), allowing it to be modified, redistributed, and used for any purpose, even commercially.
Although the dataset is allowed to be modified and redistributed, the licensing of OPT does not allow to use it for any commercial or any other non-research related cases, therefore making Muwa restricted to be used only for research, under CC BY NC 4.0.
|
BSC-LT/roberta-base-bne-capitel-ner-plus
|
[
"pytorch",
"roberta",
"token-classification",
"es",
"dataset:bne",
"dataset:capitel",
"arxiv:1907.11692",
"arxiv:2107.07253",
"transformers",
"national library of spain",
"spanish",
"bne",
"capitel",
"ner",
"license:apache-2.0",
"autotrain_compatible"
] |
token-classification
|
{
"architectures": [
"RobertaForTokenClassification"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 9 | null |
---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: Vignesh-Trender/my_awesome_model
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# Vignesh-Trender/my_awesome_model
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.1294
- Validation Loss: 0.2072
- Train Accuracy: 0.9230
- Epoch: 1
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 7810, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
- training_precision: float32
### Training results
| Train Loss | Validation Loss | Train Accuracy | Epoch |
|:----------:|:---------------:|:--------------:|:-----:|
| 0.2500 | 0.1823 | 0.9293 | 0 |
| 0.1294 | 0.2072 | 0.9230 | 1 |
### Framework versions
- Transformers 4.28.1
- TensorFlow 2.12.0
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BSC-LT/roberta-base-bne
|
[
"pytorch",
"roberta",
"fill-mask",
"es",
"dataset:bne",
"arxiv:1907.11692",
"arxiv:2107.07253",
"transformers",
"national library of spain",
"spanish",
"bne",
"license:apache-2.0",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 594 | 2023-05-04T11:49:30Z |
# easy_ViTPose
<p align="center">
<img src="https://user-images.githubusercontent.com/24314647/236082274-b25a70c8-9267-4375-97b0-eddf60a7dfc6.png" width=375> easy_ViTPose
</p>
## Accurate 2d human pose estimation, finetuned on 25 keypoints COCO skeleton + feet
### Easy to use SOTA `ViTPose` [Y. Xu et al., 2022] models for fast inference.
These are just the models, refer to https://github.com/JunkyByte/easy_ViTPose for the actual code.
| Models | Path |
| :----: | :----: |
| TORCH | [Folder](https://huggingface.co/JunkyByte/easy_ViTPose/tree/main/torch) |
| ONNX | [Folder](https://huggingface.co/JunkyByte/easy_ViTPose/tree/main/onnx) |
| TENSORRT | [Folder](https://huggingface.co/JunkyByte/easy_ViTPose/tree/main/tensorrt) |
You can also download the YOLOv5 models:
| Models | Path |
| :----: | :----: |
| YOLOv5 | [Folder](https://huggingface.co/JunkyByte/easy_ViTPose/tree/main/yolov5) |
### License
Refer to official https://github.com/ViTAE-Transformer/ViTPose/blob/main/LICENSE for model license
|
BSC-LT/roberta-large-bne-capitel-ner
|
[
"pytorch",
"roberta",
"token-classification",
"es",
"dataset:bne",
"dataset:capitel",
"arxiv:1907.11692",
"arxiv:2107.07253",
"transformers",
"national library of spain",
"spanish",
"bne",
"capitel",
"ner",
"license:apache-2.0",
"autotrain_compatible"
] |
token-classification
|
{
"architectures": [
"RobertaForTokenClassification"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 5 | 2023-05-04T11:53:34Z |
---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: anitha67/my_awesome_model
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# anitha67/my_awesome_model
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.0657
- Validation Loss: 0.2130
- Train Accuracy: 0.9325
- Epoch: 2
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 7810, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
- training_precision: float32
### Training results
| Train Loss | Validation Loss | Train Accuracy | Epoch |
|:----------:|:---------------:|:--------------:|:-----:|
| 0.2542 | 0.2212 | 0.9096 | 0 |
| 0.1335 | 0.1956 | 0.9249 | 1 |
| 0.0657 | 0.2130 | 0.9325 | 2 |
### Framework versions
- Transformers 4.28.1
- TensorFlow 2.12.0
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BSC-LT/roberta-large-bne
|
[
"pytorch",
"roberta",
"fill-mask",
"es",
"dataset:bne",
"arxiv:1907.11692",
"arxiv:2107.07253",
"transformers",
"national library of spain",
"spanish",
"bne",
"license:apache-2.0",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 24 | null |
---
license: creativeml-openrail-m
tags:
- pytorch
- diffusers
- stable-diffusion
- text-to-image
- diffusion-models-class
- dreambooth-hackathon
- wildcard
widget:
- text: a photo of ccorgi dog in the Acropolis
---
# DreamBooth model for the ccorgi concept trained by SaudxInu on the lewtun/corgi dataset.
This is a Stable Diffusion model fine-tuned on the ccorgi concept with DreamBooth. It can be used by modifying the `instance_prompt`: **a photo of ccorgi dog**
This model was created as part of the DreamBooth Hackathon 🔥. Visit the [organisation page](https://huggingface.co/dreambooth-hackathon) for instructions on how to take part!
## Description
This is a Stable Diffusion model fine-tuned on `dog` images for the wildcard theme.
## Usage
```python
from diffusers import StableDiffusionPipeline
pipeline = StableDiffusionPipeline.from_pretrained('SaudxInu/ccorgi-dog')
image = pipeline().images[0]
image
```
|
Badr/model1
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-04T12:15:50Z |
Users may easily use this capability on their PC using the DataVare NSF to PST converter software, which is a very effective tool for them. The utility operates without any data loss on Windows platforms. With the NSF to PST Converter, the user can convert their file with ease. By converting 10 objects, the app allows users to test it out for free. Those with and without technical expertise can both utilize this program. The function is accessible in any version of Outlook. It may be used with MS Outlook versions 2003, 2007, 2010, 2013, 2016, and 2019, among others. Without using Outlook, you can bulk import NSF files into the PST file format. If they are having problems, it can be used by both novice and experienced users. Additionally, users are able to obtain the software and use it with any version of Windows.
Read more :- https://www.datavare.com/software/nsf-to-pst-converter-expert.html
|
Bakkes/BakkesModWiki
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilledbert-finetuned-squad-assignment
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilledbert-finetuned-squad-assignment
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the squad dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.28.0
- Pytorch 2.0.0+cu118
- Datasets 2.11.0
- Tokenizers 0.13.3
|
Barleysack/klue-roberta-LSTM
|
[
"pytorch",
"roberta",
"transformers"
] | null |
{
"architectures": [
"QAWithLSTMModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 6 | 2023-05-04T12:48:56Z |
Not official! This are diffusers weights for https://civitai.com/models/7371/rev-animated
Based on Stable Diffusion v1.5
|
Barytes/hellohf
|
[
"tf",
"bert",
"fill-mask",
"en",
"dataset:bookcorpus",
"dataset:wikipedia",
"transformers",
"exbert",
"license:apache-2.0",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 2 | 2023-05-04T12:49:52Z |
---
license: apache-2.0
tags:
- text-classification
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: misogynistic-statements-classification-model
results: []
widget:
- text: Las mujeres deben ser madres antes que nada
example_title: Machista
- text: >-
Las mujeres tienen el mismo potencial y habilidades para los negocios que
los hombres
example_title: No machista
datasets:
- glombardo/misogynistic-statements-classification
language:
- es
output_data:
- format: class
- class_labels: ["Sexist", "Non-sexist"]
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Misogynistic statements classification model
**Model that classifies text as sexist or non-sexist.**
This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the [misogynistic-statements-classification dataset](https://huggingface.co/datasets/glombardo/misogynistic-statements-classification).
It achieves the following results on the evaluation set:
- Loss: 0.2493
- Accuracy: 0.9524
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
Batsy24/DialoGPT-small-Twilight_EdBot
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] |
conversational
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 6 | 2023-05-04T12:51:40Z |
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-fr
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: xtreme
type: xtreme
config: PAN-X.fr
split: validation
args: PAN-X.fr
metrics:
- name: F1
type: f1
value: 0.8354006034193765
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-fr
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2851
- F1: 0.8354
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.6033 | 1.0 | 191 | 0.3351 | 0.7857 |
| 0.2614 | 2.0 | 382 | 0.2998 | 0.8198 |
| 0.1756 | 3.0 | 573 | 0.2851 | 0.8354 |
### Framework versions
- Transformers 4.27.4
- Pytorch 2.0.0+cpu
- Datasets 2.11.0
- Tokenizers 0.13.3
|
BatuhanYilmaz/bert-finetuned-ner
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-04T12:54:09Z |
---
license: creativeml-openrail-m
base_model: runwayml/stable-diffusion-v1-5
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- controlnet
- jax-diffusers-event
inference: true
---
# controlnet- MakiPan/controlnet-encoded-hands-20230504_125403
These are controlnet weights trained on runwayml/stable-diffusion-v1-5 with new type of conditioning. You can find some example images in the following.
prompt: a man in a colorful shirt giving a peace sign in front of a rallying crowd

prompt: a police officer signaling someone to stop in a park

|
BatuhanYilmaz/distilbert-base-uncased-finetuned-squad-d5716d28
|
[
"pytorch",
"distilbert",
"fill-mask",
"en",
"dataset:squad",
"arxiv:1910.01108",
"transformers",
"question-answering",
"license:apache-2.0",
"autotrain_compatible"
] |
question-answering
|
{
"architectures": [
"DistilBertForMaskedLM"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 18 | null |
---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-CartPole1
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: CartPole-v1
type: CartPole-v1
metrics:
- type: mean_reward
value: 500.00 +/- 0.00
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **CartPole-v1**
This is a trained model of a **Reinforce** agent playing **CartPole-v1** .
To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
|
BatuhanYilmaz/dummy-model
|
[
"tf",
"camembert",
"fill-mask",
"transformers",
"generated_from_keras_callback",
"license:mit",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"CamembertForMaskedLM"
],
"model_type": "camembert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 6 | null |
T5 LARGE MODEL #3 PRETRAINED ON XSUM AND FINETUNED ON SAMSUM
|
BatuhanYilmaz/mlm-finetuned-imdb
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-uncased-finetuned-provenances
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-provenances
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 2.5430
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 47 | 2.5506 |
| 2.7876 | 2.0 | 94 | 2.5225 |
| 2.7876 | 3.0 | 141 | 2.3152 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Tokenizers 0.13.3
|
Baybars/wav2vec2-xls-r-1b-turkish
|
[
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"tr",
"dataset:common_voice",
"transformers",
"common_voice",
"generated_from_trainer"
] |
automatic-speech-recognition
|
{
"architectures": [
"Wav2Vec2ForCTC"
],
"model_type": "wav2vec2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 13 | null |
Access to model ALLREDWAY/web01 is restricted and you are not in the authorized list. Visit https://huggingface.co/ALLREDWAY/web01 to ask for access.
|
BeIR/query-gen-msmarco-t5-base-v1
|
[
"pytorch",
"jax",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] |
text2text-generation
|
{
"architectures": [
"T5ForConditionalGeneration"
],
"model_type": "t5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": true,
"length_penalty": 2,
"max_length": 200,
"min_length": 30,
"no_repeat_ngram_size": 3,
"num_beams": 4,
"prefix": "summarize: "
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to German: "
},
"translation_en_to_fr": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to French: "
},
"translation_en_to_ro": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to Romanian: "
}
}
}
| 1,816 | 2023-05-04T13:14:29Z |
---
license: creativeml-openrail-m
language:
- en
- ja
tags:
- Stable-Diffusion
- lora
---
# 【LoRA】witchpot-citysilhouette-sd-1-5
LoRA for 2D game city silhouette evening stage
All training data is generated by Midjourney
## Trigger
- citysilhouette
## Sample Prompts
- citysilhouette, jump game level design, house and buildings, evening
## Sample Images

## Model Description
- Model type: [LoRA]
- Base Model: Model trained with runwayml/stable-diffusion-v1-5/v1-5-pruned.ckpt (https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.ckpt)
## Recommendations
This LoRA model has been trained to generate game stages made of silhouette city at evening, based on specific patterns.
By combining it with Depth2Image, you can create consistent game stages.
This LoRA is supposed to use with [stable-diffusion-for-unity](https://docs.witchpot.com/)
## Information
- https://twitter.com/Witchpot_
|
Beatriz/model_name
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | null |
---
license: other
---
This was trained on 10 epoch with the settings on https://huggingface.co/TehVenom/Pygmalion-Vicuna-1.1-7b
|
Bee-Garbs/DialoGPT-real-cartman-small
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] |
conversational
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 10 | null |
---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-PixelCopter5
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: Pixelcopter-PLE-v0
type: Pixelcopter-PLE-v0
metrics:
- type: mean_reward
value: 36.10 +/- 26.09
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **Pixelcopter-PLE-v0**
This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** .
To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
|
BertChristiaens/EmojiPredictor
|
[
"pytorch",
"distilbert",
"token-classification",
"transformers",
"autotrain_compatible"
] |
token-classification
|
{
"architectures": [
"DistilBertForTokenClassification"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 6 | 2023-05-04T13:35:19Z |
---
tags:
- LunarLander-v2
- ppo
- deep-reinforcement-learning
- reinforcement-learning
- custom-implementation
- deep-rl-course
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: 93.61 +/- 100.11
name: mean_reward
verified: false
---
# PPO Agent Playing LunarLander-v2
This is a trained model of a PPO agent playing LunarLander-v2.
# Hyperparameters
|
BhanuSama/gpt2-finetuned-xsum
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-04T13:37:07Z |
---
language: en
license: apache-2.0
library_name: diffusers
tags: []
datasets: /content/drive/MyDrive/image_and_text
metrics: []
---
<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->
# ddpm-butterflies-128
## Model description
This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library
on the `/content/drive/MyDrive/image_and_text` dataset.
## Intended uses & limitations
#### How to use
```python
# TODO: add an example code snippet for running this diffusion pipeline
```
#### Limitations and bias
[TODO: provide examples of latent issues and potential remediations]
## Training data
[TODO: describe the data used to train the model]
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 16
- gradient_accumulation_steps: 1
- optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None
- lr_scheduler: None
- lr_warmup_steps: 500
- ema_inv_gamma: None
- ema_inv_gamma: None
- ema_inv_gamma: None
- mixed_precision: fp16
### Training results
📈 [TensorBoard logs](https://huggingface.co/Tian7/ddpm-butterflies-128/tensorboard?#scalars)
|
BigSalmon/BertaMyWorda
|
[
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8 | 2023-05-04T13:52:47Z |
---
tags:
- medical
---
Please note that the chatbot is designed for research purposes only and is not intended for use in real medical settings. While the chatbot has been trained to provide accurate and helpful responses, it is not a substitute for professional medical advice, diagnosis, or treatment. The information provided by the chatbot should not be used to make medical decisions, and any health concerns should be addressed by a licensed healthcare provider.
## Quickstart
```python
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
path = "jianghc/medical_chatbot"
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = GPT2Tokenizer.from_pretrained(path)
model = GPT2LMHeadModel.from_pretrained(path).to(device)
prompt_input = (
"The conversation between human and AI assistant.\n"
"[|Human|] {input}\n"
"[|AI|]"
)
sentence = prompt_input.format_map({'input': "what is parkinson's disease?"})
inputs = tokenizer(sentence, return_tensors="pt").to(device)
with torch.no_grad():
beam_output = model.generate(**inputs,
min_new_tokens=1,
max_length=512,
num_beams=3,
repetition_penalty=1.2,
early_stopping=True,
eos_token_id=198
)
print(tokenizer.decode(beam_output[0], skip_special_tokens=True))
```
## Example Outputs
```
The conversation between human and AI assistant.
[|Human|] what is parkinson's disease?
[|AI|] Parkinson's disease is a neurodegenerative disorder that affects movement. It is caused by the loss of dopamine-producing cells in the brain.
```
```
The conversation between human and AI assistant.
[|Human|] what type of honey is best for a bad covid cough?
[|AI|] Manuka honey has been shown to have anti-inflammatory and antibacterial properties that can help alleviate symptoms of a bad covid cough.
```
|
BigSalmon/BlankSlots
|
[
"pytorch",
"jax",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] |
text2text-generation
|
{
"architectures": [
"T5ForConditionalGeneration"
],
"model_type": "t5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": true,
"length_penalty": 2,
"max_length": 200,
"min_length": 30,
"no_repeat_ngram_size": 3,
"num_beams": 4,
"prefix": "summarize: "
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to German: "
},
"translation_en_to_fr": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to French: "
},
"translation_en_to_ro": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to Romanian: "
}
}
}
| 4 | null |
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: doom_health_gathering_supreme
type: doom_health_gathering_supreme
metrics:
- type: mean_reward
value: 11.50 +/- 4.84
name: mean_reward
verified: false
---
A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment.
This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
## Downloading the model
After installing Sample-Factory, download the model with:
```
python -m sample_factory.huggingface.load_from_hub -r adrienJeg/rl_course_vizdoom_health_gathering_supreme
```
## Using the model
To run the model after download, use the `enjoy` script corresponding to this environment:
```
python -m .usr.local.lib.python3.10.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```
You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details
## Training with this model
To continue training with this model, use the `train` script corresponding to this environment:
```
python -m .usr.local.lib.python3.10.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```
Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
|
BigSalmon/DaBlank
|
[
"pytorch",
"jax",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] |
text2text-generation
|
{
"architectures": [
"T5ForConditionalGeneration"
],
"model_type": "t5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": true,
"length_penalty": 2,
"max_length": 200,
"min_length": 30,
"no_repeat_ngram_size": 3,
"num_beams": 4,
"prefix": "summarize: "
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to German: "
},
"translation_en_to_fr": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to French: "
},
"translation_en_to_ro": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to Romanian: "
}
}
}
| 4 | null |
---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: Harshavardhan155/distilbert-base-uncased-finetuned-imdb
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# Harshavardhan155/distilbert-base-uncased-finetuned-imdb
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 5.6317
- Validation Loss: 5.1948
- Epoch: 0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': -688, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: mixed_float16
### Training results
| Train Loss | Validation Loss | Epoch |
|:----------:|:---------------:|:-----:|
| 5.6317 | 5.1948 | 0 |
### Framework versions
- Transformers 4.28.1
- TensorFlow 2.12.0
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BigSalmon/FormalBerta
|
[
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 10 | null |
---
tags:
- espnet
- audio
- diarization
language: en
datasets:
- librimix
license: cc-by-4.0
---
## ESPnet2 DIAR model
### `soumi-maiti/libri3mix_eend_ss`
This model was trained by soumimaiti using librimix recipe in [espnet](https://github.com/espnet/espnet/).
### Demo: How to use in ESPnet2
Follow the [ESPnet installation instructions](https://espnet.github.io/espnet/installation.html)
if you haven't done that already.
```bash
cd espnet
git checkout d837c97c88f13ffe655a30bcff93d814f212b225
pip install -e .
cd egs2/librimix/enh_diar1_2
./run.sh --skip_data_prep false --skip_train true --download_model soumi-maiti/libri3mix_eend_ss
```
## DIAR config
<details><summary>expand</summary>
```
config: conf/tuning/train_diar_enh_convtasnet_2.yaml
print_config: false
log_level: INFO
dry_run: false
iterator_type: chunk
output_dir: exp/diar_enh_train_diar_enh_convtasnet_2_raw
ngpu: 1
seed: 0
num_workers: 4
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: null
dist_rank: null
local_rank: 0
dist_master_addr: null
dist_master_port: null
dist_launcher: null
multiprocessing_distributed: false
unused_parameters: false
sharded_ddp: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: true
collect_stats: false
write_collected_feats: false
max_epoch: 100
patience: 50
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
- - valid
- loss_enh
- min
keep_nbest_models: 1
nbest_averaging_interval: 0
grad_clip: 5.0
grad_clip_type: 2.0
grad_noise: false
accum_grad: 4
no_forward_run: false
resume: true
train_dtype: float32
use_amp: false
log_interval: null
use_matplotlib: true
use_tensorboard: true
use_wandb: false
wandb_project: null
wandb_id: null
wandb_entity: null
wandb_name: null
wandb_model_log_interval: -1
detect_anomaly: false
pretrain_path: null
init_param: []
ignore_init_mismatch: false
freeze_param: []
num_iters_per_epoch: null
batch_size: 4
valid_batch_size: null
batch_bins: 1000000
valid_batch_bins: null
train_shape_file:
- exp/diar_enh_stats_8k/train/speech_shape
- exp/diar_enh_stats_8k/train/text_shape
- exp/diar_enh_stats_8k/train/speech_ref1_shape
- exp/diar_enh_stats_8k/train/speech_ref2_shape
- exp/diar_enh_stats_8k/train/speech_ref3_shape
- exp/diar_enh_stats_8k/train/noise_ref1_shape
valid_shape_file:
- exp/diar_enh_stats_8k/valid/speech_shape
- exp/diar_enh_stats_8k/valid/text_shape
- exp/diar_enh_stats_8k/valid/speech_ref1_shape
- exp/diar_enh_stats_8k/valid/speech_ref2_shape
- exp/diar_enh_stats_8k/valid/speech_ref3_shape
- exp/diar_enh_stats_8k/valid/noise_ref1_shape
batch_type: folded
valid_batch_type: null
fold_length:
- 800
- 80000
- 80000
- 80000
- 80000
- 80000
sort_in_batch: descending
sort_batch: descending
multiple_iterator: false
chunk_length: 24000
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
train_data_path_and_name_and_type:
- - dump/raw/train/wav.scp
- speech
- sound
- - dump/raw/train/espnet_rttm
- text
- rttm
- - dump/raw/train/spk1.scp
- speech_ref1
- sound
- - dump/raw/train/spk2.scp
- speech_ref2
- sound
- - dump/raw/train/spk3.scp
- speech_ref3
- sound
- - dump/raw/train/noise1.scp
- noise_ref1
- sound
valid_data_path_and_name_and_type:
- - dump/raw/dev/wav.scp
- speech
- sound
- - dump/raw/dev/espnet_rttm
- text
- rttm
- - dump/raw/dev/spk1.scp
- speech_ref1
- sound
- - dump/raw/dev/spk2.scp
- speech_ref2
- sound
- - dump/raw/dev/spk3.scp
- speech_ref3
- sound
- - dump/raw/dev/noise1.scp
- noise_ref1
- sound
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
valid_max_cache_size: null
optim: adam
optim_conf:
lr: 0.001
eps: 1.0e-07
weight_decay: 0
scheduler: reducelronplateau
scheduler_conf:
mode: min
factor: 0.5
patience: 1
token_list: null
src_token_list: null
init: xavier_uniform
input_size: null
ctc_conf:
dropout_rate: 0.0
ctc_type: builtin
reduce: true
ignore_nan_grad: null
zero_infinity: true
enh_criterions:
- name: si_snr
conf:
eps: 1.0e-07
wrapper: pit
wrapper_conf:
weight: 1.0
independent_perm: true
diar_num_spk: 3
diar_input_size: 128
enh_model_conf:
loss_type: si_snr
asr_model_conf:
ctc_weight: 0.5
interctc_weight: 0.0
ignore_id: -1
lsm_weight: 0.0
length_normalized_loss: false
report_cer: true
report_wer: true
sym_space: <space>
sym_blank: <blank>
extract_feats_in_collect_stats: true
st_model_conf:
stft_consistency: false
loss_type: mask_mse
mask_type: null
diar_model_conf:
diar_weight: 0.2
attractor_weight: 0.2
subtask_series:
- enh
- diar
model_conf:
calc_enh_loss: true
bypass_enh_prob: 0
use_preprocessor: true
token_type: bpe
bpemodel: null
src_token_type: bpe
src_bpemodel: null
non_linguistic_symbols: null
cleaner: null
g2p: null
enh_encoder: conv
enh_encoder_conf:
channel: 512
kernel_size: 16
stride: 8
enh_separator: tcn_nomask
enh_separator_conf:
layer: 8
stack: 3
bottleneck_dim: 128
hidden_dim: 512
kernel: 3
causal: false
norm_type: gLN
enh_decoder: conv
enh_decoder_conf:
channel: 512
kernel_size: 16
stride: 8
enh_mask_module: multi_mask
enh_mask_module_conf:
max_num_spk: 3
mask_nonlinear: relu
bottleneck_dim: 128
frontend: null
frontend_conf: {}
specaug: null
specaug_conf: {}
normalize: utterance_mvn
normalize_conf: {}
asr_preencoder: null
asr_preencoder_conf: {}
asr_encoder: rnn
asr_encoder_conf: {}
asr_postencoder: null
asr_postencoder_conf: {}
asr_decoder: rnn
asr_decoder_conf: {}
st_preencoder: null
st_preencoder_conf: {}
st_encoder: rnn
st_encoder_conf: {}
st_postencoder: null
st_postencoder_conf: {}
st_decoder: rnn
st_decoder_conf: {}
st_extra_asr_decoder: rnn
st_extra_asr_decoder_conf: {}
st_extra_mt_decoder: rnn
st_extra_mt_decoder_conf: {}
diar_frontend: null
diar_frontend_conf: {}
diar_specaug: null
diar_specaug_conf: {}
diar_normalize: utterance_mvn
diar_normalize_conf: {}
diar_encoder: transformer
diar_encoder_conf:
input_layer: conv2d8
num_blocks: 4
linear_units: 512
dropout_rate: 0.1
output_size: 256
attention_heads: 4
attention_dropout_rate: 0.1
diar_decoder: linear
diar_decoder_conf: {}
label_aggregator: label_aggregator
label_aggregator_conf:
win_length: 256
hop_length: 64
diar_attractor: rnn
diar_attractor_conf:
unit: 256
layer: 1
dropout: 0.0
attractor_grad: true
required:
- output_dir
version: '202205'
distributed: false
```
</details>
### Citing ESPnet
```BibTex
@inproceedings{watanabe2018espnet,
author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
title={{ESPnet}: End-to-End Speech Processing Toolkit},
year={2018},
booktitle={Proceedings of Interspeech},
pages={2207--2211},
doi={10.21437/Interspeech.2018-1456},
url={http://dx.doi.org/10.21437/Interspeech.2018-1456}
}
```
or arXiv:
```bibtex
@misc{watanabe2018espnet,
title={ESPnet: End-to-End Speech Processing Toolkit},
author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
year={2018},
eprint={1804.00015},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
|
BigSalmon/FormalRobertaaa
|
[
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 12 | null |
---
base_model: runwayml/stable-diffusion-v1-5
instance_prompt: A fantasy landscape in <shanshui-gen-style>
tags:
- stable-diffusion
- stable-diffusion-ppdiffusers
- text-to-image
- ppdiffusers
inference: false
license: mit
---
# megemini/shanshui_gen_style - When Chinese ink-wash landscape painting meets AIGC
The model in this repository was trained with the ``Textual inversion`` technique, using the ``style`` training mode.
The pretrained base model is ``runwayml/stable-diffusion-v1-5``, and the training images were generated by that model's ``img2img`` pipeline from Chinese ink-wash landscape paintings.
From Chinese ink-wash landscape paintings, ``runwayml/stable-diffusion-v1-5`` can generate images that are both evocative and concrete; a model retrained on those generated images can then produce similar pictures from text alone, without needing an ink-wash painting as input.
👉 The [megemini/shanshui](https://huggingface.co/spaces/megemini/shanshui) app is built on the model described above.
Here is a comparison of images generated by the original ``runwayml/stable-diffusion-v1-5`` model and by this model under similar ``prompt``s:
| image | model | prompt |
|-|-|-|
|  | runwayml/stable-diffusion-v1-5 | A fantasy landscape |
|  | runwayml/stable-diffusion-v1-5 | A fantasy landscape |
|  | runwayml/stable-diffusion-v1-5 | A fantasy landscape |
|  | megemini/shanshui_gen_style | A fantasy landscape in \<shanshui-gen-style\> |
|  | megemini/shanshui_gen_style | A fantasy landscape in \<shanshui-gen-style\> |
|  | megemini/shanshui_gen_style | A fantasy landscape in \<shanshui-gen-style\> |
Compared with the pretrained model, the new model generates images in a similar style, but with a larger share of mountain peaks and richer layering.
Below are some of the data samples used to train this model:
| Original painting | Generated (and used to train this model) | prompt |
|-|-|-|
|  |  | A fantasy landscape |
|  |  | A fantasy landscape |
|  |  | A fantasy landscape |
|  |  | A fantasy landscape |
|  |  | A fantasy landscape, trending on artstation |
P.S. 👉 The [megemini/shanshui_style](https://huggingface.co/megemini/shanshui_style) model can generate images in the style of Chinese ink-wash landscape painting.
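A minimal usage sketch (not part of the original card): this repository was trained with ``ppdiffusers``, so the PyTorch ``diffusers`` calls below are an assumption and only apply if a diffusers-compatible learned-embeddings file is available in the repo; with ``ppdiffusers`` the pipeline API is analogous.
```python
# A minimal sketch, not from the original card. The repository was trained with
# ppdiffusers (PaddlePaddle); the PyTorch diffusers calls below are an assumption and
# only work if a diffusers-compatible learned_embeds file is present in the repo.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load the textual-inversion embedding that defines the <shanshui-gen-style> token.
pipe.load_textual_inversion("megemini/shanshui_gen_style")

image = pipe("A fantasy landscape in <shanshui-gen-style>").images[0]
image.save("shanshui_fantasy_landscape.png")
```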
|
BigSalmon/GPT2HardandEasy
|
[
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 9 | 2023-05-04T14:03:04Z |
---
language:
- am
metrics:
- accuracy
- f1
library_name: transformers
pipeline_tag: text-classification
tags:
- Sentiment-Analysis
- Hate-Speech
- Finetuning-mBERT
---
**<h1>Hate-Speech-Detection-in-Amharic-Language-mBERT</h1>**
This repository contains a machine learning model that uses a fine-tuned mBERT to detect hate speech in the Amharic language.
The model was fine-tuned using the Hugging Face Trainer API.
**<h1>Fine-Tuning</h1>**
This model was created by fine-tuning mBERT for the downstream task of hate-speech detection in Amharic.
The starting checkpoint was [Davlan/bert-base-multilingual-cased-finetuned-amharic](https://huggingface.co/Davlan/bert-base-multilingual-cased-finetuned-amharic), provided by Davlan on the Hugging Face Hub.
**<h1>Dataset</h1>**
The model was fine-tuned on an Amharic dataset made available on Mendeley Data (https://data.mendeley.com/datasets/ymtmxx385m).
The dataset contains 30,000 rows of Amharic text labeled as hate speech or not hate speech.
**<h1>Usage</h1>**
You can use the model through the Hugging Face Transformers library, either by loading it directly in your Python code
or by referencing it on the Hugging Face model hub, as in the sketch below.
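```python
# A minimal sketch, not part of the original card. MODEL_ID is a hypothetical
# placeholder -- the card does not state where the fine-tuned checkpoint is
# published -- and the returned label names depend on the model's own config.
from transformers import pipeline

MODEL_ID = "your-username/amharic-hate-speech-mbert"  # hypothetical placeholder

classifier = pipeline("text-classification", model=MODEL_ID)

# Classify an Amharic sentence; the pipeline returns a label and a confidence score.
print(classifier("እንኳን ደህና መጣህ"))
```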
|
BigSalmon/GPTIntro
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-04T14:04:56Z |
---
language: en
tags:
- exbert
license: mit
pipeline_tag: text-generation
duplicated_from: Narsil/gpt2
---
# GPT-2
Test the whole generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large
Pretrained model on English language using a causal language modeling (CLM) objective. It was introduced in
[this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf)
and first released at [this page](https://openai.com/blog/better-language-models/).
Disclaimer: The team releasing GPT-2 also wrote a
[model card](https://github.com/openai/gpt-2/blob/master/model_card.md) for their model. Content from this model card
has been written by the Hugging Face team to complete the information they provided and give specific examples of bias.
## Model description
GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This
means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots
of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely,
it was trained to guess the next word in sentences.
More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence,
shifted one token (word or piece of word) to the right. The model uses internally a mask-mechanism to make sure the
predictions for the token `i` only uses the inputs from `1` to `i` but not the future tokens.
This way, the model learns an inner representation of the English language that can then be used to extract features
useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a
prompt.
## Intended uses & limitations
You can use the raw model for text generation or fine-tune it to a downstream task. See the
[model hub](https://huggingface.co/models?filter=gpt2) to look for fine-tuned versions on a task that interests you.
### How to use
You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we
set a seed for reproducibility:
```python
>>> from transformers import pipeline, set_seed
>>> generator = pipeline('text-generation', model='gpt2')
>>> set_seed(42)
>>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5)
[{'generated_text': "Hello, I'm a language model, a language for thinking, a language for expressing thoughts."},
{'generated_text': "Hello, I'm a language model, a compiler, a compiler library, I just want to know how I build this kind of stuff. I don"},
{'generated_text': "Hello, I'm a language model, and also have more than a few of your own, but I understand that they're going to need some help"},
{'generated_text': "Hello, I'm a language model, a system model. I want to know my language so that it might be more interesting, more user-friendly"},
 {'generated_text': 'Hello, I\'m a language model, not a language model"\n\nThe concept of "no-tricks" comes in handy later with new'}]
```
Here is how to use this model to get the features of a given text in PyTorch:
```python
from transformers import GPT2Tokenizer, GPT2Model
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
```
and in TensorFlow:
```python
from transformers import GPT2Tokenizer, TFGPT2Model
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2Model.from_pretrained('gpt2')
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='tf')
output = model(encoded_input)
```
### Limitations and bias
The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of
unfiltered content from the internet, which is far from neutral. As the openAI team themselves point out in their
[model card](https://github.com/openai/gpt-2/blob/master/model_card.md#out-of-scope-use-cases):
> Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases
> that require the generated text to be true.
>
> Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do
> not recommend that they be deployed into systems that interact with humans unless the deployers first carry out a
> study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race,
> and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar
> levels of caution around use cases that are sensitive to biases around human attributes.
Here's an example of how the model can have biased predictions:
```python
>>> from transformers import pipeline, set_seed
>>> generator = pipeline('text-generation', model='gpt2')
>>> set_seed(42)
>>> generator("The White man worked as a", max_length=10, num_return_sequences=5)
[{'generated_text': 'The White man worked as a mannequin for'},
{'generated_text': 'The White man worked as a maniser of the'},
{'generated_text': 'The White man worked as a bus conductor by day'},
{'generated_text': 'The White man worked as a plumber at the'},
{'generated_text': 'The White man worked as a journalist. He had'}]
>>> set_seed(42)
>>> generator("The Black man worked as a", max_length=10, num_return_sequences=5)
[{'generated_text': 'The Black man worked as a man at a restaurant'},
{'generated_text': 'The Black man worked as a car salesman in a'},
{'generated_text': 'The Black man worked as a police sergeant at the'},
{'generated_text': 'The Black man worked as a man-eating monster'},
{'generated_text': 'The Black man worked as a slave, and was'}]
```
This bias will also affect all fine-tuned versions of this model.
## Training data
The OpenAI team wanted to train this model on a corpus as large as possible. To build it, they scraped all the web
pages from outbound links on Reddit which received at least 3 karma. Note that all Wikipedia pages were removed from
this dataset, so the model was not trained on any part of Wikipedia. The resulting dataset (called WebText) weighs
40GB of texts but has not been publicly released. You can find a list of the top 1,000 domains present in WebText
[here](https://github.com/openai/gpt-2/blob/master/domains.txt).
## Training procedure
### Preprocessing
The texts are tokenized using a byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a
vocabulary size of 50,257. The inputs are sequences of 1024 consecutive tokens.
The larger model was trained on 256 cloud TPU v3 cores. The training duration was not disclosed, nor were the exact
details of training.
## Evaluation results
The model achieves the following results without any fine-tuning (zero-shot):
| Dataset | LAMBADA | LAMBADA | CBT-CN | CBT-NE | WikiText2 | PTB | enwiki8 | text8 | WikiText103 | 1BW |
|:--------:|:-------:|:-------:|:------:|:------:|:---------:|:------:|:-------:|:------:|:-----------:|:-----:|
| (metric) | (PPL) | (ACC) | (ACC) | (ACC) | (PPL) | (PPL) | (BPB) | (BPC) | (PPL) | (PPL) |
| | 35.13 | 45.99 | 87.65 | 83.4 | 29.41 | 65.85 | 1.16 | 1.17 | 37.50 | 75.20 |
### BibTeX entry and citation info
```bibtex
@article{radford2019language,
title={Language Models are Unsupervised Multitask Learners},
author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya},
year={2019}
}
```
<a href="https://huggingface.co/exbert/?model=gpt2">
<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>
|
BigSalmon/GPTNeo350MInformalToFormalLincoln2
|
[
"pytorch",
"gpt_neo",
"text-generation",
"transformers",
"has_space"
] |
text-generation
|
{
"architectures": [
"GPTNeoForCausalLM"
],
"model_type": "gpt_neo",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8 | null |
---
license: mit
tags:
- generated_from_keras_callback
model-index:
- name: gFulvio/roberta-base-finetuned-rte
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# gFulvio/roberta-base-finetuned-rte
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.1596
- Validation Loss: 0.3847
- Train Accuracy: 0.8595
- Epoch: 2
## Model description
More information needed
## Intended uses & limitations
More information needed
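In the absence of documented usage, here is a minimal sketch (not from the original card) of querying the model on an RTE-style premise/hypothesis pair; the checkpoint id is taken from the model-index name above and the label mapping should be checked against the model's config.
```python
# A minimal sketch, not part of the original card. The checkpoint id is taken from the
# model-index name above and assumed to host TensorFlow weights; verify both before use.
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

model_id = "gFulvio/roberta-base-finetuned-rte"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModelForSequenceClassification.from_pretrained(model_id)

premise = "A man is playing a guitar on stage."
hypothesis = "Someone is performing music."

# RTE is a sentence-pair task, so premise and hypothesis are encoded together.
inputs = tokenizer(premise, hypothesis, return_tensors="tf", truncation=True)
logits = model(**inputs).logits
pred = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label.get(pred, pred))
```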
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 3750, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
- training_precision: float32
### Training results
| Train Loss | Validation Loss | Train Accuracy | Epoch |
|:----------:|:---------------:|:--------------:|:-----:|
| 0.4420 | 0.3546 | 0.8465 | 0 |
| 0.2676 | 0.3530 | 0.8545 | 1 |
| 0.1596 | 0.3847 | 0.8595 | 2 |
### Framework versions
- Transformers 4.28.1
- TensorFlow 2.12.0
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BigSalmon/GPTNeo350MInformalToFormalLincoln3
|
[
"pytorch",
"gpt_neo",
"text-generation",
"transformers",
"has_space"
] |
text-generation
|
{
"architectures": [
"GPTNeoForCausalLM"
],
"model_type": "gpt_neo",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 10 | null |
This is a persuasion-scheme detection model trained on [the `label_1` annotations found in the PersuasionForGood](https://convokit.cornell.edu/documentation/persuasionforgood.html#utterance-level-information) dataset.
|
BigSalmon/GPTNeo350MInformalToFormalLincoln4
|
[
"pytorch",
"gpt_neo",
"text-generation",
"transformers",
"has_space"
] |
text-generation
|
{
"architectures": [
"GPTNeoForCausalLM"
],
"model_type": "gpt_neo",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 11 | null |
---
license: other
---
ggml version of https://huggingface.co/junelee/wizard-vicuna-13b
|
BigSalmon/GPTNeo350MInformalToFormalLincoln6
|
[
"pytorch",
"gpt_neo",
"text-generation",
"transformers",
"has_space"
] |
text-generation
|
{
"architectures": [
"GPTNeoForCausalLM"
],
"model_type": "gpt_neo",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 14 | 2023-05-04T14:08:23Z |
---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-en
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: xtreme
type: xtreme
config: PAN-X.en
split: validation
args: PAN-X.en
metrics:
- name: F1
type: f1
value: 0.6916201117318436
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-en
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4041
- F1: 0.6916
## Model description
More information needed
## Intended uses & limitations
More information needed
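In the absence of documented usage, here is a minimal sketch (not from the original card) of running the model as an English named-entity tagger; the repository id is a hypothetical placeholder.
```python
# A minimal sketch, not part of the original card. MODEL_ID is a hypothetical
# placeholder for wherever this fine-tuned checkpoint is published.
from transformers import pipeline

MODEL_ID = "your-username/xlm-roberta-base-finetuned-panx-en"  # hypothetical placeholder

ner = pipeline("token-classification", model=MODEL_ID, aggregation_strategy="simple")

# The PAN-X label set covers PER / ORG / LOC entities.
print(ner("Jeff Dean works at Google in Mountain View."))
```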
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 1.1377 | 1.0 | 50 | 0.5643 | 0.5125 |
| 0.5151 | 2.0 | 100 | 0.4740 | 0.6156 |
| 0.3772 | 3.0 | 150 | 0.4041 | 0.6916 |
### Framework versions
- Transformers 4.27.4
- Pytorch 2.0.0+cpu
- Datasets 2.11.0
- Tokenizers 0.13.3
|
BigSalmon/GPTT
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 9 | 2023-05-04T14:10:24Z |
A LoRA model for AnyLoRA, trained on an Arari dataset.

Prompts:
- `arari`: the painting style of the artist
- `arika arari`: his original character
- `panty pooping`: soils her panties (put it in the negative prompts if you want only peeing)
|
BigSalmon/GoodMaskResults
|
[
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 9 | 2023-05-05T01:17:17Z |
---
license: other
tags:
- image-segmentation
- vision
- generated_from_trainer
model-index:
- name: segformer-finetuned-sidewalk-10k-steps
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# segformer-finetuned-sidewalk-10k-steps
This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the segments/sidewalk-semantic dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6468
- Mean Iou: 0.2931
- Mean Accuracy: 0.3665
- Overall Accuracy: 0.8121
- Accuracy Unlabeled: nan
- Accuracy Flat-road: 0.6505
- Accuracy Flat-sidewalk: 0.9345
- Accuracy Flat-crosswalk: 0.9011
- Accuracy Flat-cyclinglane: 0.7895
- Accuracy Flat-parkingdriveway: 0.2382
- Accuracy Flat-railtrack: 0.0
- Accuracy Flat-curb: 0.4519
- Accuracy Human-person: 0.5536
- Accuracy Human-rider: 0.0
- Accuracy Vehicle-car: 0.9509
- Accuracy Vehicle-truck: 0.0
- Accuracy Vehicle-bus: 0.0
- Accuracy Vehicle-tramtrain: 0.0
- Accuracy Vehicle-motorcycle: 0.0
- Accuracy Vehicle-bicycle: 0.7507
- Accuracy Vehicle-caravan: nan
- Accuracy Vehicle-cartrailer: 0.0
- Accuracy Construction-building: 0.8681
- Accuracy Construction-door: 0.0
- Accuracy Construction-wall: 0.6107
- Accuracy Construction-fenceguardrail: 0.3192
- Accuracy Construction-bridge: 0.0
- Accuracy Construction-tunnel: nan
- Accuracy Construction-stairs: 0.0
- Accuracy Object-pole: 0.5156
- Accuracy Object-trafficsign: 0.0
- Accuracy Object-trafficlight: 0.0
- Accuracy Nature-vegetation: 0.9183
- Accuracy Nature-terrain: 0.8478
- Accuracy Sky: 0.9246
- Accuracy Void-ground: 0.0
- Accuracy Void-dynamic: 0.1083
- Accuracy Void-static: 0.3940
- Accuracy Void-unclear: 0.0
- Iou Unlabeled: nan
- Iou Flat-road: 0.5472
- Iou Flat-sidewalk: 0.8329
- Iou Flat-crosswalk: 0.7961
- Iou Flat-cyclinglane: 0.5266
- Iou Flat-parkingdriveway: 0.2013
- Iou Flat-railtrack: 0.0
- Iou Flat-curb: 0.2863
- Iou Human-person: 0.3887
- Iou Human-rider: 0.0
- Iou Vehicle-car: 0.7872
- Iou Vehicle-truck: 0.0
- Iou Vehicle-bus: 0.0
- Iou Vehicle-tramtrain: 0.0
- Iou Vehicle-motorcycle: 0.0
- Iou Vehicle-bicycle: 0.4759
- Iou Vehicle-caravan: nan
- Iou Vehicle-cartrailer: 0.0
- Iou Construction-building: 0.6992
- Iou Construction-door: 0.0
- Iou Construction-wall: 0.3924
- Iou Construction-fenceguardrail: 0.2614
- Iou Construction-bridge: 0.0
- Iou Construction-tunnel: nan
- Iou Construction-stairs: 0.0
- Iou Object-pole: 0.3413
- Iou Object-trafficsign: 0.0
- Iou Object-trafficlight: 0.0
- Iou Nature-vegetation: 0.8182
- Iou Nature-terrain: 0.7517
- Iou Sky: 0.8855
- Iou Void-ground: 0.0
- Iou Void-dynamic: 0.0963
- Iou Void-static: 0.2896
- Iou Void-unclear: 0.0
## Model description
More information needed
## Intended uses & limitations
More information needed
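In the absence of documented usage, here is a minimal sketch (not from the original card) of running the model for semantic segmentation; the repository id is a hypothetical placeholder.
```python
# A minimal sketch, not part of the original card. MODEL_ID is a hypothetical
# placeholder for wherever this fine-tuned checkpoint is published; the id2label
# mapping for the sidewalk classes comes from the model's config.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation

MODEL_ID = "your-username/segformer-finetuned-sidewalk-10k-steps"  # hypothetical placeholder

processor = AutoImageProcessor.from_pretrained(MODEL_ID)
model = AutoModelForSemanticSegmentation.from_pretrained(MODEL_ID)

image = Image.open("street_scene.jpg")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape: (batch, num_labels, height/4, width/4)

# Upsample to the input resolution and take the per-pixel argmax.
upsampled = torch.nn.functional.interpolate(
    logits, size=image.size[::-1], mode="bilinear", align_corners=False
)
segmentation = upsampled.argmax(dim=1)[0]
print(segmentation.shape, model.config.id2label[int(segmentation[0, 0])])
```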
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 6e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 1337
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: polynomial
- training_steps: 10000
### Training results
| Training Loss | Epoch | Step | Accuracy Construction-bridge | Accuracy Construction-building | Accuracy Construction-door | Accuracy Construction-fenceguardrail | Accuracy Construction-stairs | Accuracy Construction-tunnel | Accuracy Construction-wall | Accuracy Flat-crosswalk | Accuracy Flat-curb | Accuracy Flat-cyclinglane | Accuracy Flat-parkingdriveway | Accuracy Flat-railtrack | Accuracy Flat-road | Accuracy Flat-sidewalk | Accuracy Human-person | Accuracy Human-rider | Accuracy Nature-terrain | Accuracy Nature-vegetation | Accuracy Object-pole | Accuracy Object-trafficlight | Accuracy Object-trafficsign | Accuracy Sky | Accuracy Unlabeled | Accuracy Vehicle-bicycle | Accuracy Vehicle-bus | Accuracy Vehicle-car | Accuracy Vehicle-caravan | Accuracy Vehicle-cartrailer | Accuracy Vehicle-motorcycle | Accuracy Vehicle-tramtrain | Accuracy Vehicle-truck | Accuracy Void-dynamic | Accuracy Void-ground | Accuracy Void-static | Accuracy Void-unclear | Iou Construction-bridge | Iou Construction-building | Iou Construction-door | Iou Construction-fenceguardrail | Iou Construction-stairs | Iou Construction-tunnel | Iou Construction-wall | Iou Flat-crosswalk | Iou Flat-curb | Iou Flat-cyclinglane | Iou Flat-parkingdriveway | Iou Flat-railtrack | Iou Flat-road | Iou Flat-sidewalk | Iou Human-person | Iou Human-rider | Iou Nature-terrain | Iou Nature-vegetation | Iou Object-pole | Iou Object-trafficlight | Iou Object-trafficsign | Iou Sky | Iou Unlabeled | Iou Vehicle-bicycle | Iou Vehicle-bus | Iou Vehicle-car | Iou Vehicle-caravan | Iou Vehicle-cartrailer | Iou Vehicle-motorcycle | Iou Vehicle-tramtrain | Iou Vehicle-truck | Iou Void-dynamic | Iou Void-ground | Iou Void-static | Iou Void-unclear | Validation Loss | Mean Accuracy | Mean Iou | Overall Accuracy |
|:-------------:|:-----:|:-----:|:----------------------------:|:------------------------------:|:--------------------------:|:------------------------------------:|:----------------------------:|:----------------------------:|:--------------------------:|:-----------------------:|:------------------:|:-------------------------:|:-----------------------------:|:-----------------------:|:------------------:|:----------------------:|:---------------------:|:--------------------:|:-----------------------:|:--------------------------:|:--------------------:|:----------------------------:|:---------------------------:|:------------:|:------------------:|:------------------------:|:--------------------:|:--------------------:|:------------------------:|:---------------------------:|:---------------------------:|:--------------------------:|:----------------------:|:---------------------:|:--------------------:|:--------------------:|:---------------------:|:-----------------------:|:-------------------------:|:---------------------:|:-------------------------------:|:-----------------------:|:-----------------------:|:---------------------:|:------------------:|:-------------:|:--------------------:|:------------------------:|:------------------:|:-------------:|:-----------------:|:----------------:|:---------------:|:------------------:|:---------------------:|:---------------:|:-----------------------:|:----------------------:|:-------:|:-------------:|:-------------------:|:---------------:|:---------------:|:-------------------:|:----------------------:|:----------------------:|:---------------------:|:-----------------:|:----------------:|:---------------:|:---------------:|:----------------:|:---------------:|:-------------:|:--------:|:----------------:|
| 2.5227 | 1.0 | 107 | 0.0 | 0.8334 | 0.0 | 0.0 | 0.0 | nan | 0.0000 | 0.0 | 0.0 | 0.0416 | 0.0001 | nan | 0.5390 | 0.9293 | 0.0 | 0.0 | 0.2834 | 0.9261 | 0.0 | 0.0 | 0.0 | 0.5133 | nan | 0.0 | 0.0 | 0.8875 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4909 | 0.0 | 0.0 | 0.0 | nan | 0.0000 | 0.0 | 0.0 | 0.0411 | 0.0001 | nan | 0.3808 | 0.7051 | 0.0 | 0.0 | 0.2534 | 0.5904 | 0.0 | 0.0 | 0.0 | 0.5116 | nan | 0.0 | 0.0 | 0.5403 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.7749 | 0.1548 | 0.1098 | 0.6606 |
| 1.7544 | 2.0 | 214 | 0.0 | 0.8141 | 0.0 | 0.0 | 0.0 | nan | 0.0024 | 0.0 | 0.0 | 0.2967 | 0.0009 | nan | 0.6039 | 0.9275 | 0.0 | 0.0 | 0.8832 | 0.8157 | 0.0 | 0.0 | 0.0 | 0.7111 | nan | 0.0 | 0.0 | 0.9009 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5356 | 0.0 | 0.0 | 0.0 | nan | 0.0024 | 0.0 | 0.0 | 0.2702 | 0.0009 | nan | 0.4296 | 0.7139 | 0.0 | 0.0 | 0.5124 | 0.6367 | 0.0 | 0.0 | 0.0 | 0.7016 | nan | 0.0 | 0.0 | 0.5653 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.4883 | 0.1861 | 0.1365 | 0.6975 |
| 1.523 | 3.0 | 321 | 0.0 | 0.8975 | 0.0 | 0.0 | 0.0 | nan | 0.0009 | 0.0 | 0.0003 | 0.5309 | 0.0063 | nan | 0.4954 | 0.9432 | 0.0 | 0.0 | 0.8476 | 0.8378 | 0.0 | 0.0 | 0.0 | 0.7705 | nan | 0.0 | 0.0 | 0.8567 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5155 | 0.0 | 0.0 | 0.0 | nan | 0.0009 | 0.0 | 0.0003 | 0.4164 | 0.0062 | nan | 0.4161 | 0.7219 | 0.0 | 0.0 | 0.5408 | 0.6765 | 0.0 | 0.0 | 0.0 | 0.7594 | nan | 0.0 | 0.0 | 0.6132 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.2403 | 0.1934 | 0.1459 | 0.7123 |
| 1.2744 | 4.0 | 428 | 0.0 | 0.8602 | 0.0 | 0.0 | 0.0 | nan | 0.0009 | 0.0 | 0.0015 | 0.4753 | 0.0069 | nan | 0.3731 | 0.9792 | 0.0 | 0.0 | 0.7062 | 0.8948 | 0.0 | 0.0 | 0.0 | 0.7488 | nan | 0.0 | 0.0 | 0.8857 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5565 | 0.0 | 0.0 | 0.0 | nan | 0.0009 | 0.0 | 0.0015 | 0.4431 | 0.0068 | nan | 0.3413 | 0.6728 | 0.0 | 0.0 | 0.5473 | 0.6788 | 0.0 | 0.0 | 0.0 | 0.7389 | nan | 0.0 | 0.0 | 0.6552 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.1870 | 0.1854 | 0.1451 | 0.7068 |
| 1.1579 | 5.0 | 535 | 0.0 | 0.7388 | 0.0 | 0.0 | 0.0 | nan | 0.0008 | 0.0 | 0.0040 | 0.6937 | 0.0681 | nan | 0.5908 | 0.9639 | 0.0 | 0.0 | 0.5152 | 0.9429 | 0.0 | 0.0 | 0.0 | 0.8365 | nan | 0.0 | 0.0 | 0.9525 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5687 | 0.0 | 0.0 | 0.0 | nan | 0.0008 | 0.0 | 0.0039 | 0.5783 | 0.0606 | nan | 0.4884 | 0.7434 | 0.0 | 0.0 | 0.4397 | 0.6660 | 0.0 | 0.0 | 0.0 | 0.8076 | nan | 0.0 | 0.0 | 0.5868 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 1.0435 | 0.1971 | 0.1545 | 0.7340 |
| 1.0928 | 6.0 | 642 | 0.0 | 0.8126 | 0.0 | 0.0 | 0.0 | nan | 0.0127 | 0.1193 | 0.0326 | 0.7981 | 0.1432 | nan | 0.6767 | 0.9152 | 0.0 | 0.0 | 0.8393 | 0.8990 | 0.0115 | 0.0 | 0.0 | 0.8664 | nan | 0.0 | 0.0 | 0.9427 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0048 | 0.0 | 0.0 | 0.6031 | 0.0 | 0.0 | 0.0 | nan | 0.0126 | 0.1193 | 0.0298 | 0.6282 | 0.1206 | nan | 0.5205 | 0.7688 | 0.0 | 0.0 | 0.6037 | 0.6827 | 0.0113 | 0.0 | 0.0 | 0.8312 | nan | 0.0 | 0.0 | 0.5963 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0047 | 0.0 | 0.9777 | 0.2211 | 0.1729 | 0.7531 |
| 1.0371 | 7.0 | 749 | 0.0 | 0.8108 | 0.0 | 0.0 | 0.0 | nan | 0.0145 | 0.2878 | 0.0499 | 0.7673 | 0.1179 | nan | 0.5506 | 0.9510 | 0.0 | 0.0 | 0.8458 | 0.8788 | 0.0158 | 0.0 | 0.0 | 0.8125 | nan | 0.0 | 0.0 | 0.9351 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0032 | 0.0 | 0.0 | 0.5687 | 0.0 | 0.0 | 0.0 | nan | 0.0143 | 0.2871 | 0.0416 | 0.5650 | 0.1067 | nan | 0.4769 | 0.7722 | 0.0 | 0.0 | 0.5986 | 0.6729 | 0.0154 | 0.0 | 0.0 | 0.7949 | nan | 0.0 | 0.0 | 0.5910 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0032 | 0.0 | 0.9290 | 0.2200 | 0.1722 | 0.7457 |
| 0.9645 | 8.0 | 856 | 0.0 | 0.8913 | 0.0 | 0.0 | 0.0 | nan | 0.0530 | 0.3879 | 0.1304 | 0.8027 | 0.1244 | nan | 0.5733 | 0.9459 | 0.0 | 0.0 | 0.8434 | 0.8598 | 0.1344 | 0.0 | 0.0 | 0.8596 | nan | 0.0 | 0.0 | 0.9192 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0196 | 0.0 | 0.0 | 0.5899 | 0.0 | 0.0 | 0.0 | nan | 0.0518 | 0.3362 | 0.0872 | 0.6482 | 0.1137 | nan | 0.4887 | 0.7610 | 0.0 | 0.0 | 0.6153 | 0.7148 | 0.1144 | 0.0 | 0.0 | 0.8278 | nan | 0.0 | 0.0 | 0.6957 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0192 | 0.0 | 0.8855 | 0.2358 | 0.1895 | 0.7593 |
| 0.9171 | 9.0 | 963 | 0.0 | 0.8681 | 0.0 | 0.0 | 0.0 | nan | 0.2267 | 0.2895 | 0.1798 | 0.7741 | 0.2153 | nan | 0.6580 | 0.9264 | 0.0009 | 0.0 | 0.7788 | 0.8887 | 0.1800 | 0.0 | 0.0 | 0.8648 | nan | 0.0 | 0.0 | 0.9422 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0689 | 0.0 | 0.0 | 0.6112 | 0.0 | 0.0 | 0.0 | nan | 0.2013 | 0.2859 | 0.1173 | 0.6393 | 0.1769 | nan | 0.5251 | 0.7761 | 0.0009 | 0.0 | 0.6220 | 0.7328 | 0.1391 | 0.0 | 0.0 | 0.8329 | nan | 0.0 | 0.0 | 0.6550 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0622 | 0.0 | 0.8439 | 0.2457 | 0.1993 | 0.7676 |
| 0.8373 | 10.0 | 1070 | 0.0 | 0.8391 | 0.0 | 0.0000 | 0.0 | nan | 0.4409 | 0.3294 | 0.1364 | 0.7858 | 0.1023 | nan | 0.6096 | 0.9644 | 0.0756 | 0.0 | 0.6853 | 0.8993 | 0.1614 | 0.0 | 0.0 | 0.8876 | nan | 0.0 | 0.0 | 0.9315 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0874 | 0.0 | 0.0 | 0.6203 | 0.0 | 0.0000 | 0.0 | nan | 0.2914 | 0.3283 | 0.1050 | 0.6096 | 0.0951 | nan | 0.5427 | 0.7678 | 0.0740 | 0.0 | 0.5665 | 0.7403 | 0.1321 | 0.0 | 0.0 | 0.8500 | nan | 0.0 | 0.0 | 0.6756 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0767 | 0.0 | 0.8317 | 0.2480 | 0.2024 | 0.7710 |
| 0.8375 | 11.0 | 1177 | 0.0 | 0.8248 | 0.0 | 0.0000 | 0.0 | nan | 0.3739 | 0.3951 | 0.2834 | 0.7626 | 0.1777 | nan | 0.4734 | 0.9515 | 0.1276 | 0.0 | 0.7447 | 0.9010 | 0.1872 | 0.0 | 0.0 | 0.9018 | nan | 0.0 | 0.0 | 0.9378 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0591 | 0.0 | 0.0 | 0.6017 | 0.0 | 0.0000 | 0.0 | nan | 0.2379 | 0.3570 | 0.1503 | 0.6432 | 0.1533 | nan | 0.4411 | 0.7743 | 0.1234 | 0.0 | 0.5987 | 0.7041 | 0.1362 | 0.0 | 0.0 | 0.8576 | nan | 0.0 | 0.0 | 0.6553 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0518 | 0.0 | 0.8539 | 0.2532 | 0.2027 | 0.7577 |
| 0.8014 | 12.0 | 1284 | 0.0 | 0.8213 | 0.0 | 0.0002 | 0.0 | nan | 0.4219 | 0.5045 | 0.3125 | 0.8556 | 0.2246 | nan | 0.6546 | 0.8896 | 0.2522 | 0.0 | 0.7563 | 0.9184 | 0.2091 | 0.0 | 0.0 | 0.8852 | nan | 0.0 | 0.0 | 0.9338 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1150 | 0.0 | 0.0 | 0.6244 | 0.0 | 0.0002 | 0.0 | nan | 0.2819 | 0.4181 | 0.1371 | 0.5936 | 0.1892 | nan | 0.5497 | 0.7848 | 0.2332 | 0.0 | 0.6418 | 0.7339 | 0.1582 | 0.0 | 0.0 | 0.8537 | nan | 0.0 | 0.0 | 0.6887 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0936 | 0.0 | 0.7821 | 0.2736 | 0.2182 | 0.7698 |
| 0.7598 | 13.0 | 1391 | 0.0 | 0.7520 | 0.0 | 0.0 | 0.0 | nan | 0.5035 | 0.5241 | 0.2865 | 0.8708 | 0.1666 | nan | 0.6404 | 0.8870 | 0.2805 | 0.0 | 0.7662 | 0.9230 | 0.3694 | 0.0 | 0.0 | 0.8932 | nan | 0.0 | 0.0 | 0.9492 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2009 | 0.0 | 0.0 | 0.6246 | 0.0 | 0.0 | 0.0 | nan | 0.3111 | 0.4894 | 0.1504 | 0.5451 | 0.1555 | nan | 0.5227 | 0.7890 | 0.2569 | 0.0 | 0.6171 | 0.7275 | 0.1555 | 0.0 | 0.0 | 0.8569 | nan | 0.0 | 0.0 | 0.6889 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1265 | 0.0 | 0.7959 | 0.2817 | 0.2193 | 0.7653 |
| 0.7333 | 14.0 | 1498 | 0.0 | 0.7852 | 0.0 | 0.0005 | 0.0 | nan | 0.6099 | 0.5852 | 0.3890 | 0.8211 | 0.2961 | nan | 0.6321 | 0.9313 | 0.3684 | 0.0 | 0.6342 | 0.9311 | 0.2435 | 0.0 | 0.0 | 0.8845 | nan | 0.0 | 0.0 | 0.9298 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1712 | 0.0 | 0.0 | 0.6312 | 0.0 | 0.0005 | 0.0 | nan | 0.2920 | 0.4813 | 0.1830 | 0.6730 | 0.2504 | nan | 0.5405 | 0.8112 | 0.3183 | 0.0 | 0.5574 | 0.7360 | 0.1553 | 0.0 | 0.0 | 0.8543 | nan | 0.0 | 0.0 | 0.7520 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1219 | 0.0 | 0.7463 | 0.2879 | 0.2300 | 0.7815 |
| 0.7128 | 15.0 | 1605 | 0.0 | 0.7547 | 0.0 | 0.0126 | 0.0 | nan | 0.6715 | 0.6477 | 0.2623 | 0.8694 | 0.1131 | 0.0 | 0.7576 | 0.9015 | 0.5131 | 0.0 | 0.8870 | 0.8915 | 0.3275 | 0.0 | 0.0 | 0.9177 | nan | 0.0008 | 0.0 | 0.9290 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2520 | 0.0 | 0.0 | 0.5980 | 0.0 | 0.0126 | 0.0 | nan | 0.4000 | 0.3362 | 0.1721 | 0.4706 | 0.1069 | 0.0 | 0.6593 | 0.8212 | 0.2914 | 0.0 | 0.6797 | 0.7574 | 0.1981 | 0.0 | 0.0 | 0.8704 | nan | 0.0008 | 0.0 | 0.6431 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1881 | 0.0 | 0.7557 | 0.2942 | 0.2184 | 0.7786 |
| 0.6885 | 16.0 | 1712 | 0.0 | 0.8416 | 0.0 | 0.0086 | 0.0 | nan | 0.5907 | 0.7737 | 0.3100 | 0.7765 | 0.1341 | 0.0 | 0.6753 | 0.9522 | 0.5143 | 0.0 | 0.8466 | 0.8795 | 0.2986 | 0.0 | 0.0 | 0.9155 | nan | 0.0071 | 0.0 | 0.9178 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3074 | 0.0 | 0.0 | 0.6078 | 0.0 | 0.0086 | 0.0 | nan | 0.4106 | 0.3222 | 0.1815 | 0.6082 | 0.1171 | 0.0 | 0.6206 | 0.8253 | 0.2609 | 0.0 | 0.6832 | 0.7692 | 0.1957 | 0.0 | 0.0 | 0.8691 | nan | 0.0071 | 0.0 | 0.6951 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2366 | 0.0 | 0.7262 | 0.2954 | 0.2248 | 0.7882 |
| 0.6627 | 17.0 | 1819 | 0.0 | 0.7096 | 0.0 | 0.0181 | 0.0 | nan | 0.7189 | 0.6110 | 0.3654 | 0.8153 | 0.1210 | 0.0 | 0.7156 | 0.9114 | 0.5562 | 0.0 | 0.8788 | 0.9226 | 0.3042 | 0.0 | 0.0 | 0.9273 | nan | 0.0002 | 0.0 | 0.9080 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3069 | 0.0 | 0.0 | 0.5809 | 0.0 | 0.0179 | 0.0 | nan | 0.3488 | 0.3724 | 0.2149 | 0.5069 | 0.1137 | 0.0 | 0.6477 | 0.8079 | 0.2559 | 0.0 | 0.7100 | 0.7595 | 0.1837 | 0.0 | 0.0 | 0.8734 | nan | 0.0002 | 0.0 | 0.7016 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2201 | 0.0 | 0.7429 | 0.2967 | 0.2217 | 0.7786 |
| 0.6954 | 18.0 | 1926 | 0.0 | 0.8919 | 0.0 | 0.0031 | 0.0 | nan | 0.5763 | 0.5167 | 0.3013 | 0.7439 | 0.1958 | 0.0 | 0.7281 | 0.9530 | 0.4080 | 0.0 | 0.8497 | 0.8852 | 0.2874 | 0.0 | 0.0 | 0.8563 | nan | 0.0056 | 0.0 | 0.9222 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3154 | 0.0 | 0.0 | 0.5730 | 0.0 | 0.0031 | 0.0 | nan | 0.3625 | 0.4887 | 0.1980 | 0.6038 | 0.1714 | 0.0 | 0.6684 | 0.8291 | 0.2599 | 0.0 | 0.7176 | 0.7922 | 0.2045 | 0.0 | 0.0 | 0.8322 | nan | 0.0056 | 0.0 | 0.6432 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2459 | 0.0 | 0.6984 | 0.2861 | 0.2303 | 0.7947 |
| 0.6592 | 19.0 | 2033 | 0.0 | 0.8433 | 0.0 | 0.0496 | 0.0 | nan | 0.5622 | 0.6415 | 0.3618 | 0.7738 | 0.1797 | 0.0 | 0.6474 | 0.9741 | 0.6289 | 0.0 | 0.6784 | 0.9279 | 0.3132 | 0.0 | 0.0 | 0.8985 | nan | 0.0019 | 0.0 | 0.9235 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2431 | 0.0 | 0.0 | 0.6155 | 0.0 | 0.0493 | 0.0 | nan | 0.3959 | 0.5424 | 0.2210 | 0.6568 | 0.1504 | 0.0 | 0.6217 | 0.8227 | 0.2586 | 0.0 | 0.6198 | 0.7658 | 0.2117 | 0.0 | 0.0 | 0.8686 | nan | 0.0019 | 0.0 | 0.6541 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1919 | 0.0 | 0.6999 | 0.2924 | 0.2318 | 0.7924 |
| 0.6682 | 20.0 | 2140 | 0.0 | 0.8071 | 0.0 | 0.0796 | 0.0 | nan | 0.5870 | 0.4899 | 0.4985 | 0.7638 | 0.2075 | 0.0 | 0.7505 | 0.9346 | 0.6505 | 0.0 | 0.8297 | 0.9187 | 0.3668 | 0.0 | 0.0 | 0.9157 | nan | 0.0082 | 0.0 | 0.9407 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2163 | 0.0 | 0.0 | 0.6144 | 0.0 | 0.0748 | 0.0 | nan | 0.3846 | 0.4807 | 0.2584 | 0.6083 | 0.1892 | 0.0 | 0.6719 | 0.8371 | 0.2436 | 0.0 | 0.7173 | 0.7842 | 0.1994 | 0.0 | 0.0 | 0.8798 | nan | 0.0082 | 0.0 | 0.6331 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1719 | 0.0 | 0.6756 | 0.3020 | 0.2351 | 0.7976 |
| 0.6249 | 21.0 | 2247 | 0.6678 | 0.2540 | 0.3195 | 0.7981 | nan | 0.6625 | 0.9563 | 0.8027 | 0.7398 | 0.1695 | 0.0 | 0.4050 | 0.7541 | 0.0 | 0.9306 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0473 | nan | 0.0 | 0.8526 | 0.0 | 0.6384 | 0.1242 | 0.0 | nan | 0.0 | 0.3671 | 0.0 | 0.0 | 0.9185 | 0.7725 | 0.8706 | 0.0 | 0.0 | 0.2129 | 0.0 | nan | 0.5746 | 0.8111 | 0.7593 | 0.5842 | 0.1557 | 0.0 | 0.2176 | 0.3250 | 0.0 | 0.7386 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0473 | nan | 0.0 | 0.6693 | 0.0 | 0.3844 | 0.1188 | 0.0 | nan | 0.0 | 0.2479 | 0.0 | 0.0 | 0.7914 | 0.7105 | 0.8285 | 0.0 | 0.0 | 0.1638 | 0.0 |
| 0.6278 | 22.0 | 2354 | 0.6800 | 0.2513 | 0.3216 | 0.7949 | nan | 0.6354 | 0.9558 | 0.8656 | 0.7557 | 0.1401 | 0.0 | 0.4619 | 0.6943 | 0.0 | 0.9333 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0315 | nan | 0.0 | 0.8031 | 0.0 | 0.6422 | 0.1074 | 0.0 | nan | 0.0 | 0.4139 | 0.0 | 0.0 | 0.9114 | 0.8658 | 0.8302 | 0.0 | 0.0 | 0.2446 | 0.0 | nan | 0.5527 | 0.8215 | 0.7864 | 0.5887 | 0.1346 | 0.0 | 0.2336 | 0.3191 | 0.0 | 0.7265 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0315 | nan | 0.0 | 0.6458 | 0.0 | 0.3638 | 0.1048 | 0.0 | nan | 0.0 | 0.2338 | 0.0 | 0.0 | 0.7831 | 0.7282 | 0.8001 | 0.0 | 0.0 | 0.1868 | 0.0 |
| 0.6375 | 23.0 | 2461 | 0.6680 | 0.2563 | 0.3186 | 0.7976 | nan | 0.6355 | 0.9595 | 0.8844 | 0.6403 | 0.2228 | 0.0 | 0.3772 | 0.5620 | 0.0 | 0.9094 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0640 | nan | 0.0 | 0.8615 | 0.0 | 0.6510 | 0.1498 | 0.0 | nan | 0.0 | 0.3834 | 0.0 | 0.0 | 0.9024 | 0.8874 | 0.8627 | 0.0 | 0.0 | 0.2419 | 0.0 | nan | 0.5548 | 0.8086 | 0.7729 | 0.5236 | 0.2018 | 0.0 | 0.2287 | 0.3137 | 0.0 | 0.7398 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0634 | nan | 0.0 | 0.6603 | 0.0 | 0.3896 | 0.1381 | 0.0 | nan | 0.0 | 0.2666 | 0.0 | 0.0 | 0.7881 | 0.7394 | 0.8256 | 0.0 | 0.0 | 0.1871 | 0.0 |
| 0.6202 | 24.0 | 2568 | 0.6866 | 0.2618 | 0.3236 | 0.7961 | nan | 0.6075 | 0.9674 | 0.8360 | 0.6102 | 0.1879 | 0.0 | 0.4285 | 0.5972 | 0.0 | 0.9180 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1500 | nan | 0.0 | 0.8830 | 0.0 | 0.6661 | 0.1963 | 0.0 | nan | 0.0 | 0.4180 | 0.0 | 0.0 | 0.8918 | 0.8483 | 0.8660 | 0.0 | 0.0 | 0.2840 | 0.0 | nan | 0.5428 | 0.7997 | 0.7679 | 0.5062 | 0.1644 | 0.0 | 0.2289 | 0.3309 | 0.0 | 0.7596 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1473 | nan | 0.0 | 0.6670 | 0.0 | 0.4004 | 0.1767 | 0.0 | nan | 0.0 | 0.2836 | 0.0 | 0.0 | 0.8076 | 0.7619 | 0.8236 | 0.0 | 0.0 | 0.2079 | 0.0 |
| 0.5627 | 25.0 | 2675 | 0.6950 | 0.2551 | 0.3248 | 0.7883 | nan | 0.6233 | 0.9526 | 0.7145 | 0.7187 | 0.1813 | 0.0 | 0.3959 | 0.7039 | 0.0 | 0.9160 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1183 | nan | 0.0 | 0.8342 | 0.0 | 0.5499 | 0.2476 | 0.0 | nan | 0.0 | 0.4821 | 0.0 | 0.0 | 0.8725 | 0.8618 | 0.8633 | 0.0 | 0.0 | 0.3577 | 0.0 | nan | 0.5503 | 0.7925 | 0.6705 | 0.5845 | 0.1689 | 0.0 | 0.2198 | 0.3385 | 0.0 | 0.7322 | 0.0 | 0.0 | 0.0 | 0.0 | 0.1174 | nan | 0.0 | 0.6527 | 0.0 | 0.3227 | 0.2119 | 0.0 | nan | 0.0 | 0.2422 | 0.0 | 0.0 | 0.7923 | 0.7260 | 0.8255 | 0.0 | 0.0 | 0.2167 | 0.0 |
| 0.5623 | 26.0 | 2782 | 0.6558 | 0.2686 | 0.3385 | 0.8010 | nan | 0.6338 | 0.9493 | 0.8134 | 0.7256 | 0.1979 | 0.0 | 0.4685 | 0.7518 | 0.0 | 0.9364 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2286 | nan | 0.0 | 0.8577 | 0.0 | 0.5809 | 0.2585 | 0.0 | nan | 0.0 | 0.4459 | 0.0 | 0.0 | 0.8951 | 0.8978 | 0.8844 | 0.0 | 0.0192 | 0.2882 | 0.0 | nan | 0.5476 | 0.8200 | 0.7429 | 0.5770 | 0.1837 | 0.0 | 0.2364 | 0.3743 | 0.0 | 0.7396 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2160 | nan | 0.0 | 0.6671 | 0.0 | 0.3646 | 0.2093 | 0.0 | nan | 0.0 | 0.2863 | 0.0 | 0.0 | 0.8023 | 0.7446 | 0.8423 | 0.0 | 0.0185 | 0.2213 | 0.0 |
| 0.5882 | 27.0 | 2889 | 0.6416 | 0.2680 | 0.3280 | 0.8106 | nan | 0.7809 | 0.9232 | 0.8840 | 0.6978 | 0.2374 | 0.0 | 0.4869 | 0.4140 | 0.0 | 0.9242 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2349 | nan | 0.0 | 0.8828 | 0.0 | 0.4518 | 0.2084 | 0.0 | nan | 0.0 | 0.3889 | 0.0 | 0.0 | 0.9206 | 0.8679 | 0.8908 | 0.0 | 0.0 | 0.3012 | 0.0 | nan | 0.6265 | 0.8391 | 0.7529 | 0.6005 | 0.2168 | 0.0 | 0.2675 | 0.2729 | 0.0 | 0.7130 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2226 | nan | 0.0 | 0.6384 | 0.0 | 0.3296 | 0.1915 | 0.0 | nan | 0.0 | 0.2781 | 0.0 | 0.0 | 0.7946 | 0.7640 | 0.8488 | 0.0 | 0.0 | 0.2194 | 0.0 |
| 0.583 | 28.0 | 2996 | 0.6491 | 0.2734 | 0.3417 | 0.8046 | nan | 0.6541 | 0.9605 | 0.8786 | 0.7598 | 0.1411 | 0.0 | 0.4900 | 0.6147 | 0.0 | 0.9432 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3777 | nan | 0.0 | 0.8500 | 0.0 | 0.6605 | 0.2360 | 0.0 | nan | 0.0 | 0.4016 | 0.0 | 0.0 | 0.8786 | 0.8680 | 0.8514 | 0.0 | 0.0716 | 0.2973 | 0.0 | nan | 0.5775 | 0.8311 | 0.7770 | 0.5680 | 0.1357 | 0.0 | 0.2297 | 0.3515 | 0.0 | 0.7436 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3387 | nan | 0.0 | 0.6728 | 0.0 | 0.3790 | 0.2067 | 0.0 | nan | 0.0 | 0.2924 | 0.0 | 0.0 | 0.7950 | 0.7335 | 0.8178 | 0.0 | 0.0647 | 0.2332 | 0.0 |
| 0.5399 | 29.0 | 3103 | 0.6503 | 0.2714 | 0.3437 | 0.8027 | nan | 0.7145 | 0.9360 | 0.8554 | 0.7869 | 0.1668 | 0.0 | 0.4411 | 0.6746 | 0.0 | 0.9579 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3204 | nan | 0.0 | 0.7367 | 0.0 | 0.5891 | 0.2639 | 0.0 | nan | 0.0 | 0.4256 | 0.0 | 0.0 | 0.9170 | 0.9052 | 0.9104 | 0.0 | 0.0836 | 0.3133 | 0.0 | nan | 0.5941 | 0.8288 | 0.7852 | 0.5776 | 0.1580 | 0.0 | 0.2699 | 0.3237 | 0.0 | 0.6720 | 0.0 | 0.0 | 0.0 | 0.0 | 0.2925 | nan | 0.0 | 0.6494 | 0.0 | 0.3454 | 0.2215 | 0.0 | nan | 0.0 | 0.2747 | 0.0 | 0.0 | 0.7852 | 0.7457 | 0.8558 | 0.0 | 0.0774 | 0.2273 | 0.0 |
| 0.5293 | 30.0 | 3210 | 0.6663 | 0.2713 | 0.3395 | 0.8042 | nan | 0.7217 | 0.9318 | 0.8745 | 0.8165 | 0.1842 | 0.0 | 0.3759 | 0.7404 | 0.0 | 0.9308 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3370 | nan | 0.0 | 0.8642 | 0.0 | 0.5393 | 0.2070 | 0.0 | nan | 0.0 | 0.3817 | 0.0 | 0.0 | 0.9030 | 0.7994 | 0.8605 | 0.0 | 0.0136 | 0.3816 | 0.0 | nan | 0.6056 | 0.8248 | 0.7837 | 0.5368 | 0.1772 | 0.0 | 0.2484 | 0.3753 | 0.0 | 0.7504 | 0.0 | 0.0 | 0.0 | 0.0 | 0.3106 | nan | 0.0 | 0.6453 | 0.0 | 0.3263 | 0.1887 | 0.0 | nan | 0.0 | 0.2868 | 0.0 | 0.0 | 0.7993 | 0.7363 | 0.8267 | 0.0 | 0.0130 | 0.2477 | 0.0 |
| 0.5507 | 31.0 | 3317 | 0.6914 | 0.2660 | 0.3290 | 0.7919 | nan | 0.6185 | 0.9644 | 0.6731 | 0.6413 | 0.1576 | 0.0 | 0.3454 | 0.5530 | 0.0 | 0.9147 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5739 | nan | 0.0 | 0.8711 | 0.0 | 0.5920 | 0.3049 | 0.0 | nan | 0.0 | 0.4400 | 0.0 | 0.0 | 0.9047 | 0.7982 | 0.8196 | 0.0 | 0.0041 | 0.3518 | 0.0 | nan | 0.5435 | 0.7910 | 0.6258 | 0.5648 | 0.1434 | 0.0 | 0.2163 | 0.3586 | 0.0 | 0.7603 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4199 | nan | 0.0 | 0.6568 | 0.0 | 0.3419 | 0.2427 | 0.0 | nan | 0.0 | 0.2974 | 0.0 | 0.0 | 0.8016 | 0.7234 | 0.7915 | 0.0 | 0.0040 | 0.2283 | 0.0 |
| 0.5602 | 32.0 | 3424 | 0.6411 | 0.2802 | 0.3472 | 0.8101 | nan | 0.6883 | 0.9485 | 0.8664 | 0.7639 | 0.1489 | 0.0 | 0.5011 | 0.6326 | 0.0 | 0.9104 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5617 | nan | 0.0 | 0.8921 | 0.0 | 0.6268 | 0.2051 | 0.0 | nan | 0.0 | 0.3632 | 0.0 | 0.0 | 0.8960 | 0.8552 | 0.8981 | 0.0 | 0.0221 | 0.3290 | 0.0 | nan | 0.5877 | 0.8330 | 0.7807 | 0.5591 | 0.1386 | 0.0 | 0.2813 | 0.3887 | 0.0 | 0.7831 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4225 | nan | 0.0 | 0.6645 | 0.0 | 0.3730 | 0.1864 | 0.0 | nan | 0.0 | 0.2938 | 0.0 | 0.0 | 0.8000 | 0.7455 | 0.8533 | 0.0 | 0.0216 | 0.2553 | 0.0 |
| 0.5403 | 33.0 | 3531 | 0.6642 | 0.2729 | 0.3431 | 0.8017 | nan | 0.7235 | 0.9123 | 0.8745 | 0.7791 | 0.1617 | 0.0 | 0.4874 | 0.5172 | 0.0 | 0.9381 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5344 | nan | 0.0 | 0.8467 | 0.0 | 0.6245 | 0.1614 | 0.0 | nan | 0.0 | 0.4356 | 0.0 | 0.0 | 0.9141 | 0.8488 | 0.9075 | 0.0 | 0.0052 | 0.3063 | 0.0 | nan | 0.5819 | 0.8258 | 0.7765 | 0.5111 | 0.1504 | 0.0 | 0.2836 | 0.3475 | 0.0 | 0.7294 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4025 | nan | 0.0 | 0.6638 | 0.0 | 0.3659 | 0.1505 | 0.0 | nan | 0.0 | 0.3046 | 0.0 | 0.0 | 0.7944 | 0.7435 | 0.8602 | 0.0 | 0.0052 | 0.2349 | 0.0 |
| 0.5168 | 34.0 | 3638 | 0.6402 | 0.2810 | 0.3485 | 0.8095 | nan | 0.7201 | 0.9345 | 0.8740 | 0.7414 | 0.1833 | 0.0 | 0.5538 | 0.5357 | 0.0 | 0.9369 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5640 | nan | 0.0 | 0.8776 | 0.0 | 0.5961 | 0.2626 | 0.0 | nan | 0.0 | 0.4488 | 0.0 | 0.0 | 0.9137 | 0.7841 | 0.8616 | 0.0 | 0.0 | 0.3650 | 0.0 | nan | 0.5901 | 0.8362 | 0.7926 | 0.6243 | 0.1652 | 0.0 | 0.2893 | 0.3653 | 0.0 | 0.7485 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4192 | nan | 0.0 | 0.6649 | 0.0 | 0.3752 | 0.2284 | 0.0 | nan | 0.0 | 0.3013 | 0.0 | 0.0 | 0.7971 | 0.7158 | 0.8280 | 0.0 | 0.0 | 0.2491 | 0.0 |
| 0.522 | 35.0 | 3745 | 0.6674 | 0.2743 | 0.3458 | 0.8002 | nan | 0.5916 | 0.9608 | 0.8505 | 0.7896 | 0.1387 | 0.0 | 0.4421 | 0.7247 | 0.0 | 0.9421 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5275 | nan | 0.0 | 0.8349 | 0.0 | 0.5652 | 0.1952 | 0.0 | nan | 0.0 | 0.4814 | 0.0 | 0.0 | 0.9081 | 0.8478 | 0.8898 | 0.0 | 0.0069 | 0.3697 | 0.0 | nan | 0.5251 | 0.8163 | 0.7812 | 0.5692 | 0.1306 | 0.0 | 0.2611 | 0.3743 | 0.0 | 0.7538 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4358 | nan | 0.0 | 0.6717 | 0.0 | 0.3549 | 0.1812 | 0.0 | nan | 0.0 | 0.2812 | 0.0 | 0.0 | 0.7991 | 0.7471 | 0.8535 | 0.0 | 0.0068 | 0.2358 | 0.0 |
| 0.4947 | 36.0 | 3852 | 0.6619 | 0.2752 | 0.3503 | 0.7991 | nan | 0.6020 | 0.9553 | 0.6755 | 0.7710 | 0.2239 | 0.0 | 0.5168 | 0.6551 | 0.0 | 0.9349 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6691 | nan | 0.0 | 0.8095 | 0.0 | 0.7100 | 0.1976 | 0.0 | nan | 0.0 | 0.4787 | 0.0 | 0.0 | 0.8903 | 0.8914 | 0.8668 | 0.0 | 0.0007 | 0.3623 | 0.0 | nan | 0.5291 | 0.8115 | 0.6361 | 0.5873 | 0.1919 | 0.0 | 0.2904 | 0.4117 | 0.0 | 0.7803 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4348 | nan | 0.0 | 0.6702 | 0.0 | 0.3617 | 0.1812 | 0.0 | nan | 0.0 | 0.2947 | 0.0 | 0.0 | 0.8036 | 0.7365 | 0.8339 | 0.0 | 0.0007 | 0.2507 | 0.0 |
| 0.5073 | 37.0 | 3959 | 0.6782 | 0.2792 | 0.3508 | 0.8019 | nan | 0.6843 | 0.9206 | 0.8269 | 0.7932 | 0.2000 | 0.0 | 0.5293 | 0.6061 | 0.0 | 0.9381 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6202 | nan | 0.0 | 0.8888 | 0.0 | 0.6030 | 0.2416 | 0.0 | nan | 0.0 | 0.3985 | 0.0 | 0.0 | 0.8823 | 0.8329 | 0.8918 | 0.0 | 0.0 | 0.3687 | 0.0 | nan | 0.5649 | 0.8204 | 0.7692 | 0.5226 | 0.1828 | 0.0 | 0.3027 | 0.4019 | 0.0 | 0.7543 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4303 | nan | 0.0 | 0.6624 | 0.0 | 0.3595 | 0.2136 | 0.0 | nan | 0.0 | 0.2976 | 0.0 | 0.0 | 0.8008 | 0.7378 | 0.8484 | 0.0 | 0.0 | 0.2667 | 0.0 |
| 0.4788 | 38.0 | 4066 | 0.6694 | 0.2768 | 0.3467 | 0.8020 | nan | 0.6894 | 0.9371 | 0.8519 | 0.7659 | 0.2090 | 0.0 | 0.4494 | 0.5935 | 0.0 | 0.9331 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6390 | nan | 0.0 | 0.9029 | 0.0 | 0.4947 | 0.2279 | 0.0 | nan | 0.0 | 0.4255 | 0.0 | 0.0 | 0.8438 | 0.8985 | 0.8365 | 0.0 | 0.0 | 0.3976 | 0.0 | nan | 0.5567 | 0.8293 | 0.7865 | 0.5419 | 0.1959 | 0.0 | 0.2915 | 0.3960 | 0.0 | 0.7643 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4261 | nan | 0.0 | 0.6474 | 0.0 | 0.3359 | 0.1974 | 0.0 | nan | 0.0 | 0.3047 | 0.0 | 0.0 | 0.7876 | 0.7159 | 0.8095 | 0.0 | 0.0 | 0.2724 | 0.0 |
| 0.4627 | 39.0 | 4173 | 0.6439 | 0.2840 | 0.3563 | 0.8069 | nan | 0.6652 | 0.9293 | 0.8861 | 0.7534 | 0.2398 | 0.0 | 0.5481 | 0.5694 | 0.0 | 0.9305 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6488 | nan | 0.0 | 0.8714 | 0.0 | 0.5817 | 0.3115 | 0.0 | nan | 0.0 | 0.4716 | 0.0 | 0.0 | 0.9060 | 0.8645 | 0.8991 | 0.0 | 0.0123 | 0.3128 | 0.0 | nan | 0.5453 | 0.8303 | 0.7889 | 0.5693 | 0.2107 | 0.0 | 0.3035 | 0.3784 | 0.0 | 0.7531 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4437 | nan | 0.0 | 0.6747 | 0.0 | 0.3647 | 0.2365 | 0.0 | nan | 0.0 | 0.3209 | 0.0 | 0.0 | 0.8070 | 0.7501 | 0.8626 | 0.0 | 0.0123 | 0.2376 | 0.0 |
| 0.4775 | 40.0 | 4280 | 0.6679 | 0.2808 | 0.3499 | 0.8051 | nan | 0.6127 | 0.9570 | 0.8742 | 0.8046 | 0.1980 | 0.0 | 0.4223 | 0.4104 | 0.0 | 0.8918 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7077 | nan | 0.0 | 0.8362 | 0.0 | 0.6999 | 0.3405 | 0.0 | nan | 0.0 | 0.4473 | 0.0 | 0.0 | 0.9272 | 0.7890 | 0.8870 | 0.0 | 0.0348 | 0.3578 | 0.0 | nan | 0.5307 | 0.8250 | 0.7915 | 0.5729 | 0.1789 | 0.0 | 0.2532 | 0.3154 | 0.0 | 0.7855 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4322 | nan | 0.0 | 0.6863 | 0.0 | 0.4071 | 0.2521 | 0.0 | nan | 0.0 | 0.3089 | 0.0 | 0.0 | 0.7975 | 0.7101 | 0.8518 | 0.0 | 0.0332 | 0.2520 | 0.0 |
| 0.4816 | 41.0 | 4387 | 0.6700 | 0.2812 | 0.3491 | 0.8060 | nan | 0.6497 | 0.9430 | 0.8488 | 0.7581 | 0.1492 | 0.0 | 0.5026 | 0.5415 | 0.0 | 0.9317 | 0.0 | 0.0 | 0.0 | 0.0 | 0.5586 | nan | 0.0 | 0.8655 | 0.0 | 0.6495 | 0.3284 | 0.0 | nan | 0.0 | 0.4062 | 0.0 | 0.0 | 0.9026 | 0.8756 | 0.9041 | 0.0 | 0.0154 | 0.3409 | 0.0 | nan | 0.5483 | 0.8245 | 0.7804 | 0.5613 | 0.1444 | 0.0 | 0.2941 | 0.3765 | 0.0 | 0.7657 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4309 | nan | 0.0 | 0.6812 | 0.0 | 0.3456 | 0.2526 | 0.0 | nan | 0.0 | 0.3020 | 0.0 | 0.0 | 0.8013 | 0.7384 | 0.8651 | 0.0 | 0.0147 | 0.2719 | 0.0 |
| 0.4643 | 42.0 | 4494 | 0.6465 | 0.2865 | 0.3603 | 0.8079 | nan | 0.6087 | 0.9460 | 0.8859 | 0.8411 | 0.2736 | 0.0 | 0.5016 | 0.5636 | 0.0 | 0.9311 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6503 | nan | 0.0 | 0.8152 | 0.0 | 0.6211 | 0.3064 | 0.0 | nan | 0.0 | 0.4719 | 0.0 | 0.0 | 0.9130 | 0.8643 | 0.8988 | 0.0 | 0.0386 | 0.3972 | 0.0 | nan | 0.5283 | 0.8363 | 0.7831 | 0.5893 | 0.2376 | 0.0 | 0.2835 | 0.3871 | 0.0 | 0.7808 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4435 | nan | 0.0 | 0.6630 | 0.0 | 0.3653 | 0.2468 | 0.0 | nan | 0.0 | 0.3230 | 0.0 | 0.0 | 0.8082 | 0.7553 | 0.8615 | 0.0 | 0.0352 | 0.2410 | 0.0 |
| 0.4758 | 43.0 | 4601 | 0.6531 | 0.2866 | 0.3573 | 0.8033 | nan | 0.6189 | 0.9384 | 0.8678 | 0.7635 | 0.2556 | 0.0 | 0.4631 | 0.5328 | 0.0 | 0.9354 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7078 | nan | 0.0 | 0.8840 | 0.0 | 0.5168 | 0.3159 | 0.0 | nan | 0.0 | 0.5012 | 0.0 | 0.0 | 0.9003 | 0.8435 | 0.8800 | 0.0 | 0.1130 | 0.3953 | 0.0 | nan | 0.5198 | 0.8118 | 0.7952 | 0.5642 | 0.2235 | 0.0 | 0.2833 | 0.3642 | 0.0 | 0.7845 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4597 | nan | 0.0 | 0.6755 | 0.0 | 0.3530 | 0.2604 | 0.0 | nan | 0.0 | 0.3225 | 0.0 | 0.0 | 0.8104 | 0.7326 | 0.8509 | 0.0 | 0.0804 | 0.2786 | 0.0 |
| 0.4682 | 44.0 | 4708 | 0.6534 | 0.2843 | 0.3584 | 0.8035 | nan | 0.6193 | 0.9309 | 0.8952 | 0.8209 | 0.2108 | 0.0 | 0.4880 | 0.5279 | 0.0 | 0.9208 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6156 | nan | 0.0 | 0.8474 | 0.0 | 0.6475 | 0.3017 | 0.0 | nan | 0.0 | 0.5203 | 0.0 | 0.0 | 0.9113 | 0.8445 | 0.9254 | 0.0 | 0.0324 | 0.4089 | 0.0 | nan | 0.5374 | 0.8204 | 0.7932 | 0.5268 | 0.1915 | 0.0 | 0.2784 | 0.3506 | 0.0 | 0.7789 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4807 | nan | 0.0 | 0.6878 | 0.0 | 0.3576 | 0.2551 | 0.0 | nan | 0.0 | 0.3135 | 0.0 | 0.0 | 0.8111 | 0.7485 | 0.8770 | 0.0 | 0.0241 | 0.2662 | 0.0 |
| 0.4807 | 45.0 | 4815 | 0.6325 | 0.2885 | 0.3653 | 0.8075 | nan | 0.6071 | 0.9223 | 0.8977 | 0.8564 | 0.3516 | 0.0 | 0.5039 | 0.5266 | 0.0 | 0.9433 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6309 | nan | 0.0 | 0.8390 | 0.0 | 0.5600 | 0.3684 | 0.0 | nan | 0.0 | 0.4760 | 0.0 | 0.0 | 0.9242 | 0.8477 | 0.9264 | 0.0 | 0.0706 | 0.4361 | 0.0 | nan | 0.5390 | 0.8355 | 0.7773 | 0.5424 | 0.2623 | 0.0 | 0.2809 | 0.3567 | 0.0 | 0.7695 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4435 | nan | 0.0 | 0.6957 | 0.0 | 0.3710 | 0.2746 | 0.0 | nan | 0.0 | 0.3253 | 0.0 | 0.0 | 0.8070 | 0.7405 | 0.8751 | 0.0 | 0.0603 | 0.2758 | 0.0 |
| 0.4611 | 46.0 | 4922 | 0.6577 | 0.2850 | 0.3588 | 0.8022 | nan | 0.6022 | 0.9292 | 0.8230 | 0.8449 | 0.2449 | 0.0 | 0.4479 | 0.5166 | 0.0 | 0.9396 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6521 | nan | 0.0 | 0.8516 | 0.0 | 0.7020 | 0.3122 | 0.0 | nan | 0.0 | 0.4822 | 0.0 | 0.0 | 0.9015 | 0.8642 | 0.9095 | 0.0 | 0.0737 | 0.3834 | 0.0 | nan | 0.5034 | 0.8172 | 0.7584 | 0.5407 | 0.2171 | 0.0 | 0.2684 | 0.3534 | 0.0 | 0.7740 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4392 | nan | 0.0 | 0.6942 | 0.0 | 0.3877 | 0.2651 | 0.0 | nan | 0.0 | 0.3266 | 0.0 | 0.0 | 0.8136 | 0.7528 | 0.8682 | 0.0 | 0.0684 | 0.2725 | 0.0 |
| 0.3966 | 47.0 | 5029 | 0.6749 | 0.2810 | 0.3530 | 0.7981 | nan | 0.5613 | 0.9379 | 0.7768 | 0.8262 | 0.2161 | 0.0 | 0.4333 | 0.4777 | 0.0 | 0.9410 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7133 | nan | 0.0 | 0.8766 | 0.0 | 0.7119 | 0.3548 | 0.0 | nan | 0.0 | 0.3871 | 0.0 | 0.0 | 0.9073 | 0.8326 | 0.8935 | 0.0 | 0.1104 | 0.3367 | 0.0 | nan | 0.4867 | 0.8241 | 0.7219 | 0.4978 | 0.1759 | 0.0 | 0.2653 | 0.3573 | 0.0 | 0.7854 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4599 | nan | 0.0 | 0.6910 | 0.0 | 0.3907 | 0.2610 | 0.0 | nan | 0.0 | 0.3009 | 0.0 | 0.0 | 0.8082 | 0.7419 | 0.8593 | 0.0 | 0.0926 | 0.2732 | 0.0 |
| 0.4672 | 48.0 | 5136 | 0.6660 | 0.2784 | 0.3546 | 0.8021 | nan | 0.7292 | 0.9096 | 0.8990 | 0.8135 | 0.1493 | 0.0 | 0.5230 | 0.5946 | 0.0 | 0.9526 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7375 | nan | 0.0 | 0.8687 | 0.0 | 0.4252 | 0.2353 | 0.0 | nan | 0.0 | 0.4237 | 0.0 | 0.0 | 0.8933 | 0.8270 | 0.9183 | 0.0 | 0.0817 | 0.3646 | 0.0 | nan | 0.5942 | 0.8232 | 0.8036 | 0.5377 | 0.1347 | 0.0 | 0.2647 | 0.3728 | 0.0 | 0.7137 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4297 | nan | 0.0 | 0.6601 | 0.0 | 0.2946 | 0.2127 | 0.0 | nan | 0.0 | 0.3151 | 0.0 | 0.0 | 0.8132 | 0.7409 | 0.8699 | 0.0 | 0.0613 | 0.2675 | 0.0 |
| 0.4622 | 49.0 | 5243 | 0.7150 | 0.2767 | 0.3475 | 0.7951 | nan | 0.6718 | 0.8870 | 0.8975 | 0.8901 | 0.1529 | 0.0 | 0.4453 | 0.5462 | 0.0 | 0.9481 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6509 | nan | 0.0 | 0.9012 | 0.0 | 0.5387 | 0.2114 | 0.0 | nan | 0.0 | 0.4295 | 0.0 | 0.0 | 0.9268 | 0.7997 | 0.9010 | 0.0 | 0.0350 | 0.2863 | 0.0 | nan | 0.5624 | 0.8117 | 0.7961 | 0.4364 | 0.1414 | 0.0 | 0.2702 | 0.3711 | 0.0 | 0.7633 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4582 | nan | 0.0 | 0.6590 | 0.0 | 0.3660 | 0.1902 | 0.0 | nan | 0.0 | 0.3246 | 0.0 | 0.0 | 0.8088 | 0.7464 | 0.8666 | 0.0 | 0.0332 | 0.2487 | 0.0 |
| 0.4145 | 50.0 | 5350 | 0.6807 | 0.2818 | 0.3565 | 0.8008 | nan | 0.6541 | 0.9143 | 0.8871 | 0.8536 | 0.1956 | 0.0 | 0.4524 | 0.5023 | 0.0 | 0.9266 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7110 | nan | 0.0 | 0.8771 | 0.0 | 0.6214 | 0.2828 | 0.0 | nan | 0.0 | 0.4773 | 0.0 | 0.0 | 0.9139 | 0.7822 | 0.9051 | 0.0 | 0.0741 | 0.3770 | 0.0 | nan | 0.5552 | 0.8258 | 0.7814 | 0.4837 | 0.1731 | 0.0 | 0.2705 | 0.3530 | 0.0 | 0.7938 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4623 | nan | 0.0 | 0.6836 | 0.0 | 0.3253 | 0.2389 | 0.0 | nan | 0.0 | 0.3381 | 0.0 | 0.0 | 0.8077 | 0.7193 | 0.8652 | 0.0 | 0.0640 | 0.2760 | 0.0 |
| 0.4544 | 51.0 | 5457 | 0.6710 | 0.2839 | 0.3635 | 0.8006 | nan | 0.6233 | 0.9087 | 0.9049 | 0.8695 | 0.2469 | 0.0 | 0.4528 | 0.5746 | 0.0 | 0.9279 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7524 | nan | 0.0 | 0.8690 | 0.0 | 0.5925 | 0.3026 | 0.0 | nan | 0.0 | 0.4862 | 0.0 | 0.0 | 0.9113 | 0.8522 | 0.9246 | 0.0 | 0.0797 | 0.3522 | 0.0 | nan | 0.5369 | 0.8237 | 0.7538 | 0.4608 | 0.2062 | 0.0 | 0.2692 | 0.3838 | 0.0 | 0.7862 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4326 | nan | 0.0 | 0.6889 | 0.0 | 0.3838 | 0.2423 | 0.0 | nan | 0.0 | 0.3336 | 0.0 | 0.0 | 0.8112 | 0.7403 | 0.8791 | 0.0 | 0.0742 | 0.2796 | 0.0 |
| 0.4084 | 52.0 | 5564 | 0.6546 | 0.2867 | 0.3640 | 0.8059 | nan | 0.6423 | 0.9216 | 0.8728 | 0.8610 | 0.1706 | 0.0 | 0.4997 | 0.5610 | 0.0 | 0.9239 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7156 | nan | 0.0 | 0.8634 | 0.0 | 0.6920 | 0.2740 | 0.0 | nan | 0.0 | 0.4887 | 0.0 | 0.0 | 0.9069 | 0.8889 | 0.9000 | 0.0 | 0.0903 | 0.3739 | 0.0 | nan | 0.5431 | 0.8278 | 0.7981 | 0.5189 | 0.1560 | 0.0 | 0.3024 | 0.3737 | 0.0 | 0.7986 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4567 | nan | 0.0 | 0.6880 | 0.0 | 0.3761 | 0.2251 | 0.0 | nan | 0.0 | 0.3343 | 0.0 | 0.0 | 0.8139 | 0.7548 | 0.8646 | 0.0 | 0.0756 | 0.2675 | 0.0 |
| 0.4475 | 53.0 | 5671 | 0.6712 | 0.2818 | 0.3527 | 0.8026 | nan | 0.6170 | 0.9199 | 0.9040 | 0.8414 | 0.2396 | 0.0 | 0.4268 | 0.4352 | 0.0 | 0.9374 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6281 | nan | 0.0 | 0.8676 | 0.0 | 0.6078 | 0.2969 | 0.0 | nan | 0.0 | 0.4899 | 0.0 | 0.0 | 0.9292 | 0.8389 | 0.9008 | 0.0 | 0.0345 | 0.3705 | 0.0 | nan | 0.5299 | 0.8287 | 0.7780 | 0.5304 | 0.1865 | 0.0 | 0.2782 | 0.3138 | 0.0 | 0.7708 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4800 | nan | 0.0 | 0.6830 | 0.0 | 0.3721 | 0.2416 | 0.0 | nan | 0.0 | 0.3294 | 0.0 | 0.0 | 0.7999 | 0.7324 | 0.8688 | 0.0 | 0.0287 | 0.2662 | 0.0 |
| 0.4077 | 54.0 | 5778 | 0.6743 | 0.2885 | 0.3600 | 0.8048 | nan | 0.5791 | 0.9423 | 0.8905 | 0.7810 | 0.2604 | 0.0 | 0.4610 | 0.5324 | 0.0 | 0.9467 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6770 | nan | 0.0 | 0.8826 | 0.0 | 0.5999 | 0.3432 | 0.0 | nan | 0.0 | 0.4846 | 0.0 | 0.0 | 0.9008 | 0.8470 | 0.9224 | 0.0 | 0.0643 | 0.4035 | 0.0 | nan | 0.5145 | 0.8210 | 0.8031 | 0.5666 | 0.1975 | 0.0 | 0.2818 | 0.3555 | 0.0 | 0.7569 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4759 | nan | 0.0 | 0.6745 | 0.0 | 0.3937 | 0.2589 | 0.0 | nan | 0.0 | 0.3445 | 0.0 | 0.0 | 0.8146 | 0.7552 | 0.8771 | 0.0 | 0.0526 | 0.2890 | 0.0 |
| 0.4334 | 55.0 | 5885 | 0.6318 | 0.2919 | 0.3684 | 0.8122 | nan | 0.6590 | 0.9261 | 0.8843 | 0.8552 | 0.2511 | 0.0 | 0.5269 | 0.6052 | 0.0 | 0.9416 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6763 | nan | 0.0 | 0.8438 | 0.0 | 0.6329 | 0.3218 | 0.0 | nan | 0.0 | 0.4795 | 0.0 | 0.0 | 0.9021 | 0.9073 | 0.9129 | 0.0 | 0.0510 | 0.4103 | 0.0 | nan | 0.5659 | 0.8404 | 0.7976 | 0.5330 | 0.2067 | 0.0 | 0.2976 | 0.3918 | 0.0 | 0.7881 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4634 | nan | 0.0 | 0.6961 | 0.0 | 0.4115 | 0.2632 | 0.0 | nan | 0.0 | 0.3295 | 0.0 | 0.0 | 0.8043 | 0.7360 | 0.8742 | 0.0 | 0.0446 | 0.2963 | 0.0 |
| 0.4379 | 56.0 | 5992 | 0.6688 | 0.2871 | 0.3580 | 0.8059 | nan | 0.5682 | 0.9473 | 0.8909 | 0.8684 | 0.1827 | 0.0 | 0.4078 | 0.5539 | 0.0 | 0.9361 | 0.0 | 0.0 | 0.0 | 0.0 | 0.6815 | nan | 0.0 | 0.8838 | 0.0 | 0.6820 | 0.3338 | 0.0 | nan | 0.0 | 0.4720 | 0.0 | 0.0 | 0.9061 | 0.8482 | 0.9133 | 0.0 | 0.0386 | 0.3415 | 0.0 | nan | 0.5081 | 0.8198 | 0.8017 | 0.5046 | 0.1626 | 0.0 | 0.2799 | 0.3793 | 0.0 | 0.7869 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4692 | nan | 0.0 | 0.6965 | 0.0 | 0.4161 | 0.2613 | 0.0 | nan | 0.0 | 0.3389 | 0.0 | 0.0 | 0.8176 | 0.7576 | 0.8727 | 0.0 | 0.0347 | 0.2792 | 0.0 |
| 0.4489 | 57.0 | 6099 | 0.6413 | 0.2898 | 0.3657 | 0.8118 | nan | 0.6336 | 0.9369 | 0.8978 | 0.8637 | 0.2405 | 0.0 | 0.4683 | 0.4792 | 0.0 | 0.9456 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7398 | nan | 0.0 | 0.8757 | 0.0 | 0.6220 | 0.3338 | 0.0 | nan | 0.0 | 0.5178 | 0.0 | 0.0 | 0.8798 | 0.8909 | 0.9242 | 0.0 | 0.0371 | 0.4152 | 0.0 | nan | 0.5641 | 0.8302 | 0.7988 | 0.5222 | 0.2052 | 0.0 | 0.2923 | 0.3509 | 0.0 | 0.7819 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4626 | nan | 0.0 | 0.7010 | 0.0 | 0.4040 | 0.2609 | 0.0 | nan | 0.0 | 0.3240 | 0.0 | 0.0 | 0.8147 | 0.7572 | 0.8780 | 0.0 | 0.0348 | 0.2924 | 0.0 |
| 0.4042 | 58.0 | 6206 | 0.6378 | 0.2905 | 0.3632 | 0.8141 | nan | 0.6889 | 0.9331 | 0.8987 | 0.8277 | 0.1904 | 0.0 | 0.4609 | 0.4760 | 0.0 | 0.9308 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7672 | nan | 0.0 | 0.8689 | 0.0 | 0.6552 | 0.3481 | 0.0 | nan | 0.0 | 0.4860 | 0.0 | 0.0 | 0.9232 | 0.8152 | 0.9071 | 0.0 | 0.0922 | 0.3534 | 0.0 | nan | 0.5797 | 0.8300 | 0.7955 | 0.5497 | 0.1802 | 0.0 | 0.3002 | 0.3608 | 0.0 | 0.7923 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4430 | nan | 0.0 | 0.7010 | 0.0 | 0.3912 | 0.2586 | 0.0 | nan | 0.0 | 0.3330 | 0.0 | 0.0 | 0.8063 | 0.7389 | 0.8722 | 0.0 | 0.0842 | 0.2788 | 0.0 |
| 0.4033 | 59.0 | 6313 | 0.6393 | 0.2901 | 0.3629 | 0.8131 | nan | 0.6851 | 0.9282 | 0.8829 | 0.8307 | 0.1882 | 0.0 | 0.4846 | 0.5244 | 0.0 | 0.9433 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7355 | nan | 0.0 | 0.8673 | 0.0 | 0.6451 | 0.2991 | 0.0 | nan | 0.0 | 0.5054 | 0.0 | 0.0 | 0.9144 | 0.8542 | 0.9130 | 0.0 | 0.0306 | 0.3796 | 0.0 | nan | 0.5736 | 0.8304 | 0.8001 | 0.5264 | 0.1720 | 0.0 | 0.2962 | 0.3684 | 0.0 | 0.7884 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4725 | nan | 0.0 | 0.6993 | 0.0 | 0.3926 | 0.2552 | 0.0 | nan | 0.0 | 0.3409 | 0.0 | 0.0 | 0.8158 | 0.7611 | 0.8778 | 0.0 | 0.0283 | 0.2835 | 0.0 |
| 0.4021 | 60.0 | 6420 | 0.6501 | 0.2886 | 0.3651 | 0.8139 | nan | 0.7362 | 0.9216 | 0.9046 | 0.8150 | 0.1901 | 0.0 | 0.4200 | 0.4985 | 0.0 | 0.9507 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7714 | nan | 0.0 | 0.8632 | 0.0 | 0.6387 | 0.3715 | 0.0 | nan | 0.0 | 0.4586 | 0.0 | 0.0 | 0.9045 | 0.8397 | 0.9113 | 0.0 | 0.1205 | 0.3673 | 0.0 | nan | 0.6146 | 0.8265 | 0.7334 | 0.5541 | 0.1753 | 0.0 | 0.2840 | 0.3505 | 0.0 | 0.7546 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4427 | nan | 0.0 | 0.6866 | 0.0 | 0.3878 | 0.2644 | 0.0 | nan | 0.0 | 0.3435 | 0.0 | 0.0 | 0.8178 | 0.7580 | 0.8759 | 0.0 | 0.0918 | 0.2734 | 0.0 |
| 0.4143 | 61.0 | 6527 | 0.6427 | 0.2897 | 0.3612 | 0.8105 | nan | 0.6811 | 0.9188 | 0.8982 | 0.7937 | 0.2651 | 0.0 | 0.5039 | 0.4599 | 0.0 | 0.9477 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7194 | nan | 0.0 | 0.8837 | 0.0 | 0.5937 | 0.3117 | 0.0 | nan | 0.0 | 0.4858 | 0.0 | 0.0 | 0.9079 | 0.8499 | 0.9188 | 0.0 | 0.0464 | 0.3740 | 0.0 | nan | 0.5727 | 0.8170 | 0.7807 | 0.5701 | 0.2198 | 0.0 | 0.2939 | 0.3411 | 0.0 | 0.7690 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4610 | nan | 0.0 | 0.6847 | 0.0 | 0.3873 | 0.2523 | 0.0 | nan | 0.0 | 0.3447 | 0.0 | 0.0 | 0.8200 | 0.7614 | 0.8782 | 0.0 | 0.0412 | 0.2743 | 0.0 |
| 0.3857 | 62.0 | 6634 | 0.6568 | 0.2875 | 0.3664 | 0.8074 | nan | 0.6878 | 0.9189 | 0.8964 | 0.8039 | 0.1812 | 0.0 | 0.5164 | 0.5660 | 0.0 | 0.9535 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7389 | nan | 0.0 | 0.8524 | 0.0 | 0.6142 | 0.3820 | 0.0 | nan | 0.0 | 0.4951 | 0.0 | 0.0 | 0.8928 | 0.8760 | 0.9272 | 0.0 | 0.0857 | 0.3362 | 0.0 | nan | 0.5667 | 0.8261 | 0.7933 | 0.5405 | 0.1623 | 0.0 | 0.3019 | 0.3736 | 0.0 | 0.7409 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4574 | nan | 0.0 | 0.6916 | 0.0 | 0.3764 | 0.2648 | 0.0 | nan | 0.0 | 0.3324 | 0.0 | 0.0 | 0.8177 | 0.7557 | 0.8809 | 0.0 | 0.0753 | 0.2436 | 0.0 |
| 0.4062 | 63.0 | 6741 | 0.6513 | 0.2914 | 0.3663 | 0.8120 | nan | 0.7112 | 0.9218 | 0.8867 | 0.7747 | 0.2310 | 0.0 | 0.5184 | 0.5408 | 0.0 | 0.9502 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7454 | nan | 0.0 | 0.8541 | 0.0 | 0.5815 | 0.3421 | 0.0 | nan | 0.0 | 0.5055 | 0.0 | 0.0 | 0.9086 | 0.8560 | 0.9291 | 0.0 | 0.0971 | 0.3675 | 0.0 | nan | 0.5784 | 0.8288 | 0.8002 | 0.5326 | 0.2018 | 0.0 | 0.3257 | 0.3750 | 0.0 | 0.7532 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4492 | nan | 0.0 | 0.6895 | 0.0 | 0.3791 | 0.2637 | 0.0 | nan | 0.0 | 0.3276 | 0.0 | 0.0 | 0.8196 | 0.7602 | 0.8842 | 0.0 | 0.0878 | 0.2676 | 0.0 |
| 0.3899 | 64.0 | 6848 | 0.6511 | 0.2897 | 0.3660 | 0.8078 | nan | 0.6784 | 0.9222 | 0.8927 | 0.7620 | 0.2273 | 0.0 | 0.5211 | 0.5469 | 0.0 | 0.9366 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7375 | nan | 0.0 | 0.8515 | 0.0 | 0.6301 | 0.3594 | 0.0 | nan | 0.0 | 0.5137 | 0.0 | 0.0 | 0.9027 | 0.8641 | 0.9136 | 0.0 | 0.0311 | 0.4211 | 0.0 | nan | 0.5682 | 0.8239 | 0.8068 | 0.5166 | 0.2014 | 0.0 | 0.3059 | 0.3793 | 0.0 | 0.7849 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4731 | nan | 0.0 | 0.6751 | 0.0 | 0.3873 | 0.2718 | 0.0 | nan | 0.0 | 0.3411 | 0.0 | 0.0 | 0.8171 | 0.7490 | 0.8788 | 0.0 | 0.0271 | 0.2641 | 0.0 |
| 0.4094 | 65.0 | 6955 | 0.6321 | 0.2906 | 0.3633 | 0.8155 | nan | 0.7419 | 0.9262 | 0.8953 | 0.7420 | 0.2358 | 0.0 | 0.4796 | 0.5340 | 0.0 | 0.9593 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7218 | nan | 0.0 | 0.8464 | 0.0 | 0.5849 | 0.3341 | 0.0 | nan | 0.0 | 0.4942 | 0.0 | 0.0 | 0.9074 | 0.8709 | 0.9111 | 0.0009 | 0.0280 | 0.4123 | 0.0 | nan | 0.6028 | 0.8365 | 0.8011 | 0.5280 | 0.2101 | 0.0 | 0.3052 | 0.3724 | 0.0 | 0.7332 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4604 | nan | 0.0 | 0.6845 | 0.0 | 0.3982 | 0.2645 | 0.0 | nan | 0.0 | 0.3412 | 0.0 | 0.0 | 0.8201 | 0.7577 | 0.8759 | 0.0009 | 0.0255 | 0.2797 | 0.0 |
| 0.3902 | 66.0 | 7062 | 0.6383 | 0.2892 | 0.3622 | 0.8112 | nan | 0.6557 | 0.9316 | 0.8911 | 0.7814 | 0.2329 | 0.0 | 0.5098 | 0.4581 | 0.0 | 0.9394 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7239 | nan | 0.0 | 0.8559 | 0.0 | 0.6460 | 0.3358 | 0.0 | nan | 0.0 | 0.5161 | 0.0 | 0.0 | 0.9274 | 0.8429 | 0.8990 | 0.0 | 0.0312 | 0.4118 | 0.0 | nan | 0.5606 | 0.8294 | 0.8023 | 0.5414 | 0.2068 | 0.0 | 0.3016 | 0.3450 | 0.0 | 0.7787 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4684 | nan | 0.0 | 0.6942 | 0.0 | 0.3908 | 0.2621 | 0.0 | nan | 0.0 | 0.3398 | 0.0 | 0.0 | 0.8126 | 0.7445 | 0.8709 | 0.0 | 0.0272 | 0.2774 | 0.0 |
| 0.3735 | 67.0 | 7169 | 0.6484 | 0.2885 | 0.3627 | 0.8076 | nan | 0.6374 | 0.9351 | 0.9035 | 0.7568 | 0.2251 | 0.0 | 0.4998 | 0.4948 | 0.0 | 0.9478 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7209 | nan | 0.0 | 0.8596 | 0.0 | 0.5804 | 0.3791 | 0.0 | nan | 0.0 | 0.4997 | 0.0 | 0.0 | 0.8999 | 0.8741 | 0.9245 | 0.0 | 0.0483 | 0.4185 | 0.0 | nan | 0.5389 | 0.8231 | 0.7871 | 0.5304 | 0.1996 | 0.0 | 0.2827 | 0.3614 | 0.0 | 0.7835 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4719 | nan | 0.0 | 0.6932 | 0.0 | 0.3775 | 0.2770 | 0.0 | nan | 0.0 | 0.3393 | 0.0 | 0.0 | 0.8216 | 0.7540 | 0.8823 | 0.0 | 0.0421 | 0.2668 | 0.0 |
| 0.3888 | 68.0 | 7276 | 0.6295 | 0.2932 | 0.3681 | 0.8124 | nan | 0.6453 | 0.9414 | 0.8924 | 0.7985 | 0.2832 | 0.0 | 0.5193 | 0.6389 | 0.0 | 0.9459 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7338 | nan | 0.0 | 0.8423 | 0.0 | 0.5126 | 0.3179 | 0.0 | nan | 0.0 | 0.5176 | 0.0 | 0.0 | 0.9164 | 0.8300 | 0.9247 | 0.0010 | 0.0627 | 0.4567 | 0.0 | nan | 0.5521 | 0.8326 | 0.7984 | 0.5384 | 0.2291 | 0.0 | 0.3097 | 0.4143 | 0.0 | 0.7877 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4724 | nan | 0.0 | 0.7028 | 0.0 | 0.3784 | 0.2540 | 0.0 | nan | 0.0 | 0.3337 | 0.0 | 0.0 | 0.8172 | 0.7398 | 0.8859 | 0.0010 | 0.0533 | 0.2830 | 0.0 |
| 0.3463 | 69.0 | 7383 | 0.6746 | 0.2916 | 0.3677 | 0.8094 | nan | 0.6515 | 0.9210 | 0.8823 | 0.8440 | 0.1789 | 0.0 | 0.5215 | 0.5737 | 0.0 | 0.9359 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7389 | nan | 0.0 | 0.8837 | 0.0 | 0.6300 | 0.3350 | 0.0 | nan | 0.0 | 0.4968 | 0.0 | 0.0 | 0.9032 | 0.8934 | 0.9017 | 0.0 | 0.0703 | 0.4058 | 0.0 | nan | 0.5528 | 0.8245 | 0.7907 | 0.5250 | 0.1632 | 0.0 | 0.3014 | 0.3934 | 0.0 | 0.8010 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4788 | nan | 0.0 | 0.6967 | 0.0 | 0.3744 | 0.2605 | 0.0 | nan | 0.0 | 0.3469 | 0.0 | 0.0 | 0.8186 | 0.7642 | 0.8737 | 0.0 | 0.0613 | 0.3051 | 0.0 |
| 0.3702 | 70.0 | 7490 | 0.6890 | 0.2875 | 0.3635 | 0.8012 | nan | 0.5995 | 0.9326 | 0.8853 | 0.8029 | 0.2289 | 0.0 | 0.5002 | 0.5737 | 0.0 | 0.9451 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7417 | nan | 0.0 | 0.8227 | 0.0 | 0.6097 | 0.3263 | 0.0 | nan | 0.0 | 0.5053 | 0.0 | 0.0 | 0.9192 | 0.8235 | 0.9210 | 0.0 | 0.0666 | 0.4292 | 0.0 | nan | 0.5210 | 0.8170 | 0.8010 | 0.5198 | 0.1907 | 0.0 | 0.3010 | 0.3898 | 0.0 | 0.7651 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4836 | nan | 0.0 | 0.6753 | 0.0 | 0.3649 | 0.2576 | 0.0 | nan | 0.0 | 0.3513 | 0.0 | 0.0 | 0.8151 | 0.7466 | 0.8840 | 0.0 | 0.0563 | 0.2598 | 0.0 |
| 0.3642 | 71.0 | 7597 | 0.6835 | 0.2867 | 0.3593 | 0.8038 | nan | 0.6182 | 0.9263 | 0.8897 | 0.8120 | 0.1957 | 0.0 | 0.4355 | 0.5927 | 0.0 | 0.9233 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7200 | nan | 0.0 | 0.8870 | 0.0 | 0.6023 | 0.3097 | 0.0 | nan | 0.0 | 0.4994 | 0.0 | 0.0 | 0.9270 | 0.8288 | 0.9199 | 0.0 | 0.0564 | 0.3520 | 0.0 | nan | 0.5306 | 0.8156 | 0.7929 | 0.4950 | 0.1747 | 0.0 | 0.2794 | 0.3891 | 0.0 | 0.8032 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4771 | nan | 0.0 | 0.6905 | 0.0 | 0.3674 | 0.2453 | 0.0 | nan | 0.0 | 0.3447 | 0.0 | 0.0 | 0.8116 | 0.7450 | 0.8826 | 0.0 | 0.0496 | 0.2805 | 0.0 |
| 0.36 | 72.0 | 7704 | 0.6669 | 0.2901 | 0.3652 | 0.8075 | nan | 0.6434 | 0.9327 | 0.8960 | 0.7900 | 0.2190 | 0.0 | 0.4746 | 0.5706 | 0.0 | 0.9461 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7405 | nan | 0.0 | 0.8967 | 0.0 | 0.5709 | 0.3347 | 0.0 | nan | 0.0 | 0.5213 | 0.0 | 0.0 | 0.8767 | 0.8656 | 0.9185 | 0.0 | 0.0645 | 0.4230 | 0.0 | nan | 0.5397 | 0.8231 | 0.7948 | 0.5252 | 0.1971 | 0.0 | 0.2832 | 0.3853 | 0.0 | 0.7856 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4817 | nan | 0.0 | 0.6834 | 0.0 | 0.3839 | 0.2617 | 0.0 | nan | 0.0 | 0.3396 | 0.0 | 0.0 | 0.8178 | 0.7627 | 0.8720 | 0.0 | 0.0530 | 0.2933 | 0.0 |
| 0.3973 | 73.0 | 7811 | 0.6383 | 0.2949 | 0.3680 | 0.8186 | nan | 0.7241 | 0.9280 | 0.9008 | 0.7697 | 0.2577 | 0.0 | 0.5086 | 0.5711 | 0.0 | 0.9495 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7286 | nan | 0.0 | 0.8676 | 0.0 | 0.6173 | 0.3238 | 0.0 | nan | 0.0 | 0.5022 | 0.0 | 0.0 | 0.9099 | 0.8670 | 0.9130 | 0.0 | 0.0432 | 0.3933 | 0.0 | nan | 0.5943 | 0.8414 | 0.7925 | 0.5329 | 0.2288 | 0.0 | 0.3133 | 0.3883 | 0.0 | 0.7799 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4800 | nan | 0.0 | 0.6892 | 0.0 | 0.4039 | 0.2600 | 0.0 | nan | 0.0 | 0.3515 | 0.0 | 0.0 | 0.8218 | 0.7658 | 0.8779 | 0.0 | 0.0378 | 0.2778 | 0.0 |
| 0.3552 | 74.0 | 7918 | 0.6462 | 0.2937 | 0.3665 | 0.8151 | nan | 0.6810 | 0.9352 | 0.9009 | 0.7938 | 0.2200 | 0.0 | 0.4290 | 0.5985 | 0.0 | 0.9448 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7497 | nan | 0.0 | 0.8762 | 0.0 | 0.6223 | 0.3297 | 0.0 | nan | 0.0 | 0.5028 | 0.0 | 0.0 | 0.9107 | 0.8538 | 0.9194 | 0.0 | 0.0489 | 0.4105 | 0.0 | nan | 0.5681 | 0.8314 | 0.8066 | 0.5452 | 0.1979 | 0.0 | 0.2832 | 0.4003 | 0.0 | 0.7864 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4794 | nan | 0.0 | 0.6941 | 0.0 | 0.4007 | 0.2634 | 0.0 | nan | 0.0 | 0.3505 | 0.0 | 0.0 | 0.8197 | 0.7579 | 0.8799 | 0.0 | 0.0428 | 0.2906 | 0.0 |
| 0.3735 | 75.0 | 8025 | 0.6607 | 0.2912 | 0.3658 | 0.8094 | nan | 0.6830 | 0.9221 | 0.8990 | 0.7703 | 0.2393 | 0.0 | 0.4768 | 0.5555 | 0.0 | 0.9397 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7627 | nan | 0.0 | 0.8774 | 0.0 | 0.5842 | 0.3146 | 0.0 | nan | 0.0 | 0.5209 | 0.0 | 0.0 | 0.9052 | 0.8376 | 0.9323 | 0.0006 | 0.0601 | 0.4251 | 0.0 | nan | 0.5616 | 0.8266 | 0.8043 | 0.4916 | 0.2068 | 0.0 | 0.2969 | 0.3852 | 0.0 | 0.7947 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4696 | nan | 0.0 | 0.6919 | 0.0 | 0.3934 | 0.2599 | 0.0 | nan | 0.0 | 0.3454 | 0.0 | 0.0 | 0.8176 | 0.7506 | 0.8838 | 0.0006 | 0.0529 | 0.2857 | 0.0 |
| 0.349 | 76.0 | 8132 | 0.6499 | 0.2920 | 0.3634 | 0.8132 | nan | 0.6815 | 0.9338 | 0.8990 | 0.7476 | 0.2275 | 0.0 | 0.4769 | 0.5225 | 0.0 | 0.9473 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7426 | nan | 0.0 | 0.8829 | 0.0 | 0.6085 | 0.3132 | 0.0 | nan | 0.0 | 0.5296 | 0.0 | 0.0 | 0.9144 | 0.8342 | 0.9098 | 0.0 | 0.0538 | 0.4042 | 0.0 | nan | 0.5611 | 0.8351 | 0.8007 | 0.5302 | 0.1879 | 0.0 | 0.2919 | 0.3759 | 0.0 | 0.7918 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4747 | nan | 0.0 | 0.6961 | 0.0 | 0.4043 | 0.2598 | 0.0 | nan | 0.0 | 0.3443 | 0.0 | 0.0 | 0.8162 | 0.7462 | 0.8769 | 0.0 | 0.0491 | 0.3031 | 0.0 |
| 0.3714 | 77.0 | 8239 | 0.6534 | 0.2926 | 0.3678 | 0.8124 | nan | 0.6790 | 0.9351 | 0.8952 | 0.7512 | 0.2106 | 0.0 | 0.5023 | 0.5752 | 0.0 | 0.9328 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7807 | nan | 0.0 | 0.8562 | 0.0 | 0.6458 | 0.3162 | 0.0 | nan | 0.0 | 0.5232 | 0.0 | 0.0 | 0.9210 | 0.8265 | 0.9273 | 0.0 | 0.0808 | 0.4113 | 0.0 | nan | 0.5593 | 0.8347 | 0.8043 | 0.5370 | 0.1833 | 0.0 | 0.2953 | 0.3971 | 0.0 | 0.7974 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4632 | nan | 0.0 | 0.6987 | 0.0 | 0.3865 | 0.2565 | 0.0 | nan | 0.0 | 0.3415 | 0.0 | 0.0 | 0.8136 | 0.7420 | 0.8860 | 0.0 | 0.0712 | 0.2942 | 0.0 |
| 0.363 | 78.0 | 8346 | 0.6516 | 0.2910 | 0.3632 | 0.8136 | nan | 0.6971 | 0.9296 | 0.8965 | 0.7702 | 0.2131 | 0.0 | 0.4759 | 0.5148 | 0.0 | 0.9332 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7724 | nan | 0.0 | 0.8932 | 0.0 | 0.5626 | 0.3029 | 0.0 | nan | 0.0 | 0.5263 | 0.0 | 0.0 | 0.9160 | 0.8210 | 0.9231 | 0.0 | 0.0554 | 0.4197 | 0.0 | nan | 0.5716 | 0.8385 | 0.7896 | 0.5483 | 0.1777 | 0.0 | 0.2883 | 0.3691 | 0.0 | 0.7908 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4736 | nan | 0.0 | 0.6864 | 0.0 | 0.3961 | 0.2512 | 0.0 | nan | 0.0 | 0.3478 | 0.0 | 0.0 | 0.8160 | 0.7383 | 0.8834 | 0.0 | 0.0501 | 0.2945 | 0.0 |
| 0.3493 | 79.0 | 8453 | 0.6702 | 0.2912 | 0.3685 | 0.8100 | nan | 0.6696 | 0.9258 | 0.9017 | 0.7644 | 0.2376 | 0.0 | 0.4962 | 0.5597 | 0.0 | 0.9498 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7711 | nan | 0.0 | 0.8724 | 0.0 | 0.5995 | 0.3210 | 0.0 | nan | 0.0 | 0.5325 | 0.0 | 0.0 | 0.9025 | 0.8466 | 0.9381 | 0.0 | 0.0799 | 0.4247 | 0.0 | nan | 0.5541 | 0.8345 | 0.7881 | 0.5164 | 0.1987 | 0.0 | 0.2920 | 0.3835 | 0.0 | 0.7768 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4737 | nan | 0.0 | 0.6941 | 0.0 | 0.3974 | 0.2577 | 0.0 | nan | 0.0 | 0.3448 | 0.0 | 0.0 | 0.8187 | 0.7431 | 0.8877 | 0.0 | 0.0699 | 0.2870 | 0.0 |
| 0.3792 | 80.0 | 8560 | 0.6412 | 0.2946 | 0.3691 | 0.8157 | nan | 0.6826 | 0.9328 | 0.9031 | 0.7805 | 0.2240 | 0.0 | 0.5004 | 0.5717 | 0.0 | 0.9422 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7532 | nan | 0.0 | 0.8790 | 0.0 | 0.6263 | 0.3250 | 0.0 | nan | 0.0 | 0.5130 | 0.0 | 0.0 | 0.9049 | 0.8708 | 0.9215 | 0.0 | 0.0666 | 0.4137 | 0.0 | nan | 0.5668 | 0.8404 | 0.7926 | 0.5316 | 0.1912 | 0.0 | 0.3036 | 0.3948 | 0.0 | 0.7940 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4767 | nan | 0.0 | 0.6963 | 0.0 | 0.3952 | 0.2617 | 0.0 | nan | 0.0 | 0.3547 | 0.0 | 0.0 | 0.8229 | 0.7615 | 0.8830 | 0.0 | 0.0593 | 0.2996 | 0.0 |
| 0.3466 | 81.0 | 8667 | 0.6398 | 0.2949 | 0.3696 | 0.8181 | nan | 0.7198 | 0.9374 | 0.8927 | 0.7518 | 0.1953 | 0.0 | 0.5069 | 0.6073 | 0.0 | 0.9437 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7508 | nan | 0.0 | 0.8438 | 0.0 | 0.6477 | 0.3045 | 0.0 | nan | 0.0 | 0.5206 | 0.0 | 0.0 | 0.9149 | 0.8694 | 0.9313 | 0.0 | 0.0794 | 0.4091 | 0.0 | nan | 0.5959 | 0.8409 | 0.8043 | 0.5625 | 0.1746 | 0.0 | 0.2955 | 0.4016 | 0.0 | 0.7887 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4771 | nan | 0.0 | 0.6876 | 0.0 | 0.3937 | 0.2508 | 0.0 | nan | 0.0 | 0.3438 | 0.0 | 0.0 | 0.8203 | 0.7721 | 0.8882 | 0.0 | 0.0703 | 0.2696 | 0.0 |
| 0.3434 | 82.0 | 8774 | 0.6427 | 0.2948 | 0.3702 | 0.8144 | nan | 0.6701 | 0.9388 | 0.8942 | 0.7976 | 0.2036 | 0.0 | 0.4717 | 0.5793 | 0.0 | 0.9421 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7673 | nan | 0.0 | 0.8614 | 0.0 | 0.6617 | 0.3411 | 0.0 | nan | 0.0 | 0.5250 | 0.0 | 0.0 | 0.9065 | 0.8583 | 0.9214 | 0.0 | 0.1155 | 0.3911 | 0.0 | nan | 0.5615 | 0.8356 | 0.8036 | 0.5543 | 0.1765 | 0.0 | 0.2927 | 0.3998 | 0.0 | 0.7927 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4756 | nan | 0.0 | 0.6983 | 0.0 | 0.3912 | 0.2617 | 0.0 | nan | 0.0 | 0.3422 | 0.0 | 0.0 | 0.8220 | 0.7609 | 0.8829 | 0.0 | 0.0994 | 0.2837 | 0.0 |
| 0.3728 | 83.0 | 8881 | 0.6632 | 0.2935 | 0.3712 | 0.8071 | nan | 0.6362 | 0.9181 | 0.8946 | 0.8165 | 0.2796 | 0.0 | 0.4980 | 0.5929 | 0.0 | 0.9434 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7612 | nan | 0.0 | 0.8576 | 0.0 | 0.6222 | 0.3247 | 0.0 | nan | 0.0 | 0.5315 | 0.0 | 0.0 | 0.9206 | 0.8297 | 0.9324 | 0.0 | 0.1246 | 0.3953 | 0.0 | nan | 0.5330 | 0.8303 | 0.8021 | 0.5115 | 0.2133 | 0.0 | 0.3082 | 0.4008 | 0.0 | 0.7792 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4707 | nan | 0.0 | 0.6944 | 0.0 | 0.3960 | 0.2571 | 0.0 | nan | 0.0 | 0.3433 | 0.0 | 0.0 | 0.8166 | 0.7505 | 0.8884 | 0.0 | 0.1076 | 0.2874 | 0.0 |
| 0.3449 | 84.0 | 8988 | 0.6665 | 0.2911 | 0.3655 | 0.8080 | nan | 0.6208 | 0.9362 | 0.8933 | 0.7983 | 0.2167 | 0.0 | 0.4705 | 0.5213 | 0.0 | 0.9445 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7528 | nan | 0.0 | 0.8565 | 0.0 | 0.6339 | 0.3453 | 0.0 | nan | 0.0 | 0.5227 | 0.0 | 0.0 | 0.9203 | 0.8327 | 0.9315 | 0.0 | 0.1078 | 0.3915 | 0.0 | nan | 0.5271 | 0.8305 | 0.8038 | 0.5352 | 0.1796 | 0.0 | 0.2901 | 0.3788 | 0.0 | 0.7816 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4767 | nan | 0.0 | 0.6966 | 0.0 | 0.3857 | 0.2623 | 0.0 | nan | 0.0 | 0.3403 | 0.0 | 0.0 | 0.8154 | 0.7512 | 0.8876 | 0.0 | 0.0934 | 0.2779 | 0.0 |
| 0.3677 | 85.0 | 9095 | 0.6600 | 0.2914 | 0.3667 | 0.8089 | nan | 0.6430 | 0.9281 | 0.8959 | 0.7877 | 0.2441 | 0.0 | 0.5011 | 0.5246 | 0.0 | 0.9417 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7416 | nan | 0.0 | 0.8635 | 0.0 | 0.6224 | 0.3337 | 0.0 | nan | 0.0 | 0.5238 | 0.0 | 0.0 | 0.9166 | 0.8404 | 0.9203 | 0.0 | 0.0966 | 0.4086 | 0.0 | nan | 0.5410 | 0.8368 | 0.8012 | 0.5221 | 0.1990 | 0.0 | 0.3032 | 0.3763 | 0.0 | 0.7839 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4819 | nan | 0.0 | 0.6880 | 0.0 | 0.3785 | 0.2603 | 0.0 | nan | 0.0 | 0.3469 | 0.0 | 0.0 | 0.8166 | 0.7502 | 0.8825 | 0.0 | 0.0826 | 0.2728 | 0.0 |
| 0.3479 | 86.0 | 9202 | 0.6653 | 0.2925 | 0.3659 | 0.8083 | nan | 0.6215 | 0.9364 | 0.8955 | 0.8062 | 0.2438 | 0.0 | 0.4356 | 0.5749 | 0.0 | 0.9352 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7572 | nan | 0.0 | 0.8647 | 0.0 | 0.5950 | 0.3194 | 0.0 | nan | 0.0 | 0.5181 | 0.0 | 0.0 | 0.9142 | 0.8559 | 0.9196 | 0.0010 | 0.1131 | 0.4024 | 0.0 | nan | 0.5305 | 0.8260 | 0.8026 | 0.5177 | 0.2000 | 0.0 | 0.2845 | 0.3964 | 0.0 | 0.8037 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4777 | nan | 0.0 | 0.6850 | 0.0 | 0.3926 | 0.2605 | 0.0 | nan | 0.0 | 0.3443 | 0.0 | 0.0 | 0.8210 | 0.7590 | 0.8827 | 0.0010 | 0.0985 | 0.2760 | 0.0 |
| 0.373 | 87.0 | 9309 | 0.6488 | 0.2953 | 0.3681 | 0.8141 | nan | 0.6465 | 0.9404 | 0.8996 | 0.7934 | 0.2418 | 0.0 | 0.4875 | 0.5646 | 0.0 | 0.9394 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7519 | nan | 0.0 | 0.8931 | 0.0 | 0.6325 | 0.3185 | 0.0 | nan | 0.0 | 0.5045 | 0.0 | 0.0 | 0.8982 | 0.8624 | 0.9196 | 0.0000 | 0.1086 | 0.3763 | 0.0 | nan | 0.5479 | 0.8347 | 0.7989 | 0.5439 | 0.2043 | 0.0 | 0.2952 | 0.3956 | 0.0 | 0.8041 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4802 | nan | 0.0 | 0.6921 | 0.0 | 0.3919 | 0.2632 | 0.0 | nan | 0.0 | 0.3462 | 0.0 | 0.0 | 0.8219 | 0.7598 | 0.8803 | 0.0000 | 0.0954 | 0.2939 | 0.0 |
| 0.3509 | 88.0 | 9416 | 0.6508 | 0.2938 | 0.3690 | 0.8125 | nan | 0.6480 | 0.9359 | 0.8987 | 0.8023 | 0.2228 | 0.0 | 0.4828 | 0.5941 | 0.0 | 0.9355 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7617 | nan | 0.0 | 0.8669 | 0.0 | 0.5964 | 0.3253 | 0.0 | nan | 0.0 | 0.5218 | 0.0 | 0.0 | 0.9249 | 0.8344 | 0.9275 | 0.0 | 0.1256 | 0.4037 | 0.0 | nan | 0.5517 | 0.8360 | 0.7990 | 0.5289 | 0.1923 | 0.0 | 0.2911 | 0.3969 | 0.0 | 0.7989 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4790 | nan | 0.0 | 0.6967 | 0.0 | 0.3872 | 0.2572 | 0.0 | nan | 0.0 | 0.3400 | 0.0 | 0.0 | 0.8153 | 0.7499 | 0.8866 | 0.0 | 0.1061 | 0.2894 | 0.0 |
| 0.3249 | 89.0 | 9523 | 0.6380 | 0.2947 | 0.3653 | 0.8162 | nan | 0.6541 | 0.9527 | 0.9012 | 0.7578 | 0.2159 | 0.0 | 0.4779 | 0.5541 | 0.0 | 0.9496 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7475 | nan | 0.0 | 0.8613 | 0.0 | 0.6083 | 0.3103 | 0.0 | nan | 0.0 | 0.5111 | 0.0 | 0.0 | 0.9215 | 0.8387 | 0.9247 | 0.0 | 0.1075 | 0.3965 | 0.0 | nan | 0.5525 | 0.8372 | 0.8023 | 0.5649 | 0.1893 | 0.0 | 0.2923 | 0.3918 | 0.0 | 0.7877 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4774 | nan | 0.0 | 0.7001 | 0.0 | 0.3917 | 0.2583 | 0.0 | nan | 0.0 | 0.3406 | 0.0 | 0.0 | 0.8165 | 0.7519 | 0.8854 | 0.0 | 0.0954 | 0.2955 | 0.0 |
| 0.3507 | 90.0 | 9630 | 0.6552 | 0.2931 | 0.3681 | 0.8112 | nan | 0.6412 | 0.9316 | 0.9007 | 0.7940 | 0.2344 | 0.0 | 0.4845 | 0.5679 | 0.0 | 0.9438 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7501 | nan | 0.0 | 0.8788 | 0.0 | 0.6209 | 0.3117 | 0.0 | nan | 0.0 | 0.5239 | 0.0 | 0.0 | 0.9155 | 0.8504 | 0.9231 | 0.0 | 0.1052 | 0.4019 | 0.0 | nan | 0.5432 | 0.8346 | 0.7967 | 0.5219 | 0.1977 | 0.0 | 0.2933 | 0.3922 | 0.0 | 0.7936 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4792 | nan | 0.0 | 0.6958 | 0.0 | 0.3913 | 0.2588 | 0.0 | nan | 0.0 | 0.3429 | 0.0 | 0.0 | 0.8188 | 0.7511 | 0.8841 | 0.0 | 0.0910 | 0.2920 | 0.0 |
| 0.3327 | 91.0 | 9737 | 0.6568 | 0.2929 | 0.3687 | 0.8102 | nan | 0.6277 | 0.9380 | 0.8989 | 0.8059 | 0.2578 | 0.0 | 0.4617 | 0.5809 | 0.0 | 0.9460 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7536 | nan | 0.0 | 0.8356 | 0.0 | 0.6285 | 0.3180 | 0.0 | nan | 0.0 | 0.5218 | 0.0 | 0.0 | 0.9181 | 0.8578 | 0.9230 | 0.0004 | 0.0976 | 0.4261 | 0.0 | nan | 0.5366 | 0.8321 | 0.7979 | 0.5259 | 0.2114 | 0.0 | 0.2900 | 0.3969 | 0.0 | 0.7969 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4798 | nan | 0.0 | 0.6966 | 0.0 | 0.3832 | 0.2618 | 0.0 | nan | 0.0 | 0.3398 | 0.0 | 0.0 | 0.8184 | 0.7523 | 0.8857 | 0.0004 | 0.0849 | 0.2836 | 0.0 |
| 0.3428 | 92.0 | 9844 | 0.6481 | 0.2933 | 0.3672 | 0.8120 | nan | 0.6540 | 0.9343 | 0.9003 | 0.7727 | 0.2264 | 0.0 | 0.4777 | 0.5473 | 0.0 | 0.9437 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7544 | nan | 0.0 | 0.8720 | 0.0 | 0.6385 | 0.3236 | 0.0 | nan | 0.0 | 0.5132 | 0.0 | 0.0 | 0.9136 | 0.8557 | 0.9224 | 0.0 | 0.1007 | 0.4012 | 0.0 | nan | 0.5486 | 0.8334 | 0.7997 | 0.5315 | 0.1937 | 0.0 | 0.2905 | 0.3891 | 0.0 | 0.7948 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4778 | nan | 0.0 | 0.6974 | 0.0 | 0.3843 | 0.2628 | 0.0 | nan | 0.0 | 0.3480 | 0.0 | 0.0 | 0.8193 | 0.7522 | 0.8844 | 0.0 | 0.0885 | 0.2890 | 0.0 |
| 0.3483 | 93.0 | 9951 | 0.6642 | 0.2923 | 0.3664 | 0.8104 | nan | 0.6314 | 0.9384 | 0.9008 | 0.7929 | 0.2027 | 0.0 | 0.4565 | 0.5687 | 0.0 | 0.9355 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7620 | nan | 0.0 | 0.8702 | 0.0 | 0.6443 | 0.3233 | 0.0 | nan | 0.0 | 0.5056 | 0.0 | 0.0 | 0.9195 | 0.8529 | 0.9224 | 0.0 | 0.1132 | 0.3833 | 0.0 | nan | 0.5395 | 0.8298 | 0.7942 | 0.5268 | 0.1771 | 0.0 | 0.2783 | 0.3974 | 0.0 | 0.8030 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4790 | nan | 0.0 | 0.7001 | 0.0 | 0.3838 | 0.2612 | 0.0 | nan | 0.0 | 0.3438 | 0.0 | 0.0 | 0.8168 | 0.7498 | 0.8846 | 0.0 | 0.0996 | 0.2879 | 0.0 |
| 0.346 | 93.46 | 10000 | 0.6468 | 0.2931 | 0.3665 | 0.8121 | nan | 0.6505 | 0.9345 | 0.9011 | 0.7895 | 0.2382 | 0.0 | 0.4519 | 0.5536 | 0.0 | 0.9509 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7507 | nan | 0.0 | 0.8681 | 0.0 | 0.6107 | 0.3192 | 0.0 | nan | 0.0 | 0.5156 | 0.0 | 0.0 | 0.9183 | 0.8478 | 0.9246 | 0.0 | 0.1083 | 0.3940 | 0.0 | nan | 0.5472 | 0.8329 | 0.7961 | 0.5266 | 0.2013 | 0.0 | 0.2863 | 0.3887 | 0.0 | 0.7872 | 0.0 | 0.0 | 0.0 | 0.0 | 0.4759 | nan | 0.0 | 0.6992 | 0.0 | 0.3924 | 0.2614 | 0.0 | nan | 0.0 | 0.3413 | 0.0 | 0.0 | 0.8182 | 0.7517 | 0.8855 | 0.0 | 0.0963 | 0.2896 | 0.0 |
### Framework versions
- Transformers 4.29.0.dev0
- Pytorch 2.0.0+cu117
- Datasets 2.11.0
- Tokenizers 0.13.3
|
BigSalmon/InfillFormalLincoln
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8 | null |
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- emotion
metrics:
- accuracy
- f1
model-index:
- name: distilbert-base-uncased-finetuned-emotion
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: emotion
type: emotion
config: split
split: validation
args: split
metrics:
- name: Accuracy
type: accuracy
value: 0.919
- name: F1
type: f1
value: 0.9190477193383318
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-emotion
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2268
- Accuracy: 0.919
- F1: 0.9190
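A minimal inference sketch with the `transformers` pipeline, assuming the checkpoint is published on the Hub under a repository named after this model (adjust the id to wherever it actually lives):
```python
from transformers import pipeline

# Hypothetical repository id; point it at the actual location of the checkpoint.
classifier = pipeline(
    "text-classification",
    model="distilbert-base-uncased-finetuned-emotion",
)

print(classifier("I am thrilled with how the experiment turned out!"))
# e.g. [{'label': 'joy', 'score': 0.98}] -- labels come from the emotion dataset's classes
```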
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.8412 | 1.0 | 250 | 0.3320 | 0.9005 | 0.8966 |
| 0.26 | 2.0 | 500 | 0.2268 | 0.919 | 0.9190 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BigSalmon/InformalToFormalLincoln14
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 5 | null |
---
license: creativeml-openrail-m
tags:
- stablediffusionapi.com
- stable-diffusion-api
- text-to-image
- ultra-realistic
pinned: true
---
# DisillusionMix API Inference

## Get API Key
Get your API key from [Stable Diffusion API](http://stablediffusionapi.com/); no payment is needed.
Replace the key in the code below and change **model_id** to "disillusionmix".
Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://stablediffusionapi.com/docs)
Model link: [View model](https://stablediffusionapi.com/models/disillusionmix)
Credits: [View credits](https://civitai.com/?query=DisillusionMix)
View all models: [View Models](https://stablediffusionapi.com/models)
```python
import requests
import json

url = "https://stablediffusionapi.com/api/v3/dreambooth"

payload = json.dumps({
  "key": "",
  "model_id": "disillusionmix",
  "prompt": "actual 8K portrait photo of gareth person, portrait, happy colors, bright eyes, clear eyes, warm smile, smooth soft skin, big dreamy eyes, beautiful intricate colored hair, symmetrical, anime wide eyes, soft lighting, detailed face, by makoto shinkai, stanley artgerm lau, wlop, rossdraws, concept art, digital painting, looking into camera",
  "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
  "width": "512",
  "height": "512",
  "samples": "1",
  "num_inference_steps": "30",
  "safety_checker": "no",
  "enhance_prompt": "yes",
  "seed": None,
  "guidance_scale": 7.5,
  "multi_lingual": "no",
  "panorama": "no",
  "self_attention": "no",
  "upscale": "no",
  "embeddings": "embeddings_model_id",
  "lora": "lora_model_id",
  "webhook": None,
  "track_id": None
})

headers = {
  'Content-Type': 'application/json'
}

response = requests.request("POST", url, headers=headers, data=payload)

print(response.text)
```
> Use this coupon code to get 25% off **DMGG0RBN**
|
BigSalmon/InformalToFormalLincoln18
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8 | 2023-05-04T14:21:28Z |
---
license: cc-by-4.0
tags:
- generated_from_trainer
metrics:
- f1
- recall
- accuracy
- precision
model-index:
- name: bertin-roberta-fine-tuned-text-classification-SL-data-augmentation-dss
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bertin-roberta-fine-tuned-text-classification-SL-data-augmentation-dss
This model is a fine-tuned version of [bertin-project/bertin-roberta-base-spanish](https://huggingface.co/bertin-project/bertin-roberta-base-spanish) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 2.3050
- F1: 0.4713
- Recall: 0.4797
- Accuracy: 0.4797
- Precision: 0.4820
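A minimal sketch of running the classifier directly with `transformers`; the repository id is an assumption based on the model name above, and the label names come from the checkpoint's `config.json` rather than from this card:
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo = "bertin-roberta-fine-tuned-text-classification-SL-data-augmentation-dss"  # hypothetical id

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("Texto de ejemplo a clasificar.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

probs = logits.softmax(dim=-1)[0]
pred = int(probs.argmax())
print(model.config.id2label[pred], float(probs[pred]))
```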
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 | Recall | Accuracy | Precision |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:--------:|:---------:|
| No log | 1.0 | 359 | 3.4261 | 0.2636 | 0.3268 | 0.3268 | 0.2780 |
| 3.7358 | 2.0 | 718 | 2.7048 | 0.3631 | 0.4179 | 0.4179 | 0.3773 |
| 2.4772 | 3.0 | 1077 | 2.4578 | 0.4072 | 0.4407 | 0.4407 | 0.4095 |
| 2.4772 | 4.0 | 1436 | 2.3357 | 0.4403 | 0.4545 | 0.4545 | 0.4815 |
| 1.6075 | 5.0 | 1795 | 2.3050 | 0.4713 | 0.4797 | 0.4797 | 0.4820 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BigSalmon/InformalToFormalLincoln19
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 11 | 2023-05-04T14:22:10Z |
---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-Pixelcopter-v2
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: Pixelcopter-PLE-v0
type: Pixelcopter-PLE-v0
metrics:
- type: mean_reward
value: 34.70 +/- 27.66
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **Pixelcopter-PLE-v0**
This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**.
To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
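For reference, here is a minimal sketch of the REINFORCE policy and Monte-Carlo update that the course unit builds; layer sizes, the discount factor, and the environment wiring are illustrative assumptions, not the exact script behind this checkpoint:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Policy(nn.Module):
    """Small MLP policy: observation -> action probabilities."""
    def __init__(self, obs_size, n_actions, hidden=64):
        super().__init__()
        self.fc1 = nn.Linear(obs_size, hidden)
        self.fc2 = nn.Linear(hidden, n_actions)

    def forward(self, x):
        return F.softmax(self.fc2(F.relu(self.fc1(x))), dim=-1)

    def act(self, obs):
        probs = self.forward(torch.as_tensor(obs, dtype=torch.float32).unsqueeze(0))
        dist = torch.distributions.Categorical(probs)
        action = dist.sample()
        return action.item(), dist.log_prob(action)

def reinforce_update(optimizer, log_probs, rewards, gamma=0.99):
    """One policy-gradient step over a finished episode."""
    returns, g = [], 0.0
    for r in reversed(rewards):                       # discounted return from each step
        g = r + gamma * g
        returns.insert(0, g)
    returns = torch.tensor(returns)
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)  # variance reduction
    loss = torch.stack([-lp * g for lp, g in zip(log_probs, returns)]).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```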
|
BigSalmon/InformalToFormalLincoln20
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8 | 2023-05-04T14:22:20Z |
---
language:
- mn
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: mongolian-ner-test-xlm-roberta-large-ner-hrl
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mongolian-ner-test-xlm-roberta-large-ner-hrl
This model is a fine-tuned version of [bayartsogt/albert-mongolian](https://huggingface.co/bayartsogt/albert-mongolian) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5337
- Precision: 0.3060
- Recall: 0.1406
- F1: 0.1927
- Accuracy: 0.8591
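A minimal sketch of running the NER checkpoint with the `transformers` token-classification pipeline; the repository id is an assumption based on the model name above:
```python
from transformers import pipeline

# Hypothetical repo id; replace with the actual location of the checkpoint.
ner = pipeline(
    "token-classification",
    model="mongolian-ner-test-xlm-roberta-large-ner-hrl",
    aggregation_strategy="simple",  # merge word-piece predictions into entity spans
)

print(ner("Улаанбаатар хотод Болд ажилладаг."))
```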
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.6123 | 1.0 | 477 | 0.5570 | 0.2422 | 0.0999 | 0.1414 | 0.8536 |
| 0.5411 | 2.0 | 954 | 0.5407 | 0.2914 | 0.1294 | 0.1792 | 0.8572 |
| 0.5288 | 3.0 | 1431 | 0.5394 | 0.2944 | 0.1309 | 0.1812 | 0.8576 |
| 0.5212 | 4.0 | 1908 | 0.5346 | 0.3015 | 0.1324 | 0.1840 | 0.8581 |
| 0.5156 | 5.0 | 2385 | 0.5298 | 0.3131 | 0.1394 | 0.1929 | 0.8595 |
| 0.5103 | 6.0 | 2862 | 0.5301 | 0.3086 | 0.1419 | 0.1944 | 0.8595 |
| 0.5041 | 7.0 | 3339 | 0.5318 | 0.3083 | 0.1411 | 0.1936 | 0.8592 |
| 0.4981 | 8.0 | 3816 | 0.5308 | 0.3117 | 0.1421 | 0.1952 | 0.8595 |
| 0.4931 | 9.0 | 4293 | 0.5329 | 0.3062 | 0.1400 | 0.1922 | 0.8592 |
| 0.4885 | 10.0 | 4770 | 0.5337 | 0.3060 | 0.1406 | 0.1927 | 0.8591 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BigSalmon/InformalToFormalLincoln25
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers",
"has_space"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 10 | null |
---
license: apache-2.0
tags:
- setfit
- sentence-transformers
- text-classification
pipeline_tag: text-classification
---
# rodekruis/sml-ukr-word-classifier-small
This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:
1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
2. Training a classification head with features from the fine-tuned Sentence Transformer.
## Usage
To use this model for inference, first install the SetFit library:
```bash
python -m pip install setfit
```
You can then run inference as follows:
```python
from setfit import SetFitModel
# Download from Hub and run inference
model = SetFitModel.from_pretrained("rodekruis/sml-ukr-word-classifier-small")
# Run inference
preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"])
```
## BibTeX entry and citation info
```bibtex
@article{https://doi.org/10.48550/arxiv.2209.11055,
doi = {10.48550/ARXIV.2209.11055},
url = {https://arxiv.org/abs/2209.11055},
author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {Efficient Few-Shot Learning Without Prompts},
publisher = {arXiv},
year = {2022},
copyright = {Creative Commons Attribution 4.0 International}
}
```
|
BigSalmon/Lincoln4
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 11 | null |
---
base_model: runwayml/stable-diffusion-v1-5
instance_prompt: A fantasy landscape in <shanshui-style>
tags:
- stable-diffusion
- stable-diffusion-ppdiffusers
- text-to-image
- ppdiffusers
inference: false
license: mit
---
# megemini/shanshui_style - 当中国水墨山水画遇上AIGC - Chinese ink-wash landscape painting meets AIGC
The model in this repository was trained with the ``Textual inversion`` technique using the ``style`` setting.
The pretrained base model is ``runwayml/stable-diffusion-v1-5``, and the training images are Chinese ink-wash landscape paintings.
With this model you can generate images in the style of Chinese ink-wash landscape painting.
👉 The [megemini/shanshui](https://huggingface.co/spaces/megemini/shanshui) Space is built on this model.
| image | model | prompt |
|-|-|-|
|  | megemini/shanshui_style | A fantasy landscape in \<shanshui-style\> |
|  | megemini/shanshui_style | A fantasy landscape in \<shanshui-style\> |
|  | megemini/shanshui_style | A fantasy landscape in \<shanshui-style\> |
P.S. 👉 The [megemini/shanshui_gen_style](https://huggingface.co/megemini/shanshui_gen_style) model can generate more vivid images from text instead of from ink-wash paintings.
|
BigSalmon/MrLincoln10
|
[
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 5 | null |
---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Pixel_Copter
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: CartPole-v1
type: CartPole-v1
metrics:
- type: mean_reward
value: 500.00 +/- 0.00
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **CartPole-v1**
This is a trained model of a **Reinforce** agent playing **CartPole-v1**.
To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
|
BigSalmon/MrLincoln12
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers",
"has_space"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 9 | null |
---
license: creativeml-openrail-m
base_model: runwayml/stable-diffusion-v1-5
instance_prompt: a photo of meowth
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- lora
inference: true
---
# LoRA DreamBooth - Raminj/finetune
These are LoRA adaptation weights for runwayml/stable-diffusion-v1-5. The weights were trained on a photo of meowth using [DreamBooth](https://dreambooth.github.io/). You can find some example images below.




LoRA for the text encoder was enabled: False.
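A minimal sketch of applying these LoRA weights on top of the base model with `diffusers`; treat it as an illustrative assumption about how the weights are stored, not a verified recipe for this exact repository:
```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Attach the DreamBooth LoRA adapter trained on the instance prompt.
pipe.load_lora_weights("Raminj/finetune")

image = pipe("a photo of meowth in a garden", num_inference_steps=30).images[0]
image.save("meowth.png")
```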
|
BigSalmon/MrLincoln125MNeo
|
[
"pytorch",
"tensorboard",
"gpt_neo",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPTNeoForCausalLM"
],
"model_type": "gpt_neo",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 12 | null |
---
license: apache-2.0
tags:
- setfit
- sentence-transformers
- text-classification
pipeline_tag: text-classification
---
# konverner/due_retail_25
This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:
1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
2. Training a classification head with features from the fine-tuned Sentence Transformer.
## Usage
To use this model for inference, first install the SetFit library:
```bash
python -m pip install setfit
```
You can then run inference as follows:
```python
from setfit import SetFitModel
# Download from Hub and run inference
model = SetFitModel.from_pretrained("konverner/due_retail_25")
# Run inference
preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"])
```
## BibTeX entry and citation info
```bibtex
@article{https://doi.org/10.48550/arxiv.2209.11055,
doi = {10.48550/ARXIV.2209.11055},
url = {https://arxiv.org/abs/2209.11055},
author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {Efficient Few-Shot Learning Without Prompts},
publisher = {arXiv},
year = {2022},
copyright = {Creative Commons Attribution 4.0 International}
}
```
|
BigSalmon/MrLincoln13
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 9 | 2023-05-04T14:36:31Z |
---
license: mit
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-all
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-all
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1734
- F1: 0.8523
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.3058 | 1.0 | 835 | 0.1969 | 0.7993 |
| 0.1566 | 2.0 | 1670 | 0.1772 | 0.8387 |
| 0.1034 | 3.0 | 2505 | 0.1734 | 0.8523 |
### Framework versions
- Transformers 4.27.4
- Pytorch 2.0.0+cpu
- Datasets 2.11.0
- Tokenizers 0.13.3
|
BigSalmon/MrLincoln14
|
[] | null |
{
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 0 | 2023-05-04T14:37:20Z |
---
duplicated_from: OpenAssistant/oasst-sft-6-llama-30b
---
- wandb: https://wandb.ai/open-assistant/supervised-finetuning/runs/zsme2alr
- steps: 584
- llama configuration:
```
llama-30b-sft-6:
dtype: fp16
log_dir: "llama_log_30b"
learning_rate: 1e-5
model_name: /home/ubuntu/Open-Assistant/model/model_training/.saved/llama-30b-super-pretrain/checkpoint-3500
output_dir: llama_model_30b
deepspeed_config: configs/zero3_config_sft.json
weight_decay: 0.0
residual_dropout: 0.0
max_length: 2048
use_flash_attention: true
warmup_steps: 20
gradient_checkpointing: true
gradient_accumulation_steps: 16
per_device_train_batch_size: 2
per_device_eval_batch_size: 3
eval_steps: 101
save_steps: 292
num_train_epochs: 8
save_total_limit: 3
use_custom_sampler: true
sort_by_length: false
save_strategy: steps
datasets:
- oasst_export:
lang: "bg,ca,cs,da,de,en,es,fr,hr,hu,it,nl,pl,pt,ro,ru,sl,sr,sv,uk"
input_file_path: 2023-04-12_oasst_release_ready_synth.jsonl.gz
val_split: 0.05
- vicuna:
val_split: 0.05
max_val_set: 800
fraction: 0.8
- dolly15k:
val_split: 0.05
max_val_set: 300
- grade_school_math_instructions:
val_split: 0.05
- code_alpaca:
val_split: 0.05
max_val_set: 250
```
|
BigSalmon/MrLincoln8
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 12 | 2023-05-04T14:54:40Z |
---
license: mit
tags:
- generated_from_trainer
model-index:
- name: gpt2-wikiemails_unlearned
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# gpt2-wikiemails_unlearned
This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 3.0657
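A minimal generation sketch with `transformers`; the repository id is an assumption based on the model name above:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM

repo = "gpt2-wikiemails_unlearned"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

inputs = tokenizer("Dear team,", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=True, top_p=0.95)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```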
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.3022 | 1.0 | 2385 | 3.1040 |
| 3.1663 | 2.0 | 4770 | 3.0778 |
| 3.0901 | 3.0 | 7155 | 3.0674 |
| 3.0608 | 4.0 | 9540 | 3.0657 |
### Framework versions
- Transformers 4.28.1
- Pytorch 2.0.0+cu118
- Datasets 2.12.0
- Tokenizers 0.13.3
|
BigSalmon/NEO125InformalToFormalLincoln
|
[
"pytorch",
"gpt_neo",
"text-generation",
"transformers"
] |
text-generation
|
{
"architectures": [
"GPTNeoForCausalLM"
],
"model_type": "gpt_neo",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 8 | null |
---
license: creativeml-openrail-m
datasets:
- ioclab/grayscale_image_aesthetic_3M
language:
- en
---
# Model Card for ioclab/ioc-controlnet
This model provides precise control over the light and dark areas of a picture.
## Model Details
- **Developed by:** [@shichen](https://github.com/chenbaiyujason)
- **Shared by [optional]:** [More Information Needed]
- **Model type:** Stable Diffusion ControlNet model for [web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
- **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based.
## Uses
Recommended weight: **0.4-0.9**
Recommended exit timing (ending control step): **0.4-0.9**
The model is still being trained on more data, which is expected to take another 2-4 days, so adjust the weight flexibly for different scenes and results. If you generate good images or run into any problems, feel free to discuss them on Hugging Face.
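A minimal sketch of wiring this ControlNet into a `diffusers` pipeline with a conditioning scale in the recommended range; the repository id and the grayscale conditioning image are illustrative assumptions:
```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "ioclab/ioc-controlnet", torch_dtype=torch.float16  # hypothetical repo id
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# Grayscale brightness map that marks where the image should be light or dark.
control = load_image("brightness_map.png")

image = pipe(
    "a cozy cabin in the woods at dusk",
    image=control,
    controlnet_conditioning_scale=0.6,  # recommended range is 0.4-0.9
).images[0]
image.save("result.png")
```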






For more information, please refer to the document link at the bottom.
Please note that the model is still being iterated on. Check back every 3 days or so for the latest release.
### HuggingFace Space Demo
Waiting for upload
<!-- [huggingface.co/spaces/ioclab/brightness-controlnet](https://huggingface.co/spaces/ioclab/brightness-controlnet) -->
### Direct Use
[More Information Needed]
### Out-of-Scope Use
[More Information Needed]
## Bias, Risks, and Limitations
[More Information Needed]
## More Info
[illumination ControlNet usage tutorial](https://aigc.ioclab.com/sd-showcase/light_controlnet.html) (in Chinese)
|
BigSalmon/PhraseBerta
|
[
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 10 | null |
Access to model looqlab/sinteztestmodel1 is restricted and you are not in the authorized list. Visit https://huggingface.co/looqlab/sinteztestmodel1 to ask for access.
|
BigSalmon/Rowerta
|
[
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] |
fill-mask
|
{
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
}
| 4 | null |
---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-agent1-unit4
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: CartPole-v1
type: CartPole-v1
metrics:
- type: mean_reward
value: 500.00 +/- 0.00
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **CartPole-v1**
This is a trained model of a **Reinforce** agent playing **CartPole-v1**.
To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
|