**Dataset schema**

| Column | Type | Range / Stats |
|:---|:---|:---|
| `modelId` | string | length 5–139 |
| `author` | string | length 2–42 |
| `last_modified` | timestamp[us, tz=UTC] | 2020-02-15 11:33:14 – 2025-09-09 00:41:25 |
| `downloads` | int64 | 0 – 223M |
| `likes` | int64 | 0 – 11.7k |
| `library_name` | string | 549 classes |
| `tags` | list | length 1 – 4.05k |
| `pipeline_tag` | string | 55 classes |
| `createdAt` | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 – 2025-09-09 00:41:08 |
| `card` | string | length 11 – 1.01M |
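As a hedged sketch of querying rows with this schema using the `datasets` library; the repo id `example/hf-model-cards` is a placeholder (assumption), not the actual dataset path:

```python
# Filter a snapshot with the schema above; the dataset path is hypothetical.
from datasets import load_dataset

rows = load_dataset("example/hf-model-cards", split="train")

# Keep transformers-library models that have at least one like.
popular = rows.filter(
    lambda r: r["library_name"] == "transformers" and r["likes"] >= 1
)
for r in popular.select(range(min(5, len(popular)))):
    print(r["modelId"], r["downloads"], r["pipeline_tag"])
```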
- **modelId:** adhavan23/olai_suvadi_ocr
- **author:** adhavan23
- **last_modified:** 2024-06-04T06:04:28Z · **createdAt:** 2024-06-04T06:04:28Z
- **downloads:** 0 · **likes:** 0
- **library_name:** null · **pipeline_tag:** null
- **tags:** [ "license:apache-2.0", "region:us" ]

**card:**
---
license: apache-2.0
---
- **modelId:** kiatkock/sentiment_pc_weightedLoss
- **author:** kiatkock
- **last_modified:** 2024-06-04T06:04:13Z · **createdAt:** 2024-05-31T01:30:25Z
- **downloads:** 107 · **likes:** 0
- **library_name:** transformers · **pipeline_tag:** text-classification
- **tags:** [ "transformers", "tensorboard", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:ahmedrachid/FinancialBERT-Sentiment-Analysis", "base_model:finetune:ahmedrachid/FinancialBERT-Sentiment-Analysis", "autotrain_compatible", "endpoints_compatible", "region:us" ]

**card:**
---
base_model: ahmedrachid/FinancialBERT-Sentiment-Analysis
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: sentiment_pc_weightedLoss
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# sentiment_pc_weightedLoss

This model is a fine-tuned version of [ahmedrachid/FinancialBERT-Sentiment-Analysis](https://huggingface.co/ahmedrachid/FinancialBERT-Sentiment-Analysis) on an unspecified dataset. It achieves the following results on the evaluation set:
- Loss: 0.6463
- Accuracy: 0.86
- F1: 0.8290

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:------:|:----:|:---------------:|:--------:|:------:|
| No log | 0.1739 | 50 | 0.6153 | 0.8087 | 0.7818 |
| No log | 0.3478 | 100 | 0.4938 | 0.8165 | 0.7843 |
| No log | 0.5217 | 150 | 0.4613 | 0.8339 | 0.8016 |
| No log | 0.6957 | 200 | 0.4918 | 0.7913 | 0.7619 |
| No log | 0.8696 | 250 | 0.4520 | 0.8283 | 0.7961 |
| No log | 1.0435 | 300 | 0.4821 | 0.8339 | 0.8054 |
| No log | 1.2174 | 350 | 0.4868 | 0.8639 | 0.8327 |
| No log | 1.3913 | 400 | 0.5093 | 0.8574 | 0.8259 |
| No log | 1.5652 | 450 | 0.4648 | 0.8474 | 0.8175 |
| 0.4528 | 1.7391 | 500 | 0.4556 | 0.8470 | 0.8151 |
| 0.4528 | 1.9130 | 550 | 0.4747 | 0.8361 | 0.8062 |
| 0.4528 | 2.0870 | 600 | 0.5520 | 0.8543 | 0.8234 |
| 0.4528 | 2.2609 | 650 | 0.6130 | 0.8652 | 0.8367 |
| 0.4528 | 2.4348 | 700 | 0.5657 | 0.8722 | 0.8415 |
| 0.4528 | 2.6087 | 750 | 0.5357 | 0.8339 | 0.8033 |
| 0.4528 | 2.7826 | 800 | 0.5729 | 0.8513 | 0.8233 |
| 0.4528 | 2.9565 | 850 | 0.5304 | 0.8522 | 0.8215 |
| 0.4528 | 3.1304 | 900 | 0.5982 | 0.8683 | 0.8375 |
| 0.4528 | 3.3043 | 950 | 0.5684 | 0.8513 | 0.8197 |
| 0.1978 | 3.4783 | 1000 | 0.6463 | 0.86 | 0.8290 |
| 0.1978 | 3.6522 | 1050 | 0.6566 | 0.8565 | 0.8262 |
| 0.1978 | 3.8261 | 1100 | 0.6497 | 0.8578 | 0.8282 |
| 0.1978 | 4.0 | 1150 | 0.6531 | 0.8591 | 0.8266 |

### Framework versions

- Transformers 4.41.2
- Pytorch 2.3.0+cu121
- Datasets 2.19.2
- Tokenizers 0.19.1
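As a hedged illustration (not part of the card above), the fine-tuned checkpoint should load with the standard transformers text-classification pipeline; the label names are inherited from the base model's config and are not documented in the card:

```python
# Minimal inference sketch for the fine-tuned sentiment model.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="kiatkock/sentiment_pc_weightedLoss",
)
print(classifier("Quarterly revenue beat analyst expectations."))
```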
- **modelId:** srbdtwentyfour/mystery-llama-3-8b-v3
- **author:** srbdtwentyfour
- **last_modified:** 2024-06-04T06:02:53Z · **createdAt:** 2024-06-04T06:01:57Z
- **downloads:** 0 · **likes:** 0
- **library_name:** transformers · **pipeline_tag:** null
- **tags:** [ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-Instruct-bnb-4bit", "base_model:finetune:unsloth/llama-3-8b-Instruct-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]

**card:**
---
language:
- en
license: apache-2.0
tags:
- text-generation-inference
- transformers
- unsloth
- llama
- trl
base_model: unsloth/llama-3-8b-Instruct-bnb-4bit
---

# Uploaded model

- **Developed by:** srbdtwentyfour
- **License:** apache-2.0
- **Finetuned from model:** unsloth/llama-3-8b-Instruct-bnb-4bit

This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
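A hedged loading sketch, assuming the repo ships full merged weights loadable with plain transformers; the card does not say whether it holds merged weights or only a LoRA adapter, in which case `peft` would be needed instead:

```python
# Load and generate with the uploaded model (assumes merged weights).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "srbdtwentyfour/mystery-llama-3-8b-v3"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

inputs = tokenizer("Hello, who are you?", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```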
- **modelId:** goniii/Llama-3-Ko-8B-passthrough-merge-test
- **author:** goniii
- **last_modified:** 2024-06-04T06:02:46Z · **createdAt:** 2024-06-04T05:58:30Z
- **downloads:** 7 · **likes:** 0
- **library_name:** transformers · **pipeline_tag:** text-generation
- **tags:** [ "transformers", "safetensors", "llama", "text-generation", "mergekit", "merge", "conversational", "base_model:beomi/Llama-3-Open-Ko-8B", "base_model:merge:beomi/Llama-3-Open-Ko-8B", "base_model:beomi/Llama-3-Open-Ko-8B-Instruct-preview", "base_model:merge:beomi/Llama-3-Open-Ko-8B-Instruct-preview", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]

**card:**
---
base_model:
- beomi/Llama-3-Open-Ko-8B-Instruct-preview
- beomi/Llama-3-Open-Ko-8B
library_name: transformers
tags:
- mergekit
- merge
---

# merge

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details

### Merge Method

This model was merged using the passthrough merge method.

### Models Merged

The following models were included in the merge:
* [beomi/Llama-3-Open-Ko-8B-Instruct-preview](https://huggingface.co/beomi/Llama-3-Open-Ko-8B-Instruct-preview)
* [beomi/Llama-3-Open-Ko-8B](https://huggingface.co/beomi/Llama-3-Open-Ko-8B)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
slices:
  - sources:
      - model: beomi/Llama-3-Open-Ko-8B-Instruct-preview
        layer_range: [0, 24]
  - sources:
      - model: beomi/Llama-3-Open-Ko-8B
        layer_range: [8, 32]
merge_method: passthrough
dtype: bfloat16
```
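A config like the one above is consumed by mergekit's `mergekit-yaml` CLI (e.g. `mergekit-yaml config.yml ./merged`). As a hedged sketch, the resulting checkpoint then loads like any other causal LM; everything below beyond the repo id is an illustrative choice:

```python
# Load the merged checkpoint; bfloat16 matches the `dtype` in the merge config.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "goniii/Llama-3-Ko-8B-passthrough-merge-test"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)
```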
- **modelId:** tyzhu/find_marker_both_sent_train_400_eval_40_first_permute_meta-llama_Llama-2-7b-hf_3e-4_lora
- **author:** tyzhu
- **last_modified:** 2024-06-04T05:56:41Z · **createdAt:** 2024-06-03T16:30:50Z
- **downloads:** 0 · **likes:** 0
- **library_name:** null · **pipeline_tag:** null
- **tags:** [ "generated_from_trainer", "base_model:meta-llama/Llama-2-7b-hf", "base_model:finetune:meta-llama/Llama-2-7b-hf", "license:llama2", "region:us" ]

**card:**
---
license: llama2
base_model: meta-llama/Llama-2-7b-hf
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: find_marker_both_sent_train_400_eval_40_first_permute_meta-llama_Llama-2-7b-hf_3e-4_lora
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# find_marker_both_sent_train_400_eval_40_first_permute_meta-llama_Llama-2-7b-hf_3e-4_lora

This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) on an unknown dataset. It achieves the following results on the evaluation set:
- Loss: 0.1941
- Accuracy: 0.7845

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: constant
- lr_scheduler_warmup_ratio: 0.05
- num_epochs: 50.0

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 1.3448 | 0.99 | 130 | 1.0018 | 0.7086 |
| 0.7275 | 1.99 | 261 | 0.5024 | 0.7566 |
| 0.3211 | 3.0 | 392 | 0.2915 | 0.7763 |
| 0.1342 | 4.0 | 523 | 0.2350 | 0.7808 |
| 0.1216 | 5.0 | 654 | 0.2253 | 0.7828 |
| 0.1179 | 6.0 | 785 | 0.2039 | 0.7837 |
| 0.1051 | 7.0 | 916 | 0.1984 | 0.7847 |
| 0.1034 | 8.0 | 1047 | 0.1880 | 0.7856 |
| 0.0996 | 8.99 | 1177 | 0.1813 | 0.7862 |
| 0.0981 | 9.99 | 1308 | 0.1874 | 0.7862 |
| 0.098 | 11.0 | 1439 | 0.1819 | 0.7863 |
| 0.0983 | 12.0 | 1570 | 0.1802 | 0.7862 |
| 0.0965 | 13.0 | 1701 | 0.1807 | 0.7867 |
| 0.0941 | 14.0 | 1832 | 0.1746 | 0.7871 |
| 0.0936 | 15.0 | 1963 | 0.1701 | 0.7871 |
| 0.0922 | 16.0 | 2094 | 0.1714 | 0.7869 |
| 0.0913 | 16.99 | 2224 | 0.1724 | 0.7870 |
| 0.0939 | 17.99 | 2355 | 0.1826 | 0.7860 |
| 0.0993 | 19.0 | 2486 | 0.1853 | 0.7868 |
| 0.0982 | 20.0 | 2617 | 0.1809 | 0.7844 |
| 0.0951 | 21.0 | 2748 | 0.1692 | 0.7865 |
| 0.0927 | 22.0 | 2879 | 0.1745 | 0.7847 |
| 0.0899 | 23.0 | 3010 | 0.1816 | 0.7844 |
| 0.09 | 24.0 | 3141 | 0.2142 | 0.7818 |
| 0.0909 | 24.99 | 3271 | 0.2207 | 0.7821 |
| 0.0906 | 25.99 | 3402 | 0.2711 | 0.7819 |
| 0.0897 | 27.0 | 3533 | 0.2210 | 0.7815 |
| 0.0929 | 28.0 | 3664 | 0.2641 | 0.7809 |
| 0.0954 | 29.0 | 3795 | 0.2717 | 0.7804 |
| 0.0995 | 30.0 | 3926 | 0.2107 | 0.7814 |
| 0.1025 | 31.0 | 4057 | 0.2482 | 0.7806 |
| 0.1003 | 32.0 | 4188 | 0.2500 | 0.7819 |
| 0.0905 | 32.99 | 4318 | 0.2899 | 0.7816 |
| 0.0895 | 33.99 | 4449 | 0.2583 | 0.7814 |
| 0.0894 | 35.0 | 4580 | 0.2976 | 0.7814 |
| 0.0882 | 36.0 | 4711 | 0.2885 | 0.7814 |
| 0.0877 | 37.0 | 4842 | 0.2893 | 0.7813 |
| 0.0899 | 38.0 | 4973 | 0.2516 | 0.7813 |
| 0.0884 | 39.0 | 5104 | 0.3089 | 0.7813 |
| 0.0877 | 40.0 | 5235 | 0.2653 | 0.7814 |
| 0.0888 | 40.99 | 5365 | 0.2988 | 0.7812 |
| 0.0886 | 41.99 | 5496 | 0.3264 | 0.7814 |
| 0.0886 | 43.0 | 5627 | 0.3656 | 0.7812 |
| 0.0873 | 44.0 | 5758 | 0.3335 | 0.7812 |
| 0.0899 | 45.0 | 5889 | 0.1658 | 0.7874 |
| 0.0883 | 46.0 | 6020 | 0.2884 | 0.7816 |
| 0.0876 | 47.0 | 6151 | 0.3085 | 0.7809 |
| 0.0912 | 48.0 | 6282 | 0.2772 | 0.7811 |
| 0.1224 | 48.99 | 6412 | 0.1965 | 0.7839 |
| 0.1116 | 49.67 | 6500 | 0.1941 | 0.7845 |

### Framework versions

- Transformers 4.34.0
- Pytorch 2.1.0+cu121
- Datasets 2.18.0
- Tokenizers 0.14.1
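The missing library tag and the `_lora` suffix suggest the repo holds a PEFT LoRA adapter rather than full weights; a hedged loading sketch under that assumption:

```python
# Attach the (assumed) LoRA adapter to the Llama-2 base it was trained from.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf", torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(
    base,
    "tyzhu/find_marker_both_sent_train_400_eval_40_first_permute_meta-llama_Llama-2-7b-hf_3e-4_lora",
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
```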
- **modelId:** WbjuSrceu/gamme2b_loar_New_data
- **author:** WbjuSrceu
- **last_modified:** 2024-06-04T05:54:02Z · **createdAt:** 2024-06-04T05:48:01Z
- **downloads:** 0 · **likes:** 0
- **library_name:** null · **pipeline_tag:** null
- **tags:** [ "safetensors", "license:apache-2.0", "region:us" ]

**card:**
---
license: apache-2.0
---
- **modelId:** mradermacher/Medichat-V2-Llama3-8B-GGUF
- **author:** mradermacher
- **last_modified:** 2024-06-04T05:50:49Z · **createdAt:** 2024-06-02T13:42:48Z
- **downloads:** 22 · **likes:** 1
- **library_name:** transformers · **pipeline_tag:** null
- **tags:** [ "transformers", "gguf", "mergekit", "merge", "medical", "en", "dataset:ruslanmv/ai-medical-chatbot", "dataset:Locutusque/hercules-v5.0", "base_model:sethuiyer/Medichat-V2-Llama3-8B", "base_model:quantized:sethuiyer/Medichat-V2-Llama3-8B", "license:other", "endpoints_compatible", "region:us", "conversational" ]

**card:**
---
base_model: sethuiyer/Medichat-V2-Llama3-8B
datasets:
- ruslanmv/ai-medical-chatbot
- Locutusque/hercules-v5.0
language:
- en
library_name: transformers
license: other
quantized_by: mradermacher
tags:
- mergekit
- merge
- medical
---

## About

<!-- ### quantize_version: 2 -->
<!-- ### output_tensor_quantised: 1 -->
<!-- ### convert_type: hf -->
<!-- ### vocab_type: -->
<!-- ### tags: -->

static quants of https://huggingface.co/sethuiyer/Medichat-V2-Llama3-8B

<!-- provided-files -->
weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion.

## Usage

If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files.

## Provided Quants

(sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)

| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q2_K.gguf) | Q2_K | 3.3 | |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.IQ3_XS.gguf) | IQ3_XS | 3.6 | |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q3_K_S.gguf) | Q3_K_S | 3.8 | |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.IQ3_S.gguf) | IQ3_S | 3.8 | beats Q3_K* |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.IQ3_M.gguf) | IQ3_M | 3.9 | |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q3_K_M.gguf) | Q3_K_M | 4.1 | lower quality |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q3_K_L.gguf) | Q3_K_L | 4.4 | |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.IQ4_XS.gguf) | IQ4_XS | 4.6 | |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q4_K_S.gguf) | Q4_K_S | 4.8 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q4_K_M.gguf) | Q4_K_M | 5.0 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q5_K_S.gguf) | Q5_K_S | 5.7 | |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q5_K_M.gguf) | Q5_K_M | 5.8 | |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q6_K.gguf) | Q6_K | 6.7 | very good quality |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.Q8_0.gguf) | Q8_0 | 8.6 | fast, best quality |
| [GGUF](https://huggingface.co/mradermacher/Medichat-V2-Llama3-8B-GGUF/resolve/main/Medichat-V2-Llama3-8B.f16.gguf) | f16 | 16.2 | 16 bpw, overkill |

Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better):

![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)

And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9

## FAQ / Model Request

See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized.

## Thanks

I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.

<!-- end -->
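As a hedged usage sketch for one of the quants above with `llama-cpp-python`; the Q4_K_M filename matches the table, while the context size and prompt are illustrative choices:

```python
# Download a quant from the table above and run a chat completion locally.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="mradermacher/Medichat-V2-Llama3-8B-GGUF",
    filename="Medichat-V2-Llama3-8B.Q4_K_M.gguf",
    n_ctx=4096,
)
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What are common causes of fatigue?"}],
    max_tokens=128,
)
print(out["choices"][0]["message"]["content"])
```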
- **modelId:** mradermacher/TroyDoesAGI-GGUF
- **author:** mradermacher
- **last_modified:** 2024-06-04T05:49:52Z · **createdAt:** 2024-06-03T10:39:29Z
- **downloads:** 67 · **likes:** 0
- **library_name:** transformers · **pipeline_tag:** null
- **tags:** [ "transformers", "gguf", "en", "base_model:TroyDoesAI/TroyDoesAGI", "base_model:quantized:TroyDoesAI/TroyDoesAGI", "license:cc-by-nd-4.0", "endpoints_compatible", "region:us" ]

**card:**
---
base_model: TroyDoesAI/TroyDoesAGI
language:
- en
library_name: transformers
license: cc-by-nd-4.0
quantized_by: mradermacher
---

## About

<!-- ### quantize_version: 2 -->
<!-- ### output_tensor_quantised: 1 -->
<!-- ### convert_type: hf -->
<!-- ### vocab_type: -->
<!-- ### tags: -->

static quants of https://huggingface.co/TroyDoesAI/TroyDoesAGI

<!-- provided-files -->
weighted/imatrix quants are available at https://huggingface.co/mradermacher/TroyDoesAGI-i1-GGUF

## Usage

If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files.

## Provided Quants

(sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)

| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q2_K.gguf) | Q2_K | 5.8 | |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.IQ3_XS.gguf) | IQ3_XS | 6.4 | |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q3_K_S.gguf) | Q3_K_S | 6.7 | |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.IQ3_S.gguf) | IQ3_S | 6.8 | beats Q3_K* |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.IQ3_M.gguf) | IQ3_M | 7.0 | |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q3_K_M.gguf) | Q3_K_M | 7.5 | lower quality |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q3_K_L.gguf) | Q3_K_L | 8.1 | |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.IQ4_XS.gguf) | IQ4_XS | 8.3 | |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q4_K_S.gguf) | Q4_K_S | 8.8 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q4_K_M.gguf) | Q4_K_M | 9.2 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q5_K_S.gguf) | Q5_K_S | 10.6 | |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q5_K_M.gguf) | Q5_K_M | 10.9 | |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q6_K.gguf) | Q6_K | 12.6 | very good quality |
| [GGUF](https://huggingface.co/mradermacher/TroyDoesAGI-GGUF/resolve/main/TroyDoesAGI.Q8_0.gguf) | Q8_0 | 16.3 | fast, best quality |

Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better):

![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)

And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9

## FAQ / Model Request

See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized.

## Thanks

I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.

<!-- end -->
- **modelId:** hilmiatha/ViT-Emotion-Classifier
- **author:** hilmiatha
- **last_modified:** 2024-06-04T05:49:45Z · **createdAt:** 2024-06-04T04:28:12Z
- **downloads:** 27 · **likes:** 0
- **library_name:** transformers · **pipeline_tag:** image-classification
- **tags:** [ "transformers", "tensorboard", "safetensors", "vit", "image-classification", "generated_from_trainer", "dataset:imagefolder", "base_model:google/vit-base-patch16-224-in21k", "base_model:finetune:google/vit-base-patch16-224-in21k", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]

**card:**
---
license: apache-2.0
base_model: google/vit-base-patch16-224-in21k
tags:
- generated_from_trainer
datasets:
- imagefolder
metrics:
- accuracy
model-index:
- name: ViT-Emotion-Classifier
  results:
  - task:
      name: Image Classification
      type: image-classification
    dataset:
      name: imagefolder
      type: imagefolder
      config: default
      split: train
      args: default
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.575
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# ViT-Emotion-Classifier

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set:
- Loss: 1.3652
- Accuracy: 0.575

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log | 1.0 | 40 | 1.8992 | 0.3312 |
| No log | 2.0 | 80 | 1.5939 | 0.4062 |
| No log | 3.0 | 120 | 1.4776 | 0.4688 |
| No log | 4.0 | 160 | 1.4012 | 0.4813 |
| No log | 5.0 | 200 | 1.3471 | 0.4875 |
| No log | 6.0 | 240 | 1.2877 | 0.5375 |
| No log | 7.0 | 280 | 1.2598 | 0.575 |
| No log | 8.0 | 320 | 1.3595 | 0.4938 |
| No log | 9.0 | 360 | 1.2825 | 0.5375 |
| No log | 10.0 | 400 | 1.3291 | 0.5062 |
| No log | 11.0 | 440 | 1.2422 | 0.5563 |
| No log | 12.0 | 480 | 1.2659 | 0.575 |
| 1.0646 | 13.0 | 520 | 1.3048 | 0.5062 |
| 1.0646 | 14.0 | 560 | 1.2993 | 0.5563 |
| 1.0646 | 15.0 | 600 | 1.2935 | 0.5563 |
| 1.0646 | 16.0 | 640 | 1.3589 | 0.5437 |
| 1.0646 | 17.0 | 680 | 1.2447 | 0.5938 |
| 1.0646 | 18.0 | 720 | 1.3298 | 0.5563 |
| 1.0646 | 19.0 | 760 | 1.2829 | 0.6 |
| 1.0646 | 20.0 | 800 | 1.3092 | 0.5813 |
| 1.0646 | 21.0 | 840 | 1.2895 | 0.5875 |
| 1.0646 | 22.0 | 880 | 1.3810 | 0.5625 |
| 1.0646 | 23.0 | 920 | 1.3833 | 0.5563 |
| 1.0646 | 24.0 | 960 | 1.4841 | 0.5312 |
| 0.3074 | 25.0 | 1000 | 1.3619 | 0.6062 |
| 0.3074 | 26.0 | 1040 | 1.3776 | 0.5563 |
| 0.3074 | 27.0 | 1080 | 1.3917 | 0.5875 |
| 0.3074 | 28.0 | 1120 | 1.3585 | 0.575 |
| 0.3074 | 29.0 | 1160 | 1.3455 | 0.5625 |
| 0.3074 | 30.0 | 1200 | 1.4409 | 0.5813 |

### Framework versions

- Transformers 4.41.1
- Pytorch 2.3.0+cu121
- Datasets 2.19.2
- Tokenizers 0.19.1
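As a hedged inference sketch (not from the card), the checkpoint should work with the standard image-classification pipeline; the emotion label set comes from the training imagefolder and is not documented above, and `face.jpg` is a placeholder path:

```python
# Classify an image with the fine-tuned ViT emotion classifier.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="hilmiatha/ViT-Emotion-Classifier",
)
print(classifier("face.jpg"))  # path or URL to an input image
```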
- **modelId:** mradermacher/Dirty-Alice-GGUF
- **author:** mradermacher
- **last_modified:** 2024-06-04T05:49:23Z · **createdAt:** 2024-06-03T17:02:16Z
- **downloads:** 50 · **likes:** 1
- **library_name:** transformers · **pipeline_tag:** null
- **tags:** [ "transformers", "gguf", "nsfw", "en", "base_model:D1rtyB1rd/Dirty-Alice", "base_model:quantized:D1rtyB1rd/Dirty-Alice", "license:mit", "endpoints_compatible", "region:us", "conversational" ]

**card:**
---
base_model: D1rtyB1rd/Dirty-Alice
language:
- en
library_name: transformers
license: mit
quantized_by: mradermacher
tags:
- nsfw
---

## About

<!-- ### quantize_version: 2 -->
<!-- ### output_tensor_quantised: 1 -->
<!-- ### convert_type: hf -->
<!-- ### vocab_type: -->
<!-- ### tags: -->

static quants of https://huggingface.co/D1rtyB1rd/Dirty-Alice

<!-- provided-files -->
weighted/imatrix quants are available at https://huggingface.co/mradermacher/Dirty-Alice-i1-GGUF

## Usage

If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files.

## Provided Quants

(sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)

| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q2_K.gguf) | Q2_K | 0.3 | |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.IQ3_XS.gguf) | IQ3_XS | 0.4 | |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.IQ3_S.gguf) | IQ3_S | 0.4 | beats Q3_K* |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q3_K_S.gguf) | Q3_K_S | 0.4 | |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.IQ3_M.gguf) | IQ3_M | 0.4 | |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q3_K_M.gguf) | Q3_K_M | 0.4 | lower quality |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q3_K_L.gguf) | Q3_K_L | 0.4 | |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.IQ4_XS.gguf) | IQ4_XS | 0.4 | |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q4_K_S.gguf) | Q4_K_S | 0.4 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q4_K_M.gguf) | Q4_K_M | 0.4 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q5_K_S.gguf) | Q5_K_S | 0.4 | |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q5_K_M.gguf) | Q5_K_M | 0.5 | |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q6_K.gguf) | Q6_K | 0.5 | very good quality |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.Q8_0.gguf) | Q8_0 | 0.6 | fast, best quality |
| [GGUF](https://huggingface.co/mradermacher/Dirty-Alice-GGUF/resolve/main/Dirty-Alice.f16.gguf) | f16 | 1.0 | 16 bpw, overkill |

Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better):

![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)

And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9

## FAQ / Model Request

See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized.

## Thanks

I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.

<!-- end -->
- **modelId:** mradermacher/piano-medley-7b-GGUF
- **author:** mradermacher
- **last_modified:** 2024-06-04T05:48:58Z · **createdAt:** 2024-06-04T03:46:01Z
- **downloads:** 28 · **likes:** 0
- **library_name:** transformers · **pipeline_tag:** null
- **tags:** [ "transformers", "gguf", "merge", "mergekit", "en", "dataset:pankajmathur/orca_mini_v1_dataset", "dataset:openai/summarize_from_feedback", "dataset:PygmalionAI/PIPPA", "dataset:chargoddard/rpguild", "dataset:lemonilia/LimaRP", "dataset:PKU-Alignment/PKU-SafeRLHF", "dataset:Intel/orca_dpo_pairs", "dataset:allenai/ultrafeedback_binarized_cleaned", "base_model:chargoddard/piano-medley-7b", "base_model:quantized:chargoddard/piano-medley-7b", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]

**card:**
---
base_model: chargoddard/piano-medley-7b
datasets:
- pankajmathur/orca_mini_v1_dataset
- openai/summarize_from_feedback
- PygmalionAI/PIPPA
- chargoddard/rpguild
- lemonilia/LimaRP
- PKU-Alignment/PKU-SafeRLHF
- Intel/orca_dpo_pairs
- allenai/ultrafeedback_binarized_cleaned
language:
- en
library_name: transformers
license: cc-by-nc-4.0
quantized_by: mradermacher
tags:
- merge
- mergekit
---

## About

<!-- ### quantize_version: 2 -->
<!-- ### output_tensor_quantised: 1 -->
<!-- ### convert_type: hf -->
<!-- ### vocab_type: -->
<!-- ### tags: -->

static quants of https://huggingface.co/chargoddard/piano-medley-7b

<!-- provided-files -->
weighted/imatrix quants are available at https://huggingface.co/mradermacher/piano-medley-7b-i1-GGUF

## Usage

If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files.

## Provided Quants

(sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)

| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q2_K.gguf) | Q2_K | 2.8 | |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.IQ3_XS.gguf) | IQ3_XS | 3.1 | |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q3_K_S.gguf) | Q3_K_S | 3.3 | |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.IQ3_S.gguf) | IQ3_S | 3.3 | beats Q3_K* |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.IQ3_M.gguf) | IQ3_M | 3.4 | |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q3_K_M.gguf) | Q3_K_M | 3.6 | lower quality |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q3_K_L.gguf) | Q3_K_L | 3.9 | |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.IQ4_XS.gguf) | IQ4_XS | 4.0 | |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q4_K_S.gguf) | Q4_K_S | 4.2 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q4_K_M.gguf) | Q4_K_M | 4.5 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q5_K_S.gguf) | Q5_K_S | 5.1 | |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q5_K_M.gguf) | Q5_K_M | 5.2 | |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q6_K.gguf) | Q6_K | 6.0 | very good quality |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.Q8_0.gguf) | Q8_0 | 7.8 | fast, best quality |
| [GGUF](https://huggingface.co/mradermacher/piano-medley-7b-GGUF/resolve/main/piano-medley-7b.f16.gguf) | f16 | 14.6 | 16 bpw, overkill |

Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better):

![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)

And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9

## FAQ / Model Request

See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized.

## Thanks

I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.

<!-- end -->
- **modelId:** corto-ai/bge-reranker-large-onnx
- **author:** corto-ai
- **last_modified:** 2024-06-04T05:46:55Z · **createdAt:** 2024-06-04T04:22:32Z
- **downloads:** 163 · **likes:** 0
- **library_name:** transformers · **pipeline_tag:** feature-extraction
- **tags:** [ "transformers", "onnx", "xlm-roberta", "text-classification", "mteb", "feature-extraction", "en", "zh", "arxiv:2401.03462", "arxiv:2312.15503", "arxiv:2311.13534", "arxiv:2310.07554", "arxiv:2309.07597", "license:mit", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]

**card:**
---
license: mit
language:
- en
- zh
tags:
- mteb
model-index:
- name: bge-reranker-base
  results:
  - task:
      type: Reranking
    dataset:
      type: C-MTEB/CMedQAv1-reranking
      name: MTEB CMedQAv1
      config: default
      split: test
      revision: None
    metrics:
    - type: map
      value: 81.27206722525007
    - type: mrr
      value: 84.14238095238095
  - task:
      type: Reranking
    dataset:
      type: C-MTEB/CMedQAv2-reranking
      name: MTEB CMedQAv2
      config: default
      split: test
      revision: None
    metrics:
    - type: map
      value: 84.10369934291236
    - type: mrr
      value: 86.79376984126984
  - task:
      type: Reranking
    dataset:
      type: C-MTEB/Mmarco-reranking
      name: MTEB MMarcoReranking
      config: default
      split: dev
      revision: None
    metrics:
    - type: map
      value: 35.4600511272538
    - type: mrr
      value: 34.60238095238095
  - task:
      type: Reranking
    dataset:
      type: C-MTEB/T2Reranking
      name: MTEB T2Reranking
      config: default
      split: dev
      revision: None
    metrics:
    - type: map
      value: 67.27728847727172
    - type: mrr
      value: 77.1315192743764
pipeline_tag: feature-extraction
---

<br><br>

# bge-reranker-large-onnx

This repo was forked from the **BAAI/bge-reranker-large** model and contains only the ONNX version of the model. Below is the original model card from the source repo.

---

**We have updated the [new reranker](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_reranker), supporting larger lengths, more languages, and achieving better performance.**

<h1 align="center">FlagEmbedding</h1>

<h4 align="center">
    <p>
        <a href=#model-list>Model List</a> |
        <a href=#frequently-asked-questions>FAQ</a> |
        <a href=#usage>Usage</a> |
        <a href="#evaluation">Evaluation</a> |
        <a href="#train">Train</a> |
        <a href="#citation">Citation</a> |
        <a href="#license">License</a>
    <p>
</h4>

**For more details, please refer to our GitHub repo: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding).**

[English](README.md) | [中文](https://github.com/FlagOpen/FlagEmbedding/blob/master/README_zh.md)

FlagEmbedding focuses on retrieval-augmented LLMs, currently consisting of the following projects:

- **Long-Context LLM**: [Activation Beacon](https://github.com/FlagOpen/FlagEmbedding/tree/master/Long_LLM/activation_beacon)
- **Fine-tuning of LM**: [LM-Cocktail](https://github.com/FlagOpen/FlagEmbedding/tree/master/LM_Cocktail)
- **Embedding Model**: [Visualized-BGE](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/visual), [BGE-M3](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3), [LLM Embedder](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_embedder), [BGE Embedding](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/baai_general_embedding)
- **Reranker Model**: [llm rerankers](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_reranker), [BGE Reranker](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/reranker)
- **Benchmark**: [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB)

## News

- 3/18/2024: Release new [rerankers](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_reranker), built upon powerful M3 and LLM (GEMMA and MiniCPM, not so large actually) backbones, supporting multi-lingual processing and larger inputs, with massive improvements in ranking performance on BEIR, C-MTEB/Retrieval, MIRACL, and LlamaIndex Evaluation.
- 3/18/2024: Release [Visualized-BGE](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/visual), equipping BGE with visual capabilities. Visualized-BGE can be utilized to generate embeddings for hybrid image-text data.
- 1/30/2024: Release **BGE-M3**, a new member of the BGE model series! M3 stands for **M**ulti-linguality (100+ languages), **M**ulti-granularity (input length up to 8192), and **M**ulti-functionality (unification of dense, lexical, and multi-vector/ColBERT retrieval). It is the first embedding model which supports all three retrieval methods, achieving new SOTA on multi-lingual (MIRACL) and cross-lingual (MKQA) benchmarks. [Technical Report](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/BGE_M3/BGE_M3.pdf) and [Code](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3). :fire:
- 1/9/2024: Release [Activation-Beacon](https://github.com/FlagOpen/FlagEmbedding/tree/master/Long_LLM/activation_beacon), an effective, efficient, compatible, and low-cost (training) method to extend the context length of LLMs. [Technical Report](https://arxiv.org/abs/2401.03462) :fire:
- 12/24/2023: Release **LLaRA**, a LLaMA-7B-based dense retriever, leading to state-of-the-art performance on MS MARCO and BEIR. Model and code will be open-sourced. Please stay tuned. [Technical Report](https://arxiv.org/abs/2312.15503)
- 11/23/2023: Release [LM-Cocktail](https://github.com/FlagOpen/FlagEmbedding/tree/master/LM_Cocktail), a method to maintain general capabilities during fine-tuning by merging multiple language models. [Technical Report](https://arxiv.org/abs/2311.13534) :fire:
- 10/12/2023: Release [LLM-Embedder](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_embedder), a unified embedding model to support diverse retrieval augmentation needs for LLMs. [Technical Report](https://arxiv.org/pdf/2310.07554.pdf)
- 09/15/2023: The [technical report](https://arxiv.org/pdf/2309.07597.pdf) of BGE has been released
- 09/15/2023: The [massive training data](https://data.baai.ac.cn/details/BAAI-MTP) of BGE has been released
- 09/12/2023: New models:
  - **New reranker model**: release cross-encoder models `BAAI/bge-reranker-base` and `BAAI/bge-reranker-large`, which are more powerful than the embedding models. We recommend using/fine-tuning them to re-rank the top-k documents returned by embedding models.
  - **Updated embedding model**: release the `bge-*-v1.5` embedding models to alleviate the issue of the similarity distribution and enhance retrieval ability without instruction.

<details>
<summary>More</summary>
<!-- ### More -->

- 09/07/2023: Update [fine-tune code](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md): add a script to mine hard negatives and support adding instruction during fine-tuning.
- 08/09/2023: BGE models are integrated into **Langchain**; you can use them like [this](#using-langchain). The C-MTEB **leaderboard** is [available](https://huggingface.co/spaces/mteb/leaderboard).
- 08/05/2023: Release base-scale and small-scale models, **best performance among models of the same size 🤗**
- 08/02/2023: Release the `bge-large-*` (short for BAAI General Embedding) models, **ranked 1st on the MTEB and C-MTEB benchmarks!** :tada: :tada:
- 08/01/2023: We release the [Chinese Massive Text Embedding Benchmark](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB) (**C-MTEB**), consisting of 31 test datasets.

</details>

## Model List

`bge` is short for `BAAI general embedding`.
| Model | Language | Inference / Fine-tune | Description | query instruction for retrieval [1] |
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|
| [BAAI/bge-m3](https://huggingface.co/BAAI/bge-m3) | Multilingual | [Inference](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3#usage) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3) | Multi-Functionality (dense retrieval, sparse retrieval, multi-vector (colbert)), Multi-Linguality, and Multi-Granularity (8192 tokens) | |
| [BAAI/llm-embedder](https://huggingface.co/BAAI/llm-embedder) | English | [Inference](./FlagEmbedding/llm_embedder/README.md) [Fine-tune](./FlagEmbedding/llm_embedder/README.md) | a unified embedding model to support diverse retrieval augmentation needs for LLMs | See [README](./FlagEmbedding/llm_embedder/README.md) |
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
| [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-large-zh-v1.5](https://huggingface.co/BAAI/bge-large-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-base-en](https://huggingface.co/BAAI/bge-base-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-en` | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-small-en](https://huggingface.co/BAAI/bge-small-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-zh` | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `为这个句子生成表示以用于检索相关文章:` |

[1\]: If you need to search for passages relevant to a query, we suggest adding the instruction to the query; in other cases, no instruction is needed, just use the original query directly. In all cases, **no instruction** needs to be added to passages.

[2\]: Different from the embedding model, the reranker takes a question and a document as input and directly outputs a similarity score instead of an embedding. To balance accuracy and time cost, cross-encoders are widely used to re-rank the top-k documents retrieved by simpler models. For example, use a bge embedding model to retrieve the top 100 relevant documents, and then use the bge reranker to re-rank those 100 documents to get the final top-3 results.

All models have been uploaded to the Huggingface Hub; you can find them at https://huggingface.co/BAAI. If you cannot access the Huggingface Hub, you can also download the models at https://model.baai.ac.cn/models .

## Frequently asked questions

<details>
<summary>1. How to fine-tune bge embedding model?</summary>
<!-- ### How to fine-tune bge embedding model? -->

Follow this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) to prepare data and fine-tune your model. Some suggestions:

- Mine hard negatives following this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune#hard-negatives), which can improve retrieval performance.
- If you pre-train bge on your own data, the pre-trained model cannot be used to calculate similarity directly; it must be fine-tuned with contrastive learning before computing similarity.
- If the accuracy of the fine-tuned model is still not high, it is recommended to use/fine-tune the cross-encoder model (bge-reranker) to re-rank the top-k results. Hard negatives are also needed to fine-tune the reranker.
Refer to this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) for fine-tuning the reranker.

</details>

<details>
<summary>2. The similarity score between two dissimilar sentences is higher than 0.5</summary>
<!-- ### The similarity score between two dissimilar sentences is higher than 0.5 -->

**We suggest using bge v1.5, which alleviates the issue of the similarity distribution.**

Since we fine-tune the models by contrastive learning with a temperature of 0.01, the similarity distribution of the current BGE models is roughly in the interval \[0.6, 1\]. So a similarity score greater than 0.5 does not indicate that the two sentences are similar.

For downstream tasks, such as passage retrieval or semantic similarity, **what matters is the relative order of the scores, not the absolute value.** If you need to filter similar sentences based on a similarity threshold, please select an appropriate threshold based on the similarity distribution on your data (such as 0.8, 0.85, or even 0.9).

</details>

<details>
<summary>3. When does the query instruction need to be used</summary>
<!-- ### When does the query instruction need to be used -->

For the `bge-*-v1.5` models, we improved their retrieval ability when not using an instruction. Omitting the instruction causes only a slight degradation in retrieval performance compared with using one, so for convenience you can generate embeddings without an instruction in all cases.

For a retrieval task that uses short queries to find long related documents, it is recommended to add instructions to these short queries. **The best method to decide whether to add instructions for queries is to choose the setting that achieves better performance on your task.** In all cases, the documents/passages do not need the instruction.

</details>

## Usage

### Usage for Embedding Model

Here are some examples of using `bge` models with [FlagEmbedding](#using-flagembedding), [Sentence-Transformers](#using-sentence-transformers), [Langchain](#using-langchain), or [Huggingface Transformers](#using-huggingface-transformers).

#### Using FlagEmbedding

```
pip install -U FlagEmbedding
```

If it doesn't work for you, see [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md) for more ways to install FlagEmbedding.

```python
from FlagEmbedding import FlagModel

sentences_1 = ["样例数据-1", "样例数据-2"]
sentences_2 = ["样例数据-3", "样例数据-4"]
model = FlagModel('BAAI/bge-large-zh-v1.5',
                  query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
                  use_fp16=True)  # Setting use_fp16 to True speeds up computation with a slight performance degradation
embeddings_1 = model.encode(sentences_1)
embeddings_2 = model.encode(sentences_2)
similarity = embeddings_1 @ embeddings_2.T
print(similarity)

# For s2p (short query to long passage) retrieval tasks, use encode_queries(),
# which automatically adds the instruction to each query.
# The corpus in a retrieval task can still use encode() or encode_corpus(),
# since passages don't need the instruction.
queries = ['query_1', 'query_2']
passages = ["样例文档-1", "样例文档-2"]
q_embeddings = model.encode_queries(queries)
p_embeddings = model.encode(passages)
scores = q_embeddings @ p_embeddings.T
```

For the value of the argument `query_instruction_for_retrieval`, see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list).

By default, FlagModel will use all available GPUs when encoding. Please set `os.environ["CUDA_VISIBLE_DEVICES"]` to select specific GPUs.
You can also set `os.environ["CUDA_VISIBLE_DEVICES"]=""` to make all GPUs unavailable.

#### Using Sentence-Transformers

You can also use the `bge` models with [sentence-transformers](https://www.SBERT.net):

```
pip install -U sentence-transformers
```

```python
from sentence_transformers import SentenceTransformer

sentences_1 = ["样例数据-1", "样例数据-2"]
sentences_2 = ["样例数据-3", "样例数据-4"]
model = SentenceTransformer('BAAI/bge-large-zh-v1.5')
embeddings_1 = model.encode(sentences_1, normalize_embeddings=True)
embeddings_2 = model.encode(sentences_2, normalize_embeddings=True)
similarity = embeddings_1 @ embeddings_2.T
print(similarity)
```

For s2p (short query to long passage) retrieval tasks, each short query should start with an instruction (for instructions, see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list)). But the instruction is not needed for passages.

```python
from sentence_transformers import SentenceTransformer

queries = ['query_1', 'query_2']
passages = ["样例文档-1", "样例文档-2"]
instruction = "为这个句子生成表示以用于检索相关文章:"
model = SentenceTransformer('BAAI/bge-large-zh-v1.5')
q_embeddings = model.encode([instruction + q for q in queries], normalize_embeddings=True)
p_embeddings = model.encode(passages, normalize_embeddings=True)
scores = q_embeddings @ p_embeddings.T
```

#### Using Langchain

You can use `bge` in langchain like this:

```python
from langchain.embeddings import HuggingFaceBgeEmbeddings

model_name = "BAAI/bge-large-en-v1.5"
model_kwargs = {'device': 'cuda'}
encode_kwargs = {'normalize_embeddings': True}  # set True to compute cosine similarity
model = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
    query_instruction="为这个句子生成表示以用于检索相关文章:"
)
model.query_instruction = "为这个句子生成表示以用于检索相关文章:"
```

#### Using HuggingFace Transformers

With the transformers package, you can use the model like this: first, pass your input through the transformer model; then, select the last hidden state of the first token (i.e., [CLS]) as the sentence embedding.

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Sentences we want sentence embeddings for
sentences = ["样例数据-1", "样例数据-2"]

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh-v1.5')
model = AutoModel.from_pretrained('BAAI/bge-large-zh-v1.5')
model.eval()

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# For s2p (short query to long passage) retrieval tasks, add an instruction to each query (but not to passages):
# encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)
    # Perform pooling. In this case, cls pooling.
    sentence_embeddings = model_output[0][:, 0]
# normalize embeddings
sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)
print("Sentence embeddings:", sentence_embeddings)
```

### Usage for Reranker

Different from the embedding model, the reranker takes a question and a document as input and directly outputs a similarity score instead of an embedding. You can get a relevance score by inputting a query and a passage to the reranker. The reranker is optimized based on cross-entropy loss, so the relevance score is not bounded to a specific range.
#### Using FlagEmbedding

```
pip install -U FlagEmbedding
```

Get relevance scores (higher scores indicate more relevance):

```python
from FlagEmbedding import FlagReranker

reranker = FlagReranker('BAAI/bge-reranker-large', use_fp16=True)  # Setting use_fp16 to True speeds up computation with a slight performance degradation

score = reranker.compute_score(['query', 'passage'])
print(score)

scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])
print(scores)
```

#### Using Huggingface transformers

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-large')
model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-large')
model.eval()

pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
with torch.no_grad():
    inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
    scores = model(**inputs, return_dict=True).logits.view(-1, ).float()
    print(scores)
```

#### Using the reranker with the ONNX files

```python
import torch
from optimum.onnxruntime import ORTModelForSequenceClassification  # type: ignore
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-large')
# PyTorch reference model and its ONNX export
model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-base')
model_ort = ORTModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-base', file_name="onnx/model.onnx")

# Query-passage pairs we want relevance scores for
pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]

# Tokenize the pairs
encoded_input = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt')

# Compute relevance scores with the ONNX model
scores_ort = model_ort(**encoded_input, return_dict=True).logits.view(-1, ).float()
# Compute relevance scores with the PyTorch model
with torch.inference_mode():
    scores = model(**encoded_input, return_dict=True).logits.view(-1, ).float()
# scores and scores_ort are identical
```

#### Using the reranker with infinity

It is also possible to deploy the onnx/torch files with the [infinity_emb](https://github.com/michaelfeil/infinity) pip package.

```python
import asyncio
from infinity_emb import AsyncEmbeddingEngine, EngineArgs

query = 'what is a panda?'
docs = ['The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear', "Paris is in France."]

engine = AsyncEmbeddingEngine.from_args(
    EngineArgs(
        model_name_or_path="BAAI/bge-reranker-base",
        device="cpu",
        engine="torch",  # or engine="optimum" for onnx
    )
)

async def main():
    async with engine:
        ranking, usage = await engine.rerank(query=query, docs=docs)
        print(list(zip(ranking, docs)))

asyncio.run(main())
```

## Evaluation

`baai-general-embedding` models achieve **state-of-the-art performance on both the MTEB and C-MTEB leaderboards!** For more details and evaluation tools, see our [scripts](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md).
- **MTEB**:

| Model Name | Dimension | Sequence Length | Average (56) | Retrieval (15) | Clustering (11) | Pair Classification (3) | Reranking (4) | STS (10) | Summarization (1) | Classification (12) |
|:----|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | 1024 | 512 | **64.23** | **54.29** | 46.08 | 87.12 | 60.03 | 83.11 | 31.61 | 75.97 |
| [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | 768 | 512 | 63.55 | 53.25 | 45.77 | 86.55 | 58.86 | 82.4 | 31.07 | 75.53 |
| [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | 384 | 512 | 62.17 | 51.68 | 43.82 | 84.92 | 58.36 | 81.59 | 30.12 | 74.14 |
| [bge-large-en](https://huggingface.co/BAAI/bge-large-en) | 1024 | 512 | 63.98 | 53.9 | 46.98 | 85.8 | 59.48 | 81.56 | 32.06 | 76.21 |
| [bge-base-en](https://huggingface.co/BAAI/bge-base-en) | 768 | 512 | 63.36 | 53.0 | 46.32 | 85.86 | 58.7 | 81.84 | 29.27 | 75.27 |
| [gte-large](https://huggingface.co/thenlper/gte-large) | 1024 | 512 | 63.13 | 52.22 | 46.84 | 85.00 | 59.13 | 83.35 | 31.66 | 73.33 |
| [gte-base](https://huggingface.co/thenlper/gte-base) | 768 | 512 | 62.39 | 51.14 | 46.2 | 84.57 | 58.61 | 82.3 | 31.17 | 73.01 |
| [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1024 | 512 | 62.25 | 50.56 | 44.49 | 86.03 | 56.61 | 82.05 | 30.19 | 75.24 |
| [bge-small-en](https://huggingface.co/BAAI/bge-small-en) | 384 | 512 | 62.11 | 51.82 | 44.31 | 83.78 | 57.97 | 80.72 | 30.53 | 74.37 |
| [instructor-xl](https://huggingface.co/hkunlp/instructor-xl) | 768 | 512 | 61.79 | 49.26 | 44.74 | 86.62 | 57.29 | 83.06 | 32.32 | 61.79 |
| [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 768 | 512 | 61.5 | 50.29 | 43.80 | 85.73 | 55.91 | 81.05 | 30.28 | 73.84 |
| [gte-small](https://huggingface.co/thenlper/gte-small) | 384 | 512 | 61.36 | 49.46 | 44.89 | 83.54 | 57.7 | 82.07 | 30.42 | 72.31 |
| [text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | 1536 | 8192 | 60.99 | 49.25 | 45.9 | 84.89 | 56.32 | 80.97 | 30.8 | 70.93 |
| [e5-small-v2](https://huggingface.co/intfloat/e5-base-v2) | 384 | 512 | 59.93 | 49.04 | 39.92 | 84.67 | 54.32 | 80.39 | 31.16 | 72.94 |
| [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 768 | 512 | 59.51 | 42.24 | 43.72 | 85.06 | 56.42 | 82.63 | 30.08 | 73.42 |
| [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) | 768 | 514 | 57.78 | 43.81 | 43.69 | 83.04 | 59.36 | 80.28 | 27.49 | 65.07 |
| [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) | 4096 | 2048 | 57.59 | 48.22 | 38.93 | 81.9 | 55.65 | 77.74 | 33.6 | 66.19 |

- **C-MTEB**: We create the benchmark C-MTEB for Chinese text embedding, which consists of 31 datasets from 6 tasks. Please refer to [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md) for a detailed introduction.
| Model | Embedding dimension | Avg | Retrieval | STS | PairClassification | Classification | Reranking | Clustering |
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|
| [**BAAI/bge-large-zh-v1.5**](https://huggingface.co/BAAI/bge-large-zh-v1.5) | 1024 | **64.53** | 70.46 | 56.25 | 81.6 | 69.13 | 65.84 | 48.99 |
| [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | 768 | 63.13 | 69.49 | 53.72 | 79.75 | 68.07 | 65.39 | 47.53 |
| [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | 512 | 57.82 | 61.77 | 49.11 | 70.41 | 63.96 | 60.92 | 44.18 |
| [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | 1024 | 64.20 | 71.53 | 54.98 | 78.94 | 68.32 | 65.11 | 48.39 |
| [bge-large-zh-noinstruct](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | 1024 | 63.53 | 70.55 | 53 | 76.77 | 68.58 | 64.91 | 50.01 |
| [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | 768 | 62.96 | 69.53 | 54.12 | 77.5 | 67.07 | 64.91 | 47.63 |
| [multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) | 1024 | 58.79 | 63.66 | 48.44 | 69.89 | 67.34 | 56.00 | 48.23 |
| [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | 512 | 58.27 | 63.07 | 49.45 | 70.35 | 63.64 | 61.48 | 45.09 |
| [m3e-base](https://huggingface.co/moka-ai/m3e-base) | 768 | 57.10 | 56.91 | 50.47 | 63.99 | 67.52 | 59.34 | 47.68 |
| [m3e-large](https://huggingface.co/moka-ai/m3e-large) | 1024 | 57.05 | 54.75 | 50.42 | 64.3 | 68.2 | 59.66 | 48.88 |
| [multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) | 768 | 55.48 | 61.63 | 46.49 | 67.07 | 65.35 | 54.35 | 40.68 |
| [multilingual-e5-small](https://huggingface.co/intfloat/multilingual-e5-small) | 384 | 55.38 | 59.95 | 45.27 | 66.45 | 65.85 | 53.86 | 45.26 |
| [text-embedding-ada-002(OpenAI)](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings) | 1536 | 53.02 | 52.0 | 43.35 | 69.56 | 64.31 | 54.28 | 45.68 |
| [luotuo](https://huggingface.co/silk-road/luotuo-bert-medium) | 1024 | 49.37 | 44.4 | 42.78 | 66.62 | 61 | 49.25 | 44.39 |
| [text2vec-base](https://huggingface.co/shibing624/text2vec-base-chinese) | 768 | 47.63 | 38.79 | 43.41 | 67.41 | 62.19 | 49.45 | 37.66 |
| [text2vec-large](https://huggingface.co/GanymedeNil/text2vec-large-chinese) | 1024 | 47.36 | 41.94 | 44.97 | 70.86 | 60.66 | 49.16 | 30.02 |

- **Reranking**:
See [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/) for the evaluation script.
| Model | T2Reranking | T2RerankingZh2En\* | T2RerankingEn2Zh\* | MMarcoReranking | CMedQAv1 | CMedQAv2 | Avg |
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|
| text2vec-base-multilingual | 64.66 | 62.94 | 62.51 | 14.37 | 48.46 | 48.6 | 50.26 |
| multilingual-e5-small | 65.62 | 60.94 | 56.41 | 29.91 | 67.26 | 66.54 | 57.78 |
| multilingual-e5-large | 64.55 | 61.61 | 54.28 | 28.6 | 67.42 | 67.92 | 57.4 |
| multilingual-e5-base | 64.21 | 62.13 | 54.68 | 29.5 | 66.23 | 66.98 | 57.29 |
| m3e-base | 66.03 | 62.74 | 56.07 | 17.51 | 77.05 | 76.76 | 59.36 |
| m3e-large | 66.13 | 62.72 | 56.1 | 16.46 | 77.76 | 78.27 | 59.57 |
| bge-base-zh-v1.5 | 66.49 | 63.25 | 57.02 | 29.74 | 80.47 | 84.88 | 63.64 |
| bge-large-zh-v1.5 | 65.74 | 63.39 | 57.03 | 28.74 | 83.45 | 85.44 | 63.97 |
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | 67.28 | 63.95 | 60.45 | 35.46 | 81.26 | 84.1 | 65.42 |
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | 67.6 | 64.03 | 61.44 | 37.16 | 82.15 | 84.18 | 66.09 |

\* : T2RerankingZh2En and T2RerankingEn2Zh are cross-language retrieval tasks.

## Train

### BAAI Embedding

We pre-train the models using [retromae](https://github.com/staoxiao/RetroMAE) and train them on large-scale pair data using contrastive learning.
**You can fine-tune the embedding model on your data following our [examples](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune).**
We also provide a [pre-train example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/pretrain).
Note that the goal of pre-training is to reconstruct the text; the pre-trained model cannot be used for similarity calculation directly and needs to be fine-tuned first.
For more training details for bge, see [baai_general_embedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md).

### BGE Reranker

A cross-encoder performs full attention over the input pair, which is more accurate than an embedding model (i.e., a bi-encoder) but also more time-consuming. Therefore, it can be used to re-rank the top-k documents returned by the embedding model; a minimal retrieve-then-rerank sketch appears at the end of this card.
We train the cross-encoder on multilingual pair data; the data format is the same as for the embedding model, so you can fine-tune it easily following our [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker).
For more details, please refer to [./FlagEmbedding/reranker/README.md](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/reranker).

## Citation

If you find this repository useful, please consider giving a star :star: and a citation

```
@misc{bge_embedding,
  title={C-Pack: Packaged Resources To Advance General Chinese Embedding},
  author={Shitao Xiao and Zheng Liu and Peitian Zhang and Niklas Muennighoff},
  year={2023},
  eprint={2309.07597},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
```

## License

FlagEmbedding is licensed under the [MIT License](https://github.com/FlagOpen/FlagEmbedding/blob/master/LICENSE). The released models can be used for commercial purposes free of charge.
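As referenced in the BGE Reranker section, here is a hedged, minimal retrieve-then-rerank sketch; the corpus, query, and top-k value are illustrative, and the API calls follow the FlagEmbedding usage shown earlier in this card:

```python
# Minimal retrieve-then-rerank sketch: the bi-encoder shortlists candidates,
# then the cross-encoder re-scores the shortlist. Inputs are illustrative.
from FlagEmbedding import FlagModel, FlagReranker

corpus = [
    "The giant panda is a bear species endemic to China.",
    "Paris is the capital of France.",
    "Pandas feed almost entirely on bamboo.",
]
query = "what do pandas eat?"

embedder = FlagModel('BAAI/bge-base-en-v1.5',
                     query_instruction_for_retrieval="Represent this sentence for searching relevant passages:")
reranker = FlagReranker('BAAI/bge-reranker-large', use_fp16=True)

# Stage 1: embed and shortlist the top-2 candidates by inner product
q_emb = embedder.encode_queries([query])
d_emb = embedder.encode(corpus)
top_k = (q_emb @ d_emb.T)[0].argsort()[::-1][:2]

# Stage 2: re-score the shortlist with the cross-encoder
scores = reranker.compute_score([[query, corpus[i]] for i in top_k])
best_score, best_passage = max(zip(scores, (corpus[i] for i in top_k)))
print(best_passage)
```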
Ariffiq99/CRAB_COPA_KUCI_Albert_Base_finetuned
Ariffiq99
2024-06-04T05:46:02Z
108
0
transformers
[ "transformers", "tensorboard", "safetensors", "albert", "multiple-choice", "generated_from_trainer", "base_model:Ariffiq99/COPA_KUCI_albert_base_finetuned", "base_model:finetune:Ariffiq99/COPA_KUCI_albert_base_finetuned", "license:apache-2.0", "endpoints_compatible", "region:us" ]
multiple-choice
2024-06-04T05:02:41Z
--- license: apache-2.0 base_model: Ariffiq99/COPA_KUCI_albert_base_finetuned tags: - generated_from_trainer metrics: - f1 model-index: - name: CRAB_COPA_KUCI_Albert_Base_finetuned results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CRAB_COPA_KUCI_Albert_Base_finetuned This model is a fine-tuned version of [Ariffiq99/COPA_KUCI_albert_base_finetuned](https://huggingface.co/Ariffiq99/COPA_KUCI_albert_base_finetuned) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.0723 - F1: 0.7417 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 1.2662 | 1.0 | 2880 | 0.9492 | 0.7083 | | 1.1817 | 2.0 | 5760 | 1.2988 | 0.7111 | | 0.9847 | 3.0 | 8640 | 0.9879 | 0.7167 | | 0.9304 | 4.0 | 11520 | 1.2139 | 0.7083 | | 0.9566 | 5.0 | 14400 | 1.0738 | 0.7250 | | 0.8718 | 6.0 | 17280 | 1.1113 | 0.7236 | | 0.8314 | 7.0 | 20160 | 1.0836 | 0.7361 | | 0.7331 | 8.0 | 23040 | 1.0723 | 0.7417 | ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.19.2 - Tokenizers 0.19.1
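Since the usage sections above are empty, here is a hedged COPA-style inference sketch; the premise and choices are illustrative, and the checkpoint is assumed to load as a standard `AutoModelForMultipleChoice`:

```python
# Hedged COPA-style multiple-choice inference sketch; inputs are illustrative.
import torch
from transformers import AutoTokenizer, AutoModelForMultipleChoice

repo = "Ariffiq99/CRAB_COPA_KUCI_Albert_Base_finetuned"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForMultipleChoice.from_pretrained(repo)

premise = "The man broke his toe. What was the cause?"
choices = ["He got a hole in his sock.", "He dropped a hammer on his foot."]

# Encode (premise, choice) pairs, then add a batch dim: (1, num_choices, seq_len)
enc = tokenizer([premise] * len(choices), choices, padding=True, return_tensors="pt")
inputs = {k: v.unsqueeze(0) for k, v in enc.items()}
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, num_choices)
print(choices[logits.argmax(dim=-1).item()])
```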
windmaple/gemma-2-finetuned-model-axolotl
windmaple
2024-06-04T05:44:10Z
7
0
transformers
[ "transformers", "safetensors", "gemma", "feature-extraction", "arxiv:1910.09700", "text-generation-inference", "endpoints_compatible", "region:us" ]
feature-extraction
2024-06-04T02:52:21Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
MuhammadYousef/Quora-Sincerity-Insincerity
MuhammadYousef
2024-06-04T05:43:46Z
191
0
transformers
[ "transformers", "safetensors", "bert", "text-classification", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2024-06-04T05:43:28Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
kiatkock/sentiment_pc_combinedBase
kiatkock
2024-06-04T05:41:46Z
107
0
transformers
[ "transformers", "tensorboard", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:ahmedrachid/FinancialBERT-Sentiment-Analysis", "base_model:finetune:ahmedrachid/FinancialBERT-Sentiment-Analysis", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2024-05-31T01:05:21Z
--- base_model: ahmedrachid/FinancialBERT-Sentiment-Analysis tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: sentiment_pc_combinedBase results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sentiment_pc_combinedBase This model is a fine-tuned version of [ahmedrachid/FinancialBERT-Sentiment-Analysis](https://huggingface.co/ahmedrachid/FinancialBERT-Sentiment-Analysis) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5153 - Accuracy: 0.8683 - F1: 0.8376 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:------:|:----:|:---------------:|:--------:|:------:| | No log | 0.1739 | 50 | 0.5234 | 0.8096 | 0.7723 | | No log | 0.3478 | 100 | 0.4390 | 0.8457 | 0.8151 | | No log | 0.5217 | 150 | 0.4168 | 0.8491 | 0.8137 | | No log | 0.6957 | 200 | 0.4252 | 0.8522 | 0.8150 | | No log | 0.8696 | 250 | 0.3931 | 0.8561 | 0.8196 | | No log | 1.0435 | 300 | 0.4409 | 0.8409 | 0.8118 | | No log | 1.2174 | 350 | 0.4108 | 0.8657 | 0.8271 | | No log | 1.3913 | 400 | 0.4382 | 0.8613 | 0.8292 | | No log | 1.5652 | 450 | 0.4147 | 0.8622 | 0.8287 | | 0.415 | 1.7391 | 500 | 0.4069 | 0.8652 | 0.8331 | | 0.415 | 1.9130 | 550 | 0.4170 | 0.8591 | 0.8275 | | 0.415 | 2.0870 | 600 | 0.4533 | 0.8626 | 0.8296 | | 0.415 | 2.2609 | 650 | 0.4613 | 0.87 | 0.8401 | | 0.415 | 2.4348 | 700 | 0.4531 | 0.8770 | 0.8447 | | 0.415 | 2.6087 | 750 | 0.4534 | 0.8583 | 0.8277 | | 0.415 | 2.7826 | 800 | 0.4756 | 0.8570 | 0.8274 | | 0.415 | 2.9565 | 850 | 0.4482 | 0.8683 | 0.8391 | | 0.415 | 3.1304 | 900 | 0.4858 | 0.8665 | 0.8350 | | 0.415 | 3.3043 | 950 | 0.4873 | 0.8639 | 0.8341 | | 0.1812 | 3.4783 | 1000 | 0.5153 | 0.8683 | 0.8376 | | 0.1812 | 3.6522 | 1050 | 0.5345 | 0.8578 | 0.8281 | | 0.1812 | 3.8261 | 1100 | 0.5372 | 0.8609 | 0.8331 | | 0.1812 | 4.0 | 1150 | 0.5172 | 0.8670 | 0.8379 | | 0.1812 | 4.1739 | 1200 | 0.5643 | 0.8643 | 0.8342 | | 0.1812 | 4.3478 | 1250 | 0.5783 | 0.8622 | 0.8326 | | 0.1812 | 4.5217 | 1300 | 0.5909 | 0.8565 | 0.8273 | ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.19.2 - Tokenizers 0.19.1
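For reference, the hyperparameters listed in this card map onto roughly the following `TrainingArguments`; this is a sketch, and `output_dir` plus any field not listed above are illustrative assumptions:

```python
# Sketch of TrainingArguments matching the hyperparameters listed in the card.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="sentiment_pc_combinedBase",  # assumed
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=2,  # effective train batch size: 32
    num_train_epochs=5,
    lr_scheduler_type="linear",
    seed=42,
)
```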
0xfaskety/Qwen-Qwen1.5-7B-1717478956
0xfaskety
2024-06-04T05:37:00Z
7
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T05:29:24Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
alwanrahmana/indobert-large-p2_bus_focal_loss
alwanrahmana
2024-06-04T05:36:20Z
39
0
transformers
[ "transformers", "safetensors", "bert", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-06-04T05:35:48Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Ciel127/Baichuan2-7B-chat-mathreasoning_enhanced
Ciel127
2024-06-04T05:34:29Z
0
0
null
[ "safetensors", "license:mit", "region:us" ]
null
2024-05-30T14:55:49Z
---
license: mit
---
# We are proud to present our customized LoRA model tailored specifically for the Baichuan2-7B-Chat LLM.

This meticulously crafted LoRA significantly enhances Baichuan's mathematical reasoning capabilities. Through rigorous testing, we have observed a remarkable improvement in zero-shot accuracy on the GSM8K dataset, a leap from **6.55%** to **19.18%**. Remarkably, this boost in mathematical prowess does not come at the expense of other functionalities: proficiency in Chinese language processing, translation ability, general English comprehension, and knowledge of open-world subjects all remain consistently high.

Integration is effortless, requiring only a simple **LoRA-merge process** to unlock these enhanced capabilities; see the sketch below.

This work was completed together with Unakar, so you can also find it at https://huggingface.co/unakar/Baichuan2-7B-Chat_Math_Reasoning_Enhanced.
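As a hedged illustration of that merge step, assuming the adapter weights in this repo follow the standard PEFT layout:

```python
# Hedged LoRA-merge sketch using PEFT; assumes a standard adapter layout.
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "baichuan-inc/Baichuan2-7B-Chat", torch_dtype=torch.float16, trust_remote_code=True
)
model = PeftModel.from_pretrained(base, "Ciel127/Baichuan2-7B-chat-mathreasoning_enhanced")
model = model.merge_and_unload()  # fold the LoRA deltas into the base weights
model.save_pretrained("baichuan2-7b-chat-math-merged")  # output path is illustrative
```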
FINwillson/git_test_SFT
FINwillson
2024-06-04T05:34:00Z
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "base_model:finetune:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-06-04T05:33:00Z
--- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl base_model: unsloth/llama-3-8b-bnb-4bit --- # Uploaded model - **Developed by:** FINwillson - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
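A hedged loading sketch follows; it assumes the repo stores weights in the usual Unsloth/Transformers layout and is untested against this checkpoint:

```python
# Hedged loading sketch for the uploaded fine-tune.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="FINwillson/git_test_SFT",
    max_seq_length=2048,  # assumed; not stated in the card
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch to fast inference mode
```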
hdve/google-gemma-7b-1717479082
hdve
2024-06-04T05:33:57Z
7
0
transformers
[ "transformers", "safetensors", "gemma", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T05:31:25Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
olanasir/bart-cnn-dailymail
olanasir
2024-06-04T05:32:40Z
120
0
transformers
[ "transformers", "tensorboard", "safetensors", "bart", "text2text-generation", "generated_from_trainer", "base_model:facebook/bart-large-cnn", "base_model:finetune:facebook/bart-large-cnn", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2024-06-04T04:11:20Z
--- license: mit base_model: facebook/bart-large-cnn tags: - generated_from_trainer model-index: - name: bart-cnn-dailymail results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-cnn-dailymail This model is a fine-tuned version of [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.19.2 - Tokenizers 0.19.1
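A hedged usage sketch, assuming the repo contains a complete seq2seq checkpoint; the article text and generation parameters are illustrative:

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="olanasir/bart-cnn-dailymail")
article = "Long news article text goes here ..."  # illustrative input
print(summarizer(article, max_length=130, min_length=30, do_sample=False)[0]["summary_text"])
```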
andricValdez/multilingual-e5-large-finetuned-autext24-subtask2
andricValdez
2024-06-04T05:29:09Z
5
0
transformers
[ "transformers", "safetensors", "xlm-roberta", "text-classification", "generated_from_trainer", "base_model:intfloat/multilingual-e5-large", "base_model:finetune:intfloat/multilingual-e5-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2024-06-04T01:17:16Z
--- license: mit base_model: intfloat/multilingual-e5-large tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: multilingual-e5-large-finetuned-autext24-subtask2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # multilingual-e5-large-finetuned-autext24-subtask2 This model is a fine-tuned version of [intfloat/multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.9903 - Accuracy: 0.8543 - F1: 0.8553 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:| | No log | 1.0 | 2571 | 0.6295 | 0.7574 | 0.7566 | | 0.5796 | 2.0 | 5142 | 0.4672 | 0.8416 | 0.8424 | | 0.5796 | 3.0 | 7713 | 0.7611 | 0.8160 | 0.8169 | | 0.1763 | 4.0 | 10284 | 0.8108 | 0.8562 | 0.8571 | | 0.1763 | 5.0 | 12855 | 0.9903 | 0.8543 | 0.8553 | ### Framework versions - Transformers 4.40.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
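A hedged usage sketch; the checkpoint is assumed to load as a standard sequence-classification model, and the label semantics come from the AuTexTification fine-tuning data, which this card does not document:

```python
from transformers import pipeline

# Repo id taken from this card; label meanings depend on the training data.
clf = pipeline("text-classification",
               model="andricValdez/multilingual-e5-large-finetuned-autext24-subtask2")
print(clf("Example sentence to classify."))
```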
apwic/nerui-unipelt-0
apwic
2024-06-04T05:28:28Z
0
0
null
[ "tensorboard", "generated_from_trainer", "id", "base_model:indolem/indobert-base-uncased", "base_model:finetune:indolem/indobert-base-uncased", "license:mit", "region:us" ]
null
2024-06-04T05:13:24Z
--- language: - id license: mit base_model: indolem/indobert-base-uncased tags: - generated_from_trainer model-index: - name: nerui-unipelt-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nerui-unipelt-0 This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0592 - Location Precision: 0.8738 - Location Recall: 0.9574 - Location F1: 0.9137 - Location Number: 94 - Organization Precision: 0.8994 - Organization Recall: 0.9102 - Organization F1: 0.9048 - Organization Number: 167 - Person Precision: 0.9781 - Person Recall: 0.9781 - Person F1: 0.9781 - Person Number: 137 - Overall Precision: 0.9193 - Overall Recall: 0.9447 - Overall F1: 0.9318 - Overall Accuracy: 0.9867 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Location Precision | Location Recall | Location F1 | Location Number | Organization Precision | Organization Recall | Organization F1 | Organization Number | Person Precision | Person Recall | Person F1 | Person Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------------------:|:---------------:|:-----------:|:---------------:|:----------------------:|:-------------------:|:---------------:|:-------------------:|:----------------:|:-------------:|:---------:|:-------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 0.8691 | 1.0 | 96 | 0.5068 | 0.0 | 0.0 | 0.0 | 94 | 0.6667 | 0.0120 | 0.0235 | 167 | 0.1429 | 0.0073 | 0.0139 | 137 | 0.3 | 0.0075 | 0.0147 | 0.8351 | | 0.3831 | 2.0 | 192 | 0.2198 | 0.3763 | 0.3723 | 0.3743 | 94 | 0.4935 | 0.6826 | 0.5729 | 167 | 0.7453 | 0.8759 | 0.8054 | 137 | 0.5546 | 0.6759 | 0.6093 | 0.9318 | | 0.1821 | 3.0 | 288 | 0.0957 | 0.7789 | 0.7872 | 0.7831 | 94 | 0.7407 | 0.8383 | 0.7865 | 167 | 0.9571 | 0.9781 | 0.9675 | 137 | 0.8208 | 0.8744 | 0.8467 | 0.9691 | | 0.1207 | 4.0 | 384 | 0.0757 | 0.7632 | 0.9255 | 0.8365 | 94 | 0.7865 | 0.8383 | 0.8116 | 167 | 0.9712 | 0.9854 | 0.9783 | 137 | 0.8399 | 0.9095 | 0.8733 | 0.9740 | | 0.0982 | 5.0 | 480 | 0.0582 | 0.8020 | 0.8617 | 0.8308 | 94 | 0.8820 | 0.8503 | 0.8659 | 167 | 0.9571 | 0.9781 | 0.9675 | 137 | 0.8881 | 0.8970 | 0.8925 | 0.9796 | | 0.0798 | 6.0 | 576 | 0.0528 | 0.8019 | 0.9043 | 0.8500 | 94 | 0.8571 | 0.8982 | 0.8772 | 167 | 0.9855 | 0.9927 | 0.9891 | 137 | 0.8854 | 0.9322 | 0.9082 | 0.9818 | | 0.0718 | 7.0 | 672 | 0.0513 | 0.7857 | 0.9362 | 0.8544 | 94 | 0.8735 | 0.8683 | 0.8709 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.8846 | 0.9246 | 0.9042 | 0.9815 | | 0.0652 | 8.0 | 768 | 0.0449 | 0.8241 | 0.9468 | 0.8812 | 94 | 0.8848 | 0.8743 | 0.8795 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9046 | 0.9296 | 0.9170 | 0.9840 | | 0.0583 | 9.0 | 864 | 0.0453 | 0.8333 | 0.9574 | 0.8911 | 94 | 0.8963 | 0.8802 | 0.8882 | 167 | 
0.9927 | 0.9927 | 0.9927 | 137 | 0.9120 | 0.9372 | 0.9244 | 0.9848 | | 0.0527 | 10.0 | 960 | 0.0432 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.8613 | 0.8922 | 0.8765 | 167 | 1.0 | 0.9781 | 0.9889 | 137 | 0.9049 | 0.9322 | 0.9183 | 0.9829 | | 0.0489 | 11.0 | 1056 | 0.0411 | 0.8641 | 0.9468 | 0.9036 | 94 | 0.8922 | 0.8922 | 0.8922 | 167 | 0.9854 | 0.9854 | 0.9854 | 137 | 0.9165 | 0.9372 | 0.9267 | 0.9854 | | 0.0463 | 12.0 | 1152 | 0.0461 | 0.8641 | 0.9468 | 0.9036 | 94 | 0.8795 | 0.8743 | 0.8769 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9136 | 0.9296 | 0.9215 | 0.9840 | | 0.0432 | 13.0 | 1248 | 0.0435 | 0.8396 | 0.9468 | 0.89 | 94 | 0.8817 | 0.8922 | 0.8869 | 167 | 0.9779 | 0.9708 | 0.9744 | 137 | 0.9027 | 0.9322 | 0.9172 | 0.9851 | | 0.0394 | 14.0 | 1344 | 0.0464 | 0.8529 | 0.9255 | 0.8878 | 94 | 0.8795 | 0.8743 | 0.8769 | 167 | 0.9571 | 0.9781 | 0.9675 | 137 | 0.8995 | 0.9221 | 0.9107 | 0.9820 | | 0.0366 | 15.0 | 1440 | 0.0398 | 0.8980 | 0.9362 | 0.9167 | 94 | 0.9102 | 0.9102 | 0.9102 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9303 | 0.9397 | 0.9350 | 0.9856 | | 0.0356 | 16.0 | 1536 | 0.0372 | 0.9158 | 0.9255 | 0.9206 | 94 | 0.9012 | 0.9281 | 0.9145 | 167 | 0.9852 | 0.9708 | 0.9779 | 137 | 0.9328 | 0.9422 | 0.9375 | 0.9867 | | 0.0308 | 17.0 | 1632 | 0.0406 | 0.8641 | 0.9468 | 0.9036 | 94 | 0.9085 | 0.8922 | 0.9003 | 167 | 0.9710 | 0.9781 | 0.9745 | 137 | 0.9185 | 0.9347 | 0.9265 | 0.9848 | | 0.0313 | 18.0 | 1728 | 0.0389 | 0.8725 | 0.9468 | 0.9082 | 94 | 0.9744 | 0.9102 | 0.9412 | 167 | 0.9571 | 0.9781 | 0.9675 | 137 | 0.9422 | 0.9422 | 0.9422 | 0.9876 | | 0.0288 | 19.0 | 1824 | 0.0402 | 0.8878 | 0.9255 | 0.9062 | 94 | 0.9383 | 0.9102 | 0.9240 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.9397 | 0.9397 | 0.9397 | 0.9873 | | 0.0263 | 20.0 | 1920 | 0.0443 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.9074 | 0.8802 | 0.8936 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.9181 | 0.9296 | 0.9238 | 0.9854 | | 0.0245 | 21.0 | 2016 | 0.0430 | 0.8365 | 0.9255 | 0.8788 | 94 | 0.8935 | 0.9042 | 0.8988 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.9075 | 0.9372 | 0.9221 | 0.9848 | | 0.0254 | 22.0 | 2112 | 0.0498 | 0.8381 | 0.9362 | 0.8844 | 94 | 0.8929 | 0.8982 | 0.8955 | 167 | 0.9854 | 0.9854 | 0.9854 | 137 | 0.9098 | 0.9372 | 0.9233 | 0.9837 | | 0.0232 | 23.0 | 2208 | 0.0435 | 0.8667 | 0.9681 | 0.9146 | 94 | 0.9299 | 0.8743 | 0.9012 | 167 | 0.9854 | 0.9854 | 0.9854 | 137 | 0.9323 | 0.9347 | 0.9335 | 0.9859 | | 0.0204 | 24.0 | 2304 | 0.0446 | 0.8679 | 0.9787 | 0.9200 | 94 | 0.8896 | 0.8683 | 0.8788 | 167 | 0.9640 | 0.9781 | 0.9710 | 137 | 0.9093 | 0.9322 | 0.9206 | 0.9856 | | 0.0222 | 25.0 | 2400 | 0.0430 | 0.9010 | 0.9681 | 0.9333 | 94 | 0.9102 | 0.9102 | 0.9102 | 167 | 0.9855 | 0.9927 | 0.9891 | 137 | 0.9335 | 0.9523 | 0.9428 | 0.9865 | | 0.0204 | 26.0 | 2496 | 0.0383 | 0.8878 | 0.9255 | 0.9062 | 94 | 0.8706 | 0.8862 | 0.8783 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.9113 | 0.9296 | 0.9204 | 0.9859 | | 0.0186 | 27.0 | 2592 | 0.0407 | 0.9167 | 0.9362 | 0.9263 | 94 | 0.8851 | 0.9222 | 0.9032 | 167 | 0.9855 | 0.9927 | 0.9891 | 137 | 0.9265 | 0.9497 | 0.9380 | 0.9873 | | 0.0209 | 28.0 | 2688 | 0.0472 | 0.8462 | 0.9362 | 0.8889 | 94 | 0.8922 | 0.8922 | 0.8922 | 167 | 0.9855 | 0.9927 | 0.9891 | 137 | 0.9120 | 0.9372 | 0.9244 | 0.9845 | | 0.0171 | 29.0 | 2784 | 0.0443 | 0.8713 | 0.9362 | 0.9026 | 94 | 0.9136 | 0.8862 | 0.8997 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.9252 | 0.9322 | 0.9287 | 0.9856 | | 0.0171 | 30.0 | 2880 | 0.0457 | 0.8958 | 0.9149 | 0.9053 | 94 | 0.8844 | 0.9162 | 0.9 | 167 | 0.9855 | 0.9927 | 0.9891 | 137 | 
0.9214 | 0.9422 | 0.9317 | 0.9867 |
| 0.0164 | 31.0 | 2976 | 0.0497 | 0.8812 | 0.9468 | 0.9128 | 94 | 0.9136 | 0.8862 | 0.8997 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9323 | 0.9347 | 0.9335 | 0.9859 |
| 0.0151 | 32.0 | 3072 | 0.0477 | 0.8812 | 0.9468 | 0.9128 | 94 | 0.9024 | 0.8862 | 0.8943 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9277 | 0.9347 | 0.9312 | 0.9854 |
| 0.0147 | 33.0 | 3168 | 0.0459 | 0.8641 | 0.9468 | 0.9036 | 94 | 0.9080 | 0.8862 | 0.8970 | 167 | 0.9927 | 0.9927 | 0.9927 | 137 | 0.9256 | 0.9372 | 0.9313 | 0.9862 |
| 0.0142 | 34.0 | 3264 | 0.0485 | 0.8447 | 0.9255 | 0.8832 | 94 | 0.8941 | 0.9102 | 0.9021 | 167 | 0.9927 | 0.9927 | 0.9927 | 137 | 0.9146 | 0.9422 | 0.9282 | 0.9856 |
| 0.0134 | 35.0 | 3360 | 0.0579 | 0.8257 | 0.9574 | 0.8867 | 94 | 0.9091 | 0.8383 | 0.8723 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.91 | 0.9146 | 0.9123 | 0.9831 |
| 0.0123 | 36.0 | 3456 | 0.0508 | 0.8725 | 0.9468 | 0.9082 | 94 | 0.9102 | 0.9102 | 0.9102 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9236 | 0.9422 | 0.9328 | 0.9867 |
| 0.0133 | 37.0 | 3552 | 0.0473 | 0.9158 | 0.9255 | 0.9206 | 94 | 0.8715 | 0.9341 | 0.9017 | 167 | 0.9854 | 0.9854 | 0.9854 | 137 | 0.9197 | 0.9497 | 0.9345 | 0.9862 |
| 0.0105 | 38.0 | 3648 | 0.0483 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.9212 | 0.9102 | 0.9157 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.9330 | 0.9447 | 0.9388 | 0.9865 |
| 0.012 | 39.0 | 3744 | 0.0404 | 0.9293 | 0.9787 | 0.9534 | 94 | 0.9006 | 0.9222 | 0.9112 | 167 | 0.9710 | 0.9781 | 0.9745 | 137 | 0.9314 | 0.9548 | 0.9429 | 0.9895 |
| 0.0116 | 40.0 | 3840 | 0.0490 | 0.8627 | 0.9362 | 0.8980 | 94 | 0.8862 | 0.8862 | 0.8862 | 167 | 0.9640 | 0.9781 | 0.9710 | 137 | 0.9069 | 0.9296 | 0.9181 | 0.9862 |
| 0.0104 | 41.0 | 3936 | 0.0579 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.8982 | 0.8982 | 0.8982 | 167 | 0.9927 | 0.9927 | 0.9927 | 137 | 0.9189 | 0.9397 | 0.9292 | 0.9848 |
| 0.0111 | 42.0 | 4032 | 0.0515 | 0.875 | 0.9681 | 0.9192 | 94 | 0.8902 | 0.8743 | 0.8822 | 167 | 0.9927 | 0.9927 | 0.9927 | 137 | 0.9210 | 0.9372 | 0.9290 | 0.9859 |
| 0.0094 | 43.0 | 4128 | 0.0655 | 0.8224 | 0.9362 | 0.8756 | 94 | 0.9042 | 0.9042 | 0.9042 | 167 | 0.9927 | 0.9927 | 0.9927 | 137 | 0.9124 | 0.9422 | 0.9271 | 0.9837 |
| 0.0091 | 44.0 | 4224 | 0.0648 | 0.8396 | 0.9468 | 0.89 | 94 | 0.9006 | 0.8683 | 0.8841 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9109 | 0.9246 | 0.9177 | 0.9845 |
| 0.0085 | 45.0 | 4320 | 0.0548 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.8844 | 0.9162 | 0.9 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.9130 | 0.9497 | 0.9310 | 0.9856 |
| 0.0092 | 46.0 | 4416 | 0.0582 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.8795 | 0.8743 | 0.8769 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.9066 | 0.9271 | 0.9168 | 0.9837 |
| 0.0086 | 47.0 | 4512 | 0.0598 | 0.8571 | 0.9574 | 0.9045 | 94 | 0.8929 | 0.8982 | 0.8955 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9122 | 0.9397 | 0.9257 | 0.9848 |
| 0.0088 | 48.0 | 4608 | 0.0569 | 0.8788 | 0.9255 | 0.9016 | 94 | 0.8929 | 0.8982 | 0.8955 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9183 | 0.9322 | 0.9252 | 0.9845 |
| 0.0079 | 49.0 | 4704 | 0.0570 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.9 | 0.9162 | 0.9080 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9195 | 0.9472 | 0.9332 | 0.9854 |
| 0.008 | 50.0 | 4800 | 0.0592 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.9030 | 0.8922 | 0.8976 | 167 | 0.9712 | 0.9854 | 0.9783 | 137 | 0.9140 | 0.9347 | 0.9242 | 0.9837 |
| 0.0075 | 51.0 | 4896 | 0.0636 | 0.8462 | 0.9362 | 0.8889 | 94 | 0.9241 | 0.8743 | 0.8985 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.9225 | 0.9271 | 0.9248 | 0.9840 |
| 0.0065 | 52.0 | 4992 | 0.0522 | 0.8911 | 0.9574 | 0.9231 | 94 | 0.9053 | 0.9162 | 0.9107 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.9265 | 0.9497 | 0.9380 | 0.9881 |
| 0.0059 | 53.0 | 5088 | 0.0700 | 0.8396 | 0.9468 | 0.89 | 94 | 0.9141 | 0.8922 | 0.9030 | 167 | 0.9712 | 0.9854 | 0.9783 | 137 | 0.9142 | 0.9372 | 0.9256 | 0.9831 |
| 0.0061 | 54.0 | 5184 | 0.0684 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.8701 | 0.9222 | 0.8953 | 167 | 0.9779 | 0.9708 | 0.9744 | 137 | 0.9014 | 0.9422 | 0.9214 | 0.9840 |
| 0.0075 | 55.0 | 5280 | 0.0515 | 0.8990 | 0.9468 | 0.9223 | 94 | 0.9107 | 0.9162 | 0.9134 | 167 | 0.9712 | 0.9854 | 0.9783 | 137 | 0.9286 | 0.9472 | 0.9378 | 0.9870 |
| 0.0067 | 56.0 | 5376 | 0.0545 | 0.89 | 0.9468 | 0.9175 | 94 | 0.8908 | 0.9281 | 0.9091 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9197 | 0.9497 | 0.9345 | 0.9862 |
| 0.0068 | 57.0 | 5472 | 0.0620 | 0.8725 | 0.9468 | 0.9082 | 94 | 0.8895 | 0.9162 | 0.9027 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9148 | 0.9447 | 0.9295 | 0.9859 |
| 0.0064 | 58.0 | 5568 | 0.0645 | 0.8558 | 0.9468 | 0.8990 | 94 | 0.8941 | 0.9102 | 0.9021 | 167 | 0.9854 | 0.9854 | 0.9854 | 137 | 0.9148 | 0.9447 | 0.9295 | 0.9845 |
| 0.0081 | 59.0 | 5664 | 0.0579 | 0.8627 | 0.9362 | 0.8980 | 94 | 0.8909 | 0.8802 | 0.8855 | 167 | 0.9708 | 0.9708 | 0.9708 | 137 | 0.9109 | 0.9246 | 0.9177 | 0.9845 |
| 0.0047 | 60.0 | 5760 | 0.0560 | 0.8824 | 0.9574 | 0.9184 | 94 | 0.8862 | 0.8862 | 0.8862 | 167 | 0.9854 | 0.9854 | 0.9854 | 137 | 0.9187 | 0.9372 | 0.9279 | 0.9867 |
| 0.0051 | 61.0 | 5856 | 0.0563 | 0.8812 | 0.9468 | 0.9128 | 94 | 0.9042 | 0.9042 | 0.9042 | 167 | 0.9854 | 0.9854 | 0.9854 | 137 | 0.9259 | 0.9422 | 0.9340 | 0.9862 |
| 0.0047 | 62.0 | 5952 | 0.0566 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.9130 | 0.8802 | 0.8963 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9252 | 0.9322 | 0.9287 | 0.9876 |
| 0.0048 | 63.0 | 6048 | 0.0556 | 0.875 | 0.9681 | 0.9192 | 94 | 0.9107 | 0.9162 | 0.9134 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9242 | 0.9497 | 0.9368 | 0.9870 |
| 0.0052 | 64.0 | 6144 | 0.0580 | 0.8911 | 0.9574 | 0.9231 | 94 | 0.9125 | 0.8743 | 0.8930 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9296 | 0.9296 | 0.9296 | 0.9878 |
| 0.0043 | 65.0 | 6240 | 0.0596 | 0.8812 | 0.9468 | 0.9128 | 94 | 0.9006 | 0.8683 | 0.8841 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9223 | 0.9246 | 0.9235 | 0.9862 |
| 0.0045 | 66.0 | 6336 | 0.0550 | 0.8911 | 0.9574 | 0.9231 | 94 | 0.8941 | 0.9102 | 0.9021 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9263 | 0.9472 | 0.9366 | 0.9873 |
| 0.0055 | 67.0 | 6432 | 0.0650 | 0.87 | 0.9255 | 0.8969 | 94 | 0.8908 | 0.9281 | 0.9091 | 167 | 0.9854 | 0.9854 | 0.9854 | 137 | 0.9173 | 0.9472 | 0.9320 | 0.9859 |
| 0.0044 | 68.0 | 6528 | 0.0656 | 0.8614 | 0.9255 | 0.8923 | 94 | 0.8929 | 0.8982 | 0.8955 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9185 | 0.9347 | 0.9265 | 0.9851 |
| 0.0043 | 69.0 | 6624 | 0.0586 | 0.8990 | 0.9468 | 0.9223 | 94 | 0.8941 | 0.9102 | 0.9021 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9284 | 0.9447 | 0.9365 | 0.9867 |
| 0.0047 | 70.0 | 6720 | 0.0600 | 0.88 | 0.9362 | 0.9072 | 94 | 0.9146 | 0.8982 | 0.9063 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9325 | 0.9372 | 0.9348 | 0.9862 |
| 0.0039 | 71.0 | 6816 | 0.0705 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.8882 | 0.9042 | 0.8961 | 167 | 0.9854 | 0.9854 | 0.9854 | 137 | 0.9122 | 0.9397 | 0.9257 | 0.9848 |
| 0.0052 | 72.0 | 6912 | 0.0639 | 0.8713 | 0.9362 | 0.9026 | 94 | 0.8889 | 0.9102 | 0.8994 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9191 | 0.9422 | 0.9305 | 0.9859 |
| 0.0049 | 73.0 | 7008 | 0.0575 | 0.8922 | 0.9681 | 0.9286 | 94 | 0.9157 | 0.9102 | 0.9129 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9356 | 0.9497 | 0.9426 | 0.9887 |
| 0.0046 | 74.0 | 7104 | 0.0575 | 0.8911 | 0.9574 | 0.9231 | 94 | 0.9048 | 0.9102 | 0.9075 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9309 | 0.9472 | 0.9390 | 0.9876 |
| 0.0038 | 75.0 | 7200 | 0.0606 | 0.8812 | 0.9468 | 0.9128 | 94 | 0.9042 | 0.9042 | 0.9042 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9235 | 0.9397 | 0.9315 | 0.9862 |
| 0.0044 | 76.0 | 7296 | 0.0588 | 0.8889 | 0.9362 | 0.9119 | 94 | 0.9096 | 0.9042 | 0.9069 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9279 | 0.9372 | 0.9325 | 0.9862 |
| 0.0035 | 77.0 | 7392 | 0.0580 | 0.9020 | 0.9787 | 0.9388 | 94 | 0.9212 | 0.9102 | 0.9157 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9356 | 0.9497 | 0.9426 | 0.9876 |
| 0.0038 | 78.0 | 7488 | 0.0585 | 0.9 | 0.9574 | 0.9278 | 94 | 0.9202 | 0.8982 | 0.9091 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.935 | 0.9397 | 0.9373 | 0.9867 |
| 0.0044 | 79.0 | 7584 | 0.0628 | 0.89 | 0.9468 | 0.9175 | 94 | 0.8895 | 0.9162 | 0.9027 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9193 | 0.9447 | 0.9318 | 0.9862 |
| 0.0035 | 80.0 | 7680 | 0.0628 | 0.88 | 0.9362 | 0.9072 | 94 | 0.8743 | 0.9162 | 0.8947 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9102 | 0.9422 | 0.9259 | 0.9856 |
| 0.0029 | 81.0 | 7776 | 0.0658 | 0.8558 | 0.9468 | 0.8990 | 94 | 0.8882 | 0.9042 | 0.8961 | 167 | 0.9710 | 0.9781 | 0.9745 | 137 | 0.9078 | 0.9397 | 0.9235 | 0.9848 |
| 0.0024 | 82.0 | 7872 | 0.0574 | 0.8990 | 0.9468 | 0.9223 | 94 | 0.875 | 0.9222 | 0.8980 | 167 | 0.9708 | 0.9708 | 0.9708 | 137 | 0.9126 | 0.9447 | 0.9284 | 0.9862 |
| 0.0041 | 83.0 | 7968 | 0.0613 | 0.88 | 0.9362 | 0.9072 | 94 | 0.9048 | 0.9102 | 0.9075 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9282 | 0.9422 | 0.9352 | 0.9856 |
| 0.0041 | 84.0 | 8064 | 0.0559 | 0.8824 | 0.9574 | 0.9184 | 94 | 0.9102 | 0.9102 | 0.9102 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9261 | 0.9447 | 0.9353 | 0.9870 |
| 0.0035 | 85.0 | 8160 | 0.0533 | 0.9082 | 0.9468 | 0.9271 | 94 | 0.9321 | 0.9042 | 0.9179 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9421 | 0.9397 | 0.9409 | 0.9878 |
| 0.003 | 86.0 | 8256 | 0.0551 | 0.8835 | 0.9681 | 0.9239 | 94 | 0.9096 | 0.9042 | 0.9069 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9261 | 0.9447 | 0.9353 | 0.9876 |
| 0.0036 | 87.0 | 8352 | 0.0583 | 0.8824 | 0.9574 | 0.9184 | 94 | 0.8994 | 0.9102 | 0.9048 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9216 | 0.9447 | 0.9330 | 0.9862 |
| 0.0029 | 88.0 | 8448 | 0.0542 | 0.8824 | 0.9574 | 0.9184 | 94 | 0.9 | 0.9162 | 0.9080 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9218 | 0.9472 | 0.9343 | 0.9867 |
| 0.0025 | 89.0 | 8544 | 0.0624 | 0.8654 | 0.9574 | 0.9091 | 94 | 0.9091 | 0.8982 | 0.9036 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9212 | 0.9397 | 0.9303 | 0.9862 |
| 0.0026 | 90.0 | 8640 | 0.0577 | 0.8812 | 0.9468 | 0.9128 | 94 | 0.9212 | 0.9102 | 0.9157 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9305 | 0.9422 | 0.9363 | 0.9873 |
| 0.003 | 91.0 | 8736 | 0.0582 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.8786 | 0.9102 | 0.8941 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9150 | 0.9472 | 0.9309 | 0.9862 |
| 0.0029 | 92.0 | 8832 | 0.0570 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.8941 | 0.9102 | 0.9021 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9171 | 0.9447 | 0.9307 | 0.9870 |
| 0.0039 | 93.0 | 8928 | 0.0583 | 0.8835 | 0.9681 | 0.9239 | 94 | 0.9036 | 0.8982 | 0.9009 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9284 | 0.9447 | 0.9365 | 0.9867 |
| 0.0034 | 94.0 | 9024 | 0.0584 | 0.8641 | 0.9468 | 0.9036 | 94 | 0.8889 | 0.9102 | 0.8994 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9124 | 0.9422 | 0.9271 | 0.9867 |
| 0.0024 | 95.0 | 9120 | 0.0588 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.9152 | 0.9042 | 0.9096 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9259 | 0.9422 | 0.9340 | 0.9873 |
| 0.0034 | 96.0 | 9216 | 0.0598 | 0.8641 | 0.9468 | 0.9036 | 94 | 0.8824 | 0.8982 | 0.8902 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9098 | 0.9372 | 0.9233 | 0.9856 |
| 0.003 | 97.0 | 9312 | 0.0613 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.8876 | 0.8982 | 0.8929 | 167 | 0.9708 | 0.9708 | 0.9708 | 137 | 0.9120 | 0.9372 | 0.9244 | 0.9854 |
| 0.0027 | 98.0 | 9408 | 0.0602 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.8988 | 0.9042 | 0.9015 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9191 | 0.9422 | 0.9305 | 0.9865 |
| 0.0023 | 99.0 | 9504 | 0.0590 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.8994 | 0.9102 | 0.9048 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9193 | 0.9447 | 0.9318 | 0.9867 |
| 0.0024 | 100.0 | 9600 | 0.0592 | 0.8738 | 0.9574 | 0.9137 | 94 | 0.8994 | 0.9102 | 0.9048 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.9193 | 0.9447 | 0.9318 | 0.9867 |

### Framework versions

- Transformers 4.39.3
- Pytorch 2.3.0+cu121
- Datasets 2.19.1
- Tokenizers 0.15.2
mynameisdidit/UTS_DeepLearning
mynameisdidit
2024-06-04T05:20:11Z
0
0
null
[ "region:us" ]
null
2024-06-04T05:17:29Z
Name : NPM : Dataset : Google Colab Link:
BVRA/convnext_base.in1k_ft_fungitastic_224
BVRA
2024-06-04T05:19:54Z
10
0
DanishFungi
[ "DanishFungi", "pytorch", "image-classification", "ecology", "fungi", "FGVC", "license:cc-by-nc-4.0", "region:us" ]
image-classification
2024-05-26T20:20:34Z
---
tags:
- image-classification
- ecology
- fungi
- FGVC
library_name: DanishFungi
license: cc-by-nc-4.0
---
# Model card for BVRA/convnext_base.in1k_ft_df24_224

## Model Details
- **Model Type:** Danish Fungi Classification
- **Model Stats:**
  - Params (M): 90.5
  - Image size: 224 x 224
- **Papers:**
  - **Original:** ??
- **Train Dataset:** DF24 --> https://sites.google.com/view/danish-fungi-dataset

## Model Usage

### Image Embeddings
```python
import timm
import torch
import torchvision.transforms as T
from PIL import Image
from urllib.request import urlopen

model = timm.create_model("hf-hub:BVRA/convnext_base.in1k_ft_df24_224", pretrained=True)
model = model.eval()

train_transforms = T.Compose([
    T.Resize((224, 224)),
    T.ToTensor(),
    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

img = Image.open(PATH_TO_YOUR_IMAGE)
output = model(train_transforms(img).unsqueeze(0))  # output is a (1, num_features) shaped tensor
```

## Citation
```bibtex
@InProceedings{Picek_2022_WACV,
    author    = {Picek, Luk{\'a}{\v{s}} and {\v{S}}ulc, Milan and Matas, Ji{\v{r}}{\'\i} and Jeppesen, Thomas S. and Heilmann-Clausen, Jacob and L{\ae}ss{\o}e, Thomas and Fr{\o}slev, Tobias},
    title     = {Danish Fungi 2020 - Not Just Another Image Recognition Dataset},
    booktitle = {Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
    month     = {January},
    year      = {2022},
    pages     = {1525-1535}
}
```
```bibtex
@article{picek2022automatic,
  title={Automatic Fungi Recognition: Deep Learning Meets Mycology},
  author={Picek, Luk{\'a}{\v{s}} and {\v{S}}ulc, Milan and Matas, Ji{\v{r}}{\'\i} and Heilmann-Clausen, Jacob and Jeppesen, Thomas S and Lind, Emil},
  journal={Sensors},
  volume={22},
  number={2},
  pages={633},
  year={2022},
  publisher={Multidisciplinary Digital Publishing Institute}
}
```
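As a short follow-up sketch (not part of the original card): the output above can be treated as class logits and decoded into top-5 predictions. The index-to-species mapping comes from the DF24 metadata, which is not bundled with this repo, so labels are left as bare indices here.
```python
import torch

# Assumes `model` and `output` from the snippet above; `output` holds one
# row of class scores for the single input image.
probs = output.softmax(dim=-1)
top5 = torch.topk(probs, k=5, dim=-1)
for p, idx in zip(top5.values[0].tolist(), top5.indices[0].tolist()):
    print(f"class {idx}: probability {p:.3f}")
```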
ALI-B/mistral-7b
ALI-B
2024-06-04T05:15:39Z
3
0
transformers
[ "transformers", "pytorch", "mistral", "text-generation", "unsloth", "trl", "sft", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T04:58:49Z
--- library_name: transformers tags: - unsloth - trl - sft --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
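Since the get-started section above is empty, here is a generic, hedged sketch inferred only from the repo tags (transformers, mistral, text-generation); nothing below is confirmed by the model author.
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "ALI-B/mistral-7b"  # this repo; the usage pattern is a generic assumption
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")

inputs = tok("Hello, how are you?", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```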
nttwt1597/test_v1_Biomed
nttwt1597
2024-06-04T05:14:51Z
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-05-30T09:24:19Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
KYUNGHYUN9/itos_v0.025_1.3b-1000step_onlyitos
KYUNGHYUN9
2024-06-04T05:14:44Z
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-06-04T05:14:42Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
kimdeokgi/ko-pt-model-test1
kimdeokgi
2024-06-04T05:14:01Z
2,252
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "kor", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T04:39:48Z
--- license: apache-2.0 language: - kor --- ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
shinyice/chatvector-llava-v1.5-plus-houou-v3-7b
shinyice
2024-06-04T05:12:10Z
9
0
transformers
[ "transformers", "safetensors", "llava_llama", "text-generation", "vision", "image-captioning", "ja", "arxiv:2310.04799", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T04:24:06Z
---
license: cc-by-nc-4.0
language:
- ja
library_name: transformers
tags:
- vision
- image-captioning
---

# Chatvector-llava-v1.5-plus-Houou-v3-7b Model Card

# Model Details

Note: this model was born out of curiosity. Accuracy is not guaranteed, but it seems better than the version built on v1.6.<br>
chatvector-llava-v1.5-plus-houou-v3-7b is a VLM that can describe images in Japanese.<br>
It draws on the [Chat Vector](https://arxiv.org/abs/2310.04799) technique.

Following the Chat Vector recipe, this model was created by adding and subtracting the weights of [llava-v1.5-7b](https://huggingface.co/liuhaotian/llava-v1.5-7b), [houou-instruction-7b-v3](https://huggingface.co/moneyforward/houou-instruction-7b-v3), and [Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) as follows:<br>
```
houou-instruction-7b-v3 + (llava-v1.5-7b - Llama-2-7b-hf)
```

The program below is based on code from the cited sites; please also see the Bibliography section.

## Uses

```sh
git clone https://github.com/haotian-liu/LLaVA.git
cd LLaVA
pip install -e .
```

```python
import requests
import torch
import transformers
from PIL import Image
from transformers.generation.streamers import TextStreamer
from llava.constants import DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM
from llava.mm_utils import tokenizer_image_token, process_images

model_path = "shinyice/chatvector-llava-v1.5-plus-houou-v3-7b"
device = "cuda" if torch.cuda.is_available() else "cpu"

model = LlavaLlamaForCausalLM.from_pretrained(
    model_path,
    device_map=device,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    torch_dtype=torch.float16,
).eval()

tokenizer = transformers.AutoTokenizer.from_pretrained(
    model_path,
    model_max_length=1024,
    padding_side="right",
    use_fast=False,
)

model.get_model().vision_tower.load_model()
model = model.to(device)

eos_token_id_list = [
    tokenizer.eos_token_id,
    tokenizer.bos_token_id,
]

image_url = "https://huggingface.co/rinna/bilingual-gpt-neox-4b-minigpt4/resolve/main/sample.jpg"
image = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')
if not isinstance(image, list):
    image = [image]

image_tensor = process_images(image, model.get_model().vision_tower.image_processor, model.config)
image_sizes = [img.size for img in image]
if isinstance(image_tensor, list):
    image_tensor = [img.to(model.device, dtype=torch.float16) for img in image_tensor]
else:
    image_tensor = image_tensor.to(device, dtype=torch.float16)
image_sizes_tensor = torch.tensor(image_sizes, dtype=torch.int32, device=device)

conv_mode = "v1"
conv = conv_templates[conv_mode].copy()

prompt = "猫の隣には何がありますか?"  # "What is next to the cat?"
inp = DEFAULT_IMAGE_TOKEN + '\n' + prompt
conv.append_message(conv.roles[0], inp)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()

input_ids = tokenizer_image_token(
    prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt'
).unsqueeze(0)
if device == "cuda":
    input_ids = input_ids.to(device)

temperature = 0.0
top_p = 1.0
max_new_tokens = 256

with torch.inference_mode():
    output = model.generate(
        inputs=input_ids,
        images=image_tensor,
        image_sizes=image_sizes_tensor,
        do_sample=True if temperature > 0 else False,
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_new_tokens,
        use_cache=True,
        eos_token_id=eos_token_id_list,
    )

print(tokenizer.decode(output[0]))
```

## Bibliography

- [Making LLaVA handle Japanese with Chat Vector](https://zenn.dev/toshi_456/articles/0166a6eaa81c7b) (in Japanese)
- [Turning a Japanese LLM into a chat model with Chat Vector](https://qiita.com/jovyan/items/ee6affa5ee5bdaada6b4) (in Japanese)
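For illustration only, a minimal sketch of the weight arithmetic above. This is not the author's actual merge script: the three state dicts are assumed to be plain {name: tensor} mappings obtained however you like, and LLaVA-only tensors (vision tower, multimodal projector) have no counterpart in the two text-only models, so they pass through unchanged.
```python
import torch

def add_chat_vector(chat_sd, target_sd, base_sd):
    """Sketch of houou + (llava - llama2): apply the chat-vector delta to
    every tensor all three models share; keep LLaVA-only tensors as-is."""
    merged = {}
    for name, w in chat_sd.items():
        if name in target_sd and name in base_sd and base_sd[name].shape == w.shape:
            # shared language-model weight: target + (chat - base)
            merged[name] = target_sd[name] + (w - base_sd[name])
        else:
            # vision tower / projector / resized embeddings: copy from LLaVA
            merged[name] = w
    return merged
```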
mradermacher/ArMistral-GEC-GGUF
mradermacher
2024-06-04T05:09:02Z
18
0
transformers
[ "transformers", "gguf", "en", "base_model:gagan3012/ArMistral-GEC", "base_model:quantized:gagan3012/ArMistral-GEC", "endpoints_compatible", "region:us", "conversational" ]
null
2024-06-04T04:08:34Z
--- base_model: gagan3012/ArMistral-GEC language: - en library_name: transformers quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/gagan3012/ArMistral-GEC <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q2_K.gguf) | Q2_K | 2.8 | | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.IQ3_XS.gguf) | IQ3_XS | 3.1 | | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q3_K_S.gguf) | Q3_K_S | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.IQ3_S.gguf) | IQ3_S | 3.3 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.IQ3_M.gguf) | IQ3_M | 3.4 | | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q3_K_M.gguf) | Q3_K_M | 3.6 | lower quality | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q3_K_L.gguf) | Q3_K_L | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.IQ4_XS.gguf) | IQ4_XS | 4.0 | | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q4_K_S.gguf) | Q4_K_S | 4.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q4_K_M.gguf) | Q4_K_M | 4.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q5_K_S.gguf) | Q5_K_S | 5.1 | | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q5_K_M.gguf) | Q5_K_M | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q6_K.gguf) | Q6_K | 6.0 | very good quality | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.Q8_0.gguf) | Q8_0 | 7.8 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/ArMistral-GEC-GGUF/resolve/main/ArMistral-GEC.f16.gguf) | f16 | 14.6 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. 
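As a rough, unofficial illustration of the usage note above, a GGUF quant from the table can be run with the llama-cpp-python bindings (the file path is assumed to be local; the prompt is invented):
```python
from llama_cpp import Llama

# Q4_K_M is the "fast, recommended" quant from the table above.
llm = Llama(model_path="ArMistral-GEC.Q4_K_M.gguf", n_ctx=4096)
out = llm("Correct this sentence: ...", max_tokens=128)
print(out["choices"][0]["text"])
```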
## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
gridoneai/Llama-3-8B-Jungso-Instruct-DoRA-3k
gridoneai
2024-06-04T05:08:31Z
5
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T04:33:41Z
--- license: cc-by-nc-sa-4.0 ---
LucasEllenberger/CSE144-science-final-test
LucasEllenberger
2024-06-04T05:06:42Z
1
0
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "diffusers-training", "lora", "base_model:runwayml/stable-diffusion-v1-5", "base_model:adapter:runwayml/stable-diffusion-v1-5", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2024-06-04T04:50:16Z
---
license: creativeml-openrail-m
library_name: diffusers
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- diffusers-training
- lora
base_model: runwayml/stable-diffusion-v1-5
inference: true
---

<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->

# LoRA text2image fine-tuning - LucasEllenberger/CSE144-science-final-test

These are LoRA adaptation weights for runwayml/stable-diffusion-v1-5. The weights were fine-tuned on the niabalaji123/googlewebscrapedataset dataset. You can find some example images below.

![img_0](./image_0.png)
![img_1](./image_1.png)
![img_2](./image_2.png)
![img_3](./image_3.png)

## Intended uses & limitations

#### How to use

```python
# TODO: add an example code snippet for running this diffusion pipeline
```

#### Limitations and bias

[TODO: provide examples of latent issues and potential remediations]

## Training details

[TODO: describe the data used to train the model]
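The "How to use" snippet above is still a TODO; as a stopgap, here is a hedged sketch of the usual diffusers pattern for weights like these (the base model and repo id are taken from this card; the prompt is invented):
```python
import torch
from diffusers import StableDiffusionPipeline

# Load the base model the LoRA was trained against, then attach the LoRA.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("LucasEllenberger/CSE144-science-final-test")

image = pipe("a labeled diagram of a plant cell", num_inference_steps=30).images[0]
image.save("sample.png")
```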
Minjae99/test_llama-3-Kor
Minjae99
2024-06-04T05:06:29Z
77
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "en", "ko", "arxiv:1910.09700", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
text-generation
2024-06-03T23:37:50Z
--- library_name: transformers license: llama3 language: - en - ko --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description This model is an LLaMA3-based language model trained for text classification. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. 
Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** L4 GPU - **Hours used:** 1 hour - **Cloud Provider:** Google Colab - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed]
martinsinnona/visdecode_vega_3
martinsinnona
2024-06-04T05:06:13Z
49
0
transformers
[ "transformers", "safetensors", "pix2struct", "image-text-to-text", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
image-text-to-text
2024-06-04T04:27:43Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
dmavkgo/vilt_finetuned_200
dmavkgo
2024-06-04T05:02:13Z
63
0
transformers
[ "transformers", "safetensors", "vilt", "visual-question-answering", "generated_from_trainer", "dataset:vqa", "base_model:dandelin/vilt-b32-mlm", "base_model:finetune:dandelin/vilt-b32-mlm", "license:apache-2.0", "endpoints_compatible", "region:us" ]
visual-question-answering
2024-06-04T03:32:11Z
--- license: apache-2.0 base_model: dandelin/vilt-b32-mlm tags: - generated_from_trainer datasets: - vqa model-index: - name: vilt_finetuned_200 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vilt_finetuned_200 This model is a fine-tuned version of [dandelin/vilt-b32-mlm](https://huggingface.co/dandelin/vilt-b32-mlm) on the vqa dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.19.2 - Tokenizers 0.19.1
richardkelly/Qwen-Qwen1.5-1.8B-1717476207
richardkelly
2024-06-04T05:01:47Z
142
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T04:43:27Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
FuturisticVibes/Meta-Llama-3-70B-Instruct-abliterated-v3.5-6.0bpw-h8-exl2
FuturisticVibes
2024-06-04T04:58:52Z
5
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "6-bit", "exl2", "region:us" ]
text-generation
2024-06-04T04:51:48Z
--- library_name: transformers license: llama3 --- I have no idea what I’m doing… if this causes the apocalypse someone please let me know. Meta-Llama-3-70B-Instruct-abliterated-v3.5 6.0bpw h8 EXL2 Includes [measurement.json](https://huggingface.co/FuturisticVibes/Meta-Llama-3-70B-Instruct-abliterated-v3.5-6.0bpw-h8-exl2/tree/measurement) file for further quantization Up next is a new, old, long dead, but never forgotten friend… Assuming I can put enough money into RunPod to rent an H100 for a bit… Original Model: https://huggingface.co/failspy/Meta-Llama-3-70B-Instruct-abliterated-v3.5 # Original Model Card # Llama-3-70B-Instruct-abliterated-v3.5 Model Card [My original Jupyter "cookbook" to replicate the methodology can be found here](https://huggingface.co/failspy/llama-3-70B-Instruct-abliterated/blob/main/ortho_cookbook.ipynb) [My personal library o' code used](https://github.com/FailSpy/abliterator) (WIP, looking to improve and generalize) This is [meta-llama/Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) with orthogonalized bfloat16 safetensor weights, generated with a refined methodology based on that which was described in the preview paper/blog post: '[Refusal in LLMs is mediated by a single direction](https://www.alignmentforum.org/posts/jGuXSZgv6qfdhMCuJ/refusal-in-llms-is-mediated-by-a-single-direction)' which I encourage you to read to understand more. ## V3.5? Second try. I felt that the V3 methodology of 70B wasn't well applied, and u/Nexesenex on reddit kinda confirmed my suspicions. So go blame them. :P This one has only a single layer modified(!) and that seems to have completely eliminated moralizing disclaimers. I hope you'll find this model better than 70B-V3! As well, this also fixes the tokenizer. ## Hang on, "abliteration"? Orthogonalization? Ablation? What is this? TL;DR: This model has had certain weights manipulated to "inhibit" the model's ability to express refusal. It is not in anyway _guaranteed_ that it won't refuse you, understand your request, it may still lecture you about ethics/safety, etc. It is tuned in all other respects the same as the original 70B instruct model was, just with the strongest refusal directions orthogonalized out. **TL;TL;DR;DR: It's uncensored in the purest form I can manage -- no new or changed behaviour in any other respect from the original model.** As far as "abliteration": it's just a fun play-on-words using the original "ablation" term used in the original paper to refer to removing features, which I made up particularly to differentiate the model from "uncensored" fine-tunes. Ablate + obliterated = Abliterated Anyways, orthogonalization/ablation are both aspects to refer to the same thing here, the technique in which the refusal feature was "ablated" from the model was via orthogonalization. ## A little more on the methodology, and why this is interesting To me, ablation (or applying the methodology for the inverse, "augmentation") seems to be good for inducing/removing very specific features that you'd have to spend way too many tokens on encouraging or discouraging in your system prompt. Instead, you just apply your system prompt in the ablation script against a blank system prompt on the same dataset and orthogonalize for the desired behaviour in the final model weights. > Why this over fine-tuning? Ablation is much more surgical in nature whilst also being effectively executed with a _lot_ less data than fine-tuning, which I think is its main advantage. 
Its most valuable aspect, though, is that it keeps as much of the original model's knowledge and training intact as possible, whilst removing its tendency to behave in one very specific undesirable manner. (In this case, refusing user requests.) Fine-tuning is still exceptionally useful and the go-to for broad behaviour changes; however, you may be able to get close to your desired behaviour with very few samples using the ablation/augmentation techniques. It may also be a useful step to add to your model refinement: orthogonalize -> fine-tune, or vice versa. I haven't really gotten around to exploring this model stacked with fine-tuning; I encourage others to give it a shot if they've got the capacity. > Okay, fine, but why V3? There's no V2 70B? Well, I released a V2 a while back for 8B under Cognitive Computations. It ended up not being worth it to try V2 with 70B; I wanted to refine the model before wasting compute cycles on what might not even be a better model. I am, however, quite pleased with this latest methodology; it seems to have induced fewer hallucinations. So, to show that it's a fancy new methodology beyond even that of the 8B V2, I decided to do a Microsoft and double up on my version jump because it's *such* an advancement (or so the excuse went, when in actuality it was because too many legacy but actively used Microsoft libraries checked for 'Windows 9' in the OS name to detect Windows 95/98 as one). ## Quirkiness awareness notice This model may come with interesting quirks, the methodology being so new. I encourage you to play with the model and post any quirks you notice in the community tab, as that'll help us further understand what side effects this orthogonalization has. If you manage to develop further improvements, please share! This is really the most basic way to use ablation, but there are other possibilities that I believe are as yet unexplored. Additionally, feel free to reach out in any way about this. I'm on the Cognitive Computations Discord, and I'm watching the Community tab; reach out! I'd love to see this methodology used in other ways, and would gladly support whoever, whenever I can.
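A minimal sketch of the core weight edit (a hypothetical helper, not the cookbook's actual code; it assumes a refusal direction `r` has already been extracted from the model's activations):

```python
import torch

def ablate_direction(W: torch.Tensor, r: torch.Tensor) -> torch.Tensor:
    """Remove the component along direction r from every output of W.

    W has shape (d_model, d_in) and writes into the residual stream;
    W' = W - r r^T W zeroes W's output component along r, so the layer
    can no longer "write" the refusal feature into the residual stream.
    """
    r = r / r.norm()                   # unit-normalize the direction
    return W - torch.outer(r, r) @ W   # subtract the rank-1 projection

# Applied per targeted layer, e.g. (hypothetical attribute paths):
#   blk.self_attn.o_proj.weight.data = ablate_direction(blk.self_attn.o_proj.weight.data, r)
#   blk.mlp.down_proj.weight.data    = ablate_direction(blk.mlp.down_proj.weight.data, r)
```

The same rank-1 projection with the sign flipped is what "augmentation" would look like: adding a direction's contribution back in rather than removing it.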
bella05/pogny-1-64-test
bella05
2024-06-04T04:57:24Z
111
0
transformers
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:klue/roberta-large", "base_model:finetune:klue/roberta-large", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2024-06-04T04:30:40Z
--- base_model: klue/roberta-large tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: pogny-1-64-test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/bella05/huggingface/runs/dhs60jrt) # pogny-1-64-test This model is a fine-tuned version of [klue/roberta-large](https://huggingface.co/klue/roberta-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.7022 - Accuracy: 0.4376 - F1: 0.2665 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.01 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 1.9657 | 1.0 | 1205 | 1.7022 | 0.4376 | 0.2665 | ### Framework versions - Transformers 4.41.0 - Pytorch 2.2.2 - Datasets 2.19.1 - Tokenizers 0.19.1
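The listed setup maps roughly onto 🤗 `TrainingArguments` as in this sketch (not the author's actual script; `output_dir` is a placeholder):

```python
from transformers import TrainingArguments

# Rough reconstruction of the reported hyperparameters (sketch only).
args = TrainingArguments(
    output_dir="pogny-1-64-test",     # placeholder
    learning_rate=1e-2,               # 0.01 as reported (typical RoBERTa-large
                                      # fine-tunes use ~1e-5 to 5e-5)
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=1,
)
```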
nawadkar/human_ai_v2
nawadkar
2024-06-04T04:53:24Z
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "base_model:finetune:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-05-30T23:19:38Z
--- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl base_model: unsloth/llama-3-8b-bnb-4bit --- # Uploaded model - **Developed by:** nawadkar - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
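A minimal loading sketch following Unsloth's usual `FastLanguageModel` pattern (the `max_seq_length` value is an assumption; the card does not specify one):

```python
from unsloth import FastLanguageModel

# Typical Unsloth loading pattern for a 4-bit Llama-3 finetune (sketch).
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="nawadkar/human_ai_v2",
    max_seq_length=2048,   # assumed; not stated in the card
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # enable Unsloth's faster inference path
```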
Jimheaver/T5-text_code_Lora
Jimheaver
2024-06-04T04:52:02Z
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-06-03T12:46:56Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
jianzongwu/lgvi
jianzongwu
2024-06-04T04:50:56Z
71
0
diffusers
[ "diffusers", "safetensors", "arxiv:2401.10226", "license:mit", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2024-03-04T07:04:25Z
--- license: mit --- # Model Card for LGVI ## Model Description - **Paper:** https://arxiv.org/abs/2401.10226 - **Project Page:** https://jianzongwu.github.io/projects/rovi - **Github Repository:** https://github.com/jianzongwu/Language-Driven-Video-Inpainting ### Model Summary The LGVI model is trained on [ROVI](https://huggingface.co/datasets/jianzongwu/rovi) and [Inst-Inpaint](https://github.com/abyildirim/inst-inpaint) for the referring inpainting task. Please check our [project page](https://jianzongwu.github.io/projects/rovi) for more details. ``` @article{wu2024lgvi, title={Towards language-driven video inpainting via multimodal large language models}, author={Wu, Jianzong and Li, Xiangtai and Si, Chenyang and Zhou, Shangchen and Yang, Jingkang and Zhang, Jiangning and Li, Yining and Chen, Kai and Tong, Yunhai and Liu, Ziwei and others}, journal={arXiv preprint arXiv:2401.10226}, year={2024} } ```
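Since the repo is tagged as a diffusers `StableDiffusionPipeline`, the weights should load through diffusers' generic entry point (a sketch; the actual referring video-inpainting workflow requires the code in the GitHub repository above):

```python
from diffusers import DiffusionPipeline

# Load the LGVI checkpoint via diffusers (sketch only; inference for the
# referring-inpainting task uses the project's own pipeline code).
pipe = DiffusionPipeline.from_pretrained("jianzongwu/lgvi")
```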
spsither/mms_300_v3.1020
spsither
2024-06-04T04:48:30Z
97
0
transformers
[ "transformers", "safetensors", "wav2vec2", "automatic-speech-recognition", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2024-06-04T04:46:16Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
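Given the repo's `wav2vec2` / `automatic-speech-recognition` tags, the checkpoint should work with the generic 🤗 pipeline; a sketch, since the card itself gives no usage code (the audio path is a placeholder):

```python
from transformers import pipeline

# Generic ASR usage for a wav2vec2 checkpoint (sketch; not from the card).
asr = pipeline("automatic-speech-recognition", model="spsither/mms_300_v3.1020")
print(asr("sample.wav")["text"])  # "sample.wav" is a placeholder audio file
```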
vaibhavchavan/flan-t5-small-finetuned-xsum
vaibhavchavan
2024-06-04T04:45:04Z
110
0
transformers
[ "transformers", "tensorboard", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "base_model:google/flan-t5-small", "base_model:finetune:google/flan-t5-small", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
2024-05-30T03:20:29Z
--- license: apache-2.0 base_model: google/flan-t5-small tags: - generated_from_trainer metrics: - rouge model-index: - name: flan-t5-small-finetuned-xsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flan-t5-small-finetuned-xsum This model is a fine-tuned version of [google/flan-t5-small](https://huggingface.co/google/flan-t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: nan - Rouge1: 3.5714 - Rouge2: 1.2195 - Rougel: 3.5714 - Rougelsum: 3.5714 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2000 - mixed_precision_training: Native AMP ### Training results Every logged epoch from 1 through 613 produced an identical result: validation loss `nan` with constant metrics (Rouge1 3.5714, Rouge2 1.2195, RougeL 3.5714, RougeLsum 3.5714, Gen Len 19.0). The training-loss column reads `No log` through step 499 and `0.0` from step 500 onward, and the log is truncated mid-row at epoch 613, with no later rows or framework versions recorded. Representative rows: | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:------:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | No log | 1.0 | 1 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | No log | 499.0 | 499 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 500.0 | 500 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 613.0 | 613 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 
0.0 | 614.0 | 614 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 615.0 | 615 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 616.0 | 616 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 617.0 | 617 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 618.0 | 618 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 619.0 | 619 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 620.0 | 620 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 621.0 | 621 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 622.0 | 622 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 623.0 | 623 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 624.0 | 624 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 625.0 | 625 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 626.0 | 626 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 627.0 | 627 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 628.0 | 628 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 629.0 | 629 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 630.0 | 630 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 631.0 | 631 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 632.0 | 632 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 633.0 | 633 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 634.0 | 634 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 635.0 | 635 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 636.0 | 636 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 637.0 | 637 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 638.0 | 638 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 639.0 | 639 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 640.0 | 640 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 641.0 | 641 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 642.0 | 642 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 643.0 | 643 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 644.0 | 644 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 645.0 | 645 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 646.0 | 646 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 647.0 | 647 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 648.0 | 648 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 649.0 | 649 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 650.0 | 650 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 651.0 | 651 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 652.0 | 652 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 653.0 | 653 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 654.0 | 654 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 655.0 | 655 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 656.0 | 656 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 657.0 | 657 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 658.0 | 658 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 659.0 | 659 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 660.0 | 660 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 661.0 | 661 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 662.0 | 662 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 663.0 | 663 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 
| 664.0 | 664 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 665.0 | 665 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 666.0 | 666 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 667.0 | 667 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 668.0 | 668 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 669.0 | 669 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 670.0 | 670 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 671.0 | 671 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 672.0 | 672 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 673.0 | 673 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 674.0 | 674 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 675.0 | 675 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 676.0 | 676 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 677.0 | 677 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 678.0 | 678 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 679.0 | 679 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 680.0 | 680 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 681.0 | 681 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 682.0 | 682 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 683.0 | 683 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 684.0 | 684 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 685.0 | 685 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 686.0 | 686 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 687.0 | 687 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 688.0 | 688 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 689.0 | 689 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 690.0 | 690 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 691.0 | 691 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 692.0 | 692 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 693.0 | 693 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 694.0 | 694 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 695.0 | 695 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 696.0 | 696 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 697.0 | 697 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 698.0 | 698 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 699.0 | 699 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 700.0 | 700 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 701.0 | 701 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 702.0 | 702 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 703.0 | 703 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 704.0 | 704 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 705.0 | 705 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 706.0 | 706 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 707.0 | 707 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 708.0 | 708 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 709.0 | 709 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 710.0 | 710 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 711.0 | 711 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 712.0 | 712 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 713.0 | 713 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 
714.0 | 714 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 715.0 | 715 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 716.0 | 716 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 717.0 | 717 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 718.0 | 718 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 719.0 | 719 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 720.0 | 720 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 721.0 | 721 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 722.0 | 722 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 723.0 | 723 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 724.0 | 724 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 725.0 | 725 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 726.0 | 726 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 727.0 | 727 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 728.0 | 728 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 729.0 | 729 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 730.0 | 730 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 731.0 | 731 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 732.0 | 732 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 733.0 | 733 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 734.0 | 734 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 735.0 | 735 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 736.0 | 736 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 737.0 | 737 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 738.0 | 738 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 739.0 | 739 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 740.0 | 740 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 741.0 | 741 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 742.0 | 742 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 743.0 | 743 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 744.0 | 744 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 745.0 | 745 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 746.0 | 746 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 747.0 | 747 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 748.0 | 748 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 749.0 | 749 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 750.0 | 750 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 751.0 | 751 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 752.0 | 752 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 753.0 | 753 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 754.0 | 754 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 755.0 | 755 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 756.0 | 756 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 757.0 | 757 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 758.0 | 758 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 759.0 | 759 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 760.0 | 760 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 761.0 | 761 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 762.0 | 762 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 763.0 | 763 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 
764.0 | 764 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 765.0 | 765 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 766.0 | 766 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 767.0 | 767 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 768.0 | 768 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 769.0 | 769 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 770.0 | 770 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 771.0 | 771 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 772.0 | 772 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 773.0 | 773 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 774.0 | 774 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 775.0 | 775 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 776.0 | 776 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 777.0 | 777 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 778.0 | 778 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 779.0 | 779 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 780.0 | 780 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 781.0 | 781 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 782.0 | 782 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 783.0 | 783 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 784.0 | 784 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 785.0 | 785 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 786.0 | 786 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 787.0 | 787 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 788.0 | 788 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 789.0 | 789 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 790.0 | 790 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 791.0 | 791 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 792.0 | 792 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 793.0 | 793 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 794.0 | 794 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 795.0 | 795 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 796.0 | 796 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 797.0 | 797 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 798.0 | 798 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 799.0 | 799 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 800.0 | 800 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 801.0 | 801 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 802.0 | 802 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 803.0 | 803 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 804.0 | 804 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 805.0 | 805 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 806.0 | 806 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 807.0 | 807 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 808.0 | 808 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 809.0 | 809 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 810.0 | 810 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 811.0 | 811 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 812.0 | 812 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 813.0 | 813 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 
814.0 | 814 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 815.0 | 815 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 816.0 | 816 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 817.0 | 817 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 818.0 | 818 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 819.0 | 819 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 820.0 | 820 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 821.0 | 821 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 822.0 | 822 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 823.0 | 823 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 824.0 | 824 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 825.0 | 825 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 826.0 | 826 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 827.0 | 827 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 828.0 | 828 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 829.0 | 829 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 830.0 | 830 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 831.0 | 831 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 832.0 | 832 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 833.0 | 833 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 834.0 | 834 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 835.0 | 835 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 836.0 | 836 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 837.0 | 837 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 838.0 | 838 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 839.0 | 839 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 840.0 | 840 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 841.0 | 841 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 842.0 | 842 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 843.0 | 843 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 844.0 | 844 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 845.0 | 845 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 846.0 | 846 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 847.0 | 847 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 848.0 | 848 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 849.0 | 849 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 850.0 | 850 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 851.0 | 851 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 852.0 | 852 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 853.0 | 853 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 854.0 | 854 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 855.0 | 855 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 856.0 | 856 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 857.0 | 857 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 858.0 | 858 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 859.0 | 859 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 860.0 | 860 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 861.0 | 861 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 862.0 | 862 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 863.0 | 863 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 
864.0 | 864 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 865.0 | 865 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 866.0 | 866 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 867.0 | 867 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 868.0 | 868 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 869.0 | 869 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 870.0 | 870 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 871.0 | 871 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 872.0 | 872 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 873.0 | 873 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 874.0 | 874 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 875.0 | 875 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 876.0 | 876 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 877.0 | 877 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 878.0 | 878 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 879.0 | 879 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 880.0 | 880 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 881.0 | 881 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 882.0 | 882 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 883.0 | 883 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 884.0 | 884 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 885.0 | 885 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 886.0 | 886 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 887.0 | 887 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 888.0 | 888 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 889.0 | 889 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 890.0 | 890 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 891.0 | 891 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 892.0 | 892 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 893.0 | 893 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 894.0 | 894 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 895.0 | 895 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 896.0 | 896 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 897.0 | 897 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 898.0 | 898 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 899.0 | 899 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 900.0 | 900 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 901.0 | 901 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 902.0 | 902 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 903.0 | 903 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 904.0 | 904 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 905.0 | 905 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 906.0 | 906 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 907.0 | 907 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 908.0 | 908 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 909.0 | 909 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 910.0 | 910 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 911.0 | 911 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 912.0 | 912 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 913.0 | 913 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 
914.0 | 914 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 915.0 | 915 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 916.0 | 916 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 917.0 | 917 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 918.0 | 918 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 919.0 | 919 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 920.0 | 920 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 921.0 | 921 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 922.0 | 922 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 923.0 | 923 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 924.0 | 924 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 925.0 | 925 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 926.0 | 926 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 927.0 | 927 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 928.0 | 928 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 929.0 | 929 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 930.0 | 930 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 931.0 | 931 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 932.0 | 932 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 933.0 | 933 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 934.0 | 934 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 935.0 | 935 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 936.0 | 936 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 937.0 | 937 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 938.0 | 938 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 939.0 | 939 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 940.0 | 940 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 941.0 | 941 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 942.0 | 942 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 943.0 | 943 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 944.0 | 944 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 945.0 | 945 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 946.0 | 946 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 947.0 | 947 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 948.0 | 948 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 949.0 | 949 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 950.0 | 950 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 951.0 | 951 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 952.0 | 952 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 953.0 | 953 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 954.0 | 954 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 955.0 | 955 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 956.0 | 956 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 957.0 | 957 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 958.0 | 958 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 959.0 | 959 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 960.0 | 960 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 961.0 | 961 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 962.0 | 962 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 963.0 | 963 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 
964.0 | 964 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 965.0 | 965 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 966.0 | 966 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 967.0 | 967 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 968.0 | 968 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 969.0 | 969 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 970.0 | 970 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 971.0 | 971 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 972.0 | 972 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 973.0 | 973 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 974.0 | 974 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 975.0 | 975 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 976.0 | 976 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 977.0 | 977 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 978.0 | 978 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 979.0 | 979 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 980.0 | 980 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 981.0 | 981 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 982.0 | 982 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 983.0 | 983 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 984.0 | 984 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 985.0 | 985 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 986.0 | 986 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 987.0 | 987 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 988.0 | 988 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 989.0 | 989 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 990.0 | 990 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 991.0 | 991 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 992.0 | 992 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 993.0 | 993 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 994.0 | 994 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 995.0 | 995 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 996.0 | 996 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 997.0 | 997 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 998.0 | 998 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 999.0 | 999 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1000.0 | 1000 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1001.0 | 1001 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1002.0 | 1002 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1003.0 | 1003 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1004.0 | 1004 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1005.0 | 1005 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1006.0 | 1006 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1007.0 | 1007 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1008.0 | 1008 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1009.0 | 1009 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1010.0 | 1010 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1011.0 | 1011 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1012.0 | 1012 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1013.0 | 1013 | nan | 3.5714 | 1.2195 | 3.5714 | 
3.5714 | 19.0 | | 0.0 | 1014.0 | 1014 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1015.0 | 1015 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1016.0 | 1016 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1017.0 | 1017 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1018.0 | 1018 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1019.0 | 1019 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1020.0 | 1020 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1021.0 | 1021 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1022.0 | 1022 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1023.0 | 1023 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1024.0 | 1024 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1025.0 | 1025 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1026.0 | 1026 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1027.0 | 1027 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1028.0 | 1028 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1029.0 | 1029 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1030.0 | 1030 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1031.0 | 1031 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1032.0 | 1032 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1033.0 | 1033 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1034.0 | 1034 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1035.0 | 1035 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1036.0 | 1036 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1037.0 | 1037 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1038.0 | 1038 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1039.0 | 1039 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1040.0 | 1040 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1041.0 | 1041 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1042.0 | 1042 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1043.0 | 1043 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1044.0 | 1044 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1045.0 | 1045 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1046.0 | 1046 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1047.0 | 1047 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1048.0 | 1048 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1049.0 | 1049 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1050.0 | 1050 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1051.0 | 1051 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1052.0 | 1052 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1053.0 | 1053 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1054.0 | 1054 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1055.0 | 1055 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1056.0 | 1056 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1057.0 | 1057 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1058.0 | 1058 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1059.0 | 1059 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1060.0 | 1060 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1061.0 | 1061 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1062.0 | 1062 | nan | 
3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1063.0 | 1063 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1064.0 | 1064 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1065.0 | 1065 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1066.0 | 1066 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1067.0 | 1067 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1068.0 | 1068 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1069.0 | 1069 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1070.0 | 1070 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1071.0 | 1071 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1072.0 | 1072 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1073.0 | 1073 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1074.0 | 1074 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1075.0 | 1075 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1076.0 | 1076 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1077.0 | 1077 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1078.0 | 1078 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1079.0 | 1079 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1080.0 | 1080 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1081.0 | 1081 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1082.0 | 1082 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1083.0 | 1083 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1084.0 | 1084 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1085.0 | 1085 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1086.0 | 1086 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1087.0 | 1087 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1088.0 | 1088 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1089.0 | 1089 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1090.0 | 1090 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1091.0 | 1091 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1092.0 | 1092 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1093.0 | 1093 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1094.0 | 1094 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1095.0 | 1095 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1096.0 | 1096 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1097.0 | 1097 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1098.0 | 1098 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1099.0 | 1099 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1100.0 | 1100 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1101.0 | 1101 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1102.0 | 1102 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1103.0 | 1103 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1104.0 | 1104 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1105.0 | 1105 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1106.0 | 1106 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1107.0 | 1107 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1108.0 | 1108 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1109.0 | 1109 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1110.0 | 1110 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 
1111.0 | 1111 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1112.0 | 1112 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1113.0 | 1113 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1114.0 | 1114 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1115.0 | 1115 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1116.0 | 1116 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1117.0 | 1117 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1118.0 | 1118 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1119.0 | 1119 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1120.0 | 1120 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1121.0 | 1121 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1122.0 | 1122 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1123.0 | 1123 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1124.0 | 1124 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1125.0 | 1125 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1126.0 | 1126 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1127.0 | 1127 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1128.0 | 1128 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1129.0 | 1129 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1130.0 | 1130 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1131.0 | 1131 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1132.0 | 1132 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1133.0 | 1133 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1134.0 | 1134 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1135.0 | 1135 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1136.0 | 1136 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1137.0 | 1137 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1138.0 | 1138 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1139.0 | 1139 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1140.0 | 1140 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1141.0 | 1141 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1142.0 | 1142 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1143.0 | 1143 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1144.0 | 1144 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1145.0 | 1145 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1146.0 | 1146 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1147.0 | 1147 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1148.0 | 1148 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1149.0 | 1149 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1150.0 | 1150 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1151.0 | 1151 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1152.0 | 1152 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1153.0 | 1153 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1154.0 | 1154 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1155.0 | 1155 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1156.0 | 1156 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1157.0 | 1157 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1158.0 | 1158 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1159.0 | 1159 | nan | 3.5714 | 1.2195 | 3.5714 | 
3.5714 | 19.0 | | 0.0 | 1160.0 | 1160 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1161.0 | 1161 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1162.0 | 1162 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1163.0 | 1163 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1164.0 | 1164 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1165.0 | 1165 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1166.0 | 1166 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1167.0 | 1167 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1168.0 | 1168 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1169.0 | 1169 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1170.0 | 1170 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1171.0 | 1171 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1172.0 | 1172 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1173.0 | 1173 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1174.0 | 1174 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1175.0 | 1175 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1176.0 | 1176 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1177.0 | 1177 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1178.0 | 1178 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1179.0 | 1179 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1180.0 | 1180 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1181.0 | 1181 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1182.0 | 1182 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1183.0 | 1183 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1184.0 | 1184 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1185.0 | 1185 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1186.0 | 1186 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1187.0 | 1187 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1188.0 | 1188 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1189.0 | 1189 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1190.0 | 1190 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1191.0 | 1191 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1192.0 | 1192 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1193.0 | 1193 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1194.0 | 1194 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1195.0 | 1195 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1196.0 | 1196 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1197.0 | 1197 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1198.0 | 1198 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1199.0 | 1199 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1200.0 | 1200 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1201.0 | 1201 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1202.0 | 1202 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1203.0 | 1203 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1204.0 | 1204 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1205.0 | 1205 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1206.0 | 1206 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1207.0 | 1207 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1208.0 | 1208 | nan | 
3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1209.0 | 1209 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1210.0 | 1210 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1211.0 | 1211 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1212.0 | 1212 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1213.0 | 1213 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1214.0 | 1214 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1215.0 | 1215 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1216.0 | 1216 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1217.0 | 1217 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1218.0 | 1218 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1219.0 | 1219 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1220.0 | 1220 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1221.0 | 1221 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1222.0 | 1222 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1223.0 | 1223 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1224.0 | 1224 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1225.0 | 1225 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1226.0 | 1226 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1227.0 | 1227 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1228.0 | 1228 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1229.0 | 1229 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1230.0 | 1230 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1231.0 | 1231 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1232.0 | 1232 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1233.0 | 1233 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1234.0 | 1234 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1235.0 | 1235 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1236.0 | 1236 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1237.0 | 1237 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1238.0 | 1238 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1239.0 | 1239 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1240.0 | 1240 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1241.0 | 1241 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1242.0 | 1242 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1243.0 | 1243 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1244.0 | 1244 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1245.0 | 1245 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1246.0 | 1246 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1247.0 | 1247 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1248.0 | 1248 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1249.0 | 1249 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1250.0 | 1250 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1251.0 | 1251 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1252.0 | 1252 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1253.0 | 1253 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1254.0 | 1254 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1255.0 | 1255 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1256.0 | 1256 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 
1257.0 | 1257 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1258.0 | 1258 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1259.0 | 1259 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1260.0 | 1260 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1261.0 | 1261 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1262.0 | 1262 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1263.0 | 1263 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1264.0 | 1264 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1265.0 | 1265 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1266.0 | 1266 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1267.0 | 1267 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1268.0 | 1268 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1269.0 | 1269 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1270.0 | 1270 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1271.0 | 1271 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1272.0 | 1272 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1273.0 | 1273 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1274.0 | 1274 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1275.0 | 1275 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1276.0 | 1276 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1277.0 | 1277 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1278.0 | 1278 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1279.0 | 1279 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1280.0 | 1280 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1281.0 | 1281 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1282.0 | 1282 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1283.0 | 1283 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1284.0 | 1284 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1285.0 | 1285 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1286.0 | 1286 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1287.0 | 1287 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1288.0 | 1288 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1289.0 | 1289 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1290.0 | 1290 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1291.0 | 1291 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1292.0 | 1292 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1293.0 | 1293 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1294.0 | 1294 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1295.0 | 1295 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1296.0 | 1296 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1297.0 | 1297 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1298.0 | 1298 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1299.0 | 1299 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1300.0 | 1300 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1301.0 | 1301 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1302.0 | 1302 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1303.0 | 1303 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1304.0 | 1304 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1305.0 | 1305 | nan | 3.5714 | 1.2195 | 3.5714 | 
3.5714 | 19.0 | | 0.0 | 1306.0 | 1306 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1307.0 | 1307 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1308.0 | 1308 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1309.0 | 1309 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1310.0 | 1310 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1311.0 | 1311 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1312.0 | 1312 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1313.0 | 1313 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1314.0 | 1314 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1315.0 | 1315 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1316.0 | 1316 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1317.0 | 1317 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1318.0 | 1318 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1319.0 | 1319 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1320.0 | 1320 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1321.0 | 1321 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1322.0 | 1322 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1323.0 | 1323 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1324.0 | 1324 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1325.0 | 1325 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1326.0 | 1326 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1327.0 | 1327 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1328.0 | 1328 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1329.0 | 1329 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1330.0 | 1330 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1331.0 | 1331 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1332.0 | 1332 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1333.0 | 1333 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1334.0 | 1334 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1335.0 | 1335 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1336.0 | 1336 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1337.0 | 1337 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1338.0 | 1338 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1339.0 | 1339 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1340.0 | 1340 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1341.0 | 1341 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1342.0 | 1342 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1343.0 | 1343 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1344.0 | 1344 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1345.0 | 1345 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1346.0 | 1346 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1347.0 | 1347 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1348.0 | 1348 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1349.0 | 1349 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1350.0 | 1350 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1351.0 | 1351 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1352.0 | 1352 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1353.0 | 1353 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1354.0 | 1354 | nan | 
3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | 0.0 | 1355.0 | 1355 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | | ... | ... | ... | ... | ... | ... | ... | ... | ... | | 0.0 | 2000.0 | 2000 | nan | 3.5714 | 1.2195 | 3.5714 | 3.5714 | 19.0 | ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.19.2 - Tokenizers 0.19.1
Ariffiq99/CRAB_COPA_KUCI_Bert_Base_Uncased_finetuned
Ariffiq99
2024-06-04T04:38:09Z
105
0
transformers
[ "transformers", "tensorboard", "safetensors", "bert", "multiple-choice", "generated_from_trainer", "base_model:Ariffiq99/COPA_KUCI_Bert_Base_Uncased_Finetuned", "base_model:finetune:Ariffiq99/COPA_KUCI_Bert_Base_Uncased_Finetuned", "license:apache-2.0", "endpoints_compatible", "region:us" ]
multiple-choice
2024-06-04T03:39:36Z
--- license: apache-2.0 base_model: Ariffiq99/COPA_KUCI_Bert_Base_Uncased_Finetuned tags: - generated_from_trainer metrics: - f1 model-index: - name: CRAB_COPA_KUCI_Bert_Base_Uncased_finetuned results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CRAB_COPA_KUCI_Bert_Base_Uncased_finetuned This model is a fine-tuned version of [Ariffiq99/COPA_KUCI_Bert_Base_Uncased_Finetuned](https://huggingface.co/Ariffiq99/COPA_KUCI_Bert_Base_Uncased_Finetuned) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.7686 - F1: 0.7694 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 1.0747 | 1.0 | 2880 | 0.9424 | 0.7014 | | 0.9502 | 2.0 | 5760 | 0.8660 | 0.7167 | | 0.8039 | 3.0 | 8640 | 0.7995 | 0.7278 | | 0.7633 | 4.0 | 11520 | 0.8053 | 0.7333 | | 0.7705 | 5.0 | 14400 | 0.8241 | 0.75 | | 0.8075 | 6.0 | 17280 | 0.7628 | 0.7667 | | 0.6885 | 7.0 | 20160 | 0.7813 | 0.7708 | | 0.6746 | 8.0 | 23040 | 0.7686 | 0.7694 | ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.19.2 - Tokenizers 0.19.1
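A minimal inference sketch for this checkpoint, assuming the standard 🤗 Transformers multiple-choice API applies (the premise/choice pair below is an illustrative COPA-style example, not drawn from the training data):

```python
import torch
from transformers import AutoTokenizer, AutoModelForMultipleChoice

model_id = "Ariffiq99/CRAB_COPA_KUCI_Bert_Base_Uncased_finetuned"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMultipleChoice.from_pretrained(model_id)

premise = "The man broke his toe. What was the cause?"  # hypothetical example
choices = ["He got a hole in his sock.", "He dropped a hammer on his foot."]

# Pair the premise with each candidate answer, then batch them as one example:
# AutoModelForMultipleChoice expects inputs of shape (batch, num_choices, seq_len).
enc = tokenizer([premise] * len(choices), choices, return_tensors="pt", padding=True)
enc = {k: v.unsqueeze(0) for k, v in enc.items()}

with torch.no_grad():
    logits = model(**enc).logits  # shape: (1, num_choices)
print("Predicted choice:", choices[logits.argmax(dim=-1).item()])
```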
mradermacher/Machroom-3B-model_stock-GGUF
mradermacher
2024-06-04T04:36:07Z
6
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2024-06-04T04:25:44Z
--- base_model: DreadPoor/Machroom-3B-model_stock language: - en library_name: transformers license: apache-2.0 quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/DreadPoor/Machroom-3B-model_stock <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q2_K.gguf) | Q2_K | 1.2 | | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.IQ3_XS.gguf) | IQ3_XS | 1.3 | | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.IQ3_S.gguf) | IQ3_S | 1.4 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q3_K_S.gguf) | Q3_K_S | 1.4 | | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.IQ3_M.gguf) | IQ3_M | 1.4 | | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q3_K_M.gguf) | Q3_K_M | 1.5 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q3_K_L.gguf) | Q3_K_L | 1.6 | | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.IQ4_XS.gguf) | IQ4_XS | 1.6 | | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q4_K_S.gguf) | Q4_K_S | 1.7 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q4_K_M.gguf) | Q4_K_M | 1.8 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q5_K_S.gguf) | Q5_K_S | 2.0 | | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q5_K_M.gguf) | Q5_K_M | 2.1 | | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q6_K.gguf) | Q6_K | 2.4 | very good quality | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.Q8_0.gguf) | Q8_0 | 3.1 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/Machroom-3B-model_stock-GGUF/resolve/main/Machroom-3B-model_stock.f16.gguf) | f16 | 5.7 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: 
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
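For fetching one of the quant files listed in the table above, a small download sketch (assuming `huggingface_hub` is installed; swap in whichever quant filename suits your hardware):

```python
from huggingface_hub import hf_hub_download

# Fetch one quant file from this repo; the returned local path can then be
# passed to llama.cpp or llama-cpp-python as the model file.
path = hf_hub_download(
    repo_id="mradermacher/Machroom-3B-model_stock-GGUF",
    filename="Machroom-3B-model_stock.Q4_K_M.gguf",  # "fast, recommended" per the table
)
print(path)
```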
MubarakB/zxCm3h8ADcB3R0ve2rgC
MubarakB
2024-06-04T04:35:51Z
0
0
peft
[ "peft", "safetensors", "arxiv:1910.09700", "base_model:NousResearch/Llama-2-7b-chat-hf", "base_model:adapter:NousResearch/Llama-2-7b-chat-hf", "region:us" ]
null
2024-06-04T04:35:47Z
--- library_name: peft base_model: NousResearch/Llama-2-7b-chat-hf --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ### Framework versions - PEFT 0.11.1
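A hedged loading sketch, assuming this repository contains a standard PEFT adapter for the base model named in the card metadata (untested; adjust dtype and device placement to your hardware):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NousResearch/Llama-2-7b-chat-hf"   # from the card metadata
adapter_id = "MubarakB/zxCm3h8ADcB3R0ve2rgC"  # this repository (assumed adapter)

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the adapter weights
```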
hdve/google-gemma-2b-1717475491
hdve
2024-06-04T04:33:55Z
141
0
transformers
[ "transformers", "safetensors", "gemma", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T04:31:33Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
bamaxi/ruBert-base-sakha
bamaxi
2024-06-04T04:28:42Z
127
0
transformers
[ "transformers", "safetensors", "bert", "fill-mask", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2024-06-03T23:19:14Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
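A minimal fill-mask sketch with 🤗 Transformers, assuming this checkpoint loads with the stock `pipeline` API (the masked sentence below is an arbitrary placeholder, not from the card):

```python
from transformers import pipeline

# Assumption: the repo hosts a standard BERT fill-mask head usable via pipeline().
fill = pipeline("fill-mask", model="bamaxi/ruBert-base-sakha")

# [MASK] is the mask token for BERT-style tokenizers; the sentence is a placeholder.
for pred in fill("Paris is the [MASK] of France."):
    print(pred["token_str"], round(pred["score"], 3))
```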
mradermacher/Adamus-7B-slerp-GGUF
mradermacher
2024-06-04T04:28:17Z
22
0
transformers
[ "transformers", "gguf", "merge", "mergekit", "lazymergekit", "mlabonne/NeuralBeagle14-7B", "cognitivecomputations/dolphin-2.8-mistral-7b-v02", "en", "base_model:vtboyarc/Adamus-7B-slerp", "base_model:quantized:vtboyarc/Adamus-7B-slerp", "endpoints_compatible", "region:us", "conversational" ]
null
2024-06-04T04:02:00Z
--- base_model: vtboyarc/Adamus-7B-slerp language: - en library_name: transformers quantized_by: mradermacher tags: - merge - mergekit - lazymergekit - mlabonne/NeuralBeagle14-7B - cognitivecomputations/dolphin-2.8-mistral-7b-v02 --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/vtboyarc/Adamus-7B-slerp <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q2_K.gguf) | Q2_K | 2.8 | | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.IQ3_XS.gguf) | IQ3_XS | 3.1 | | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q3_K_S.gguf) | Q3_K_S | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.IQ3_S.gguf) | IQ3_S | 3.3 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.IQ3_M.gguf) | IQ3_M | 3.4 | | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q3_K_M.gguf) | Q3_K_M | 3.6 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q3_K_L.gguf) | Q3_K_L | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.IQ4_XS.gguf) | IQ4_XS | 4.0 | | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q4_K_S.gguf) | Q4_K_S | 4.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q4_K_M.gguf) | Q4_K_M | 4.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q5_K_S.gguf) | Q5_K_S | 5.1 | | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q5_K_M.gguf) | Q5_K_M | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q6_K.gguf) | Q6_K | 6.0 | very good quality | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.Q8_0.gguf) | Q8_0 | 7.8 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/Adamus-7B-slerp-GGUF/resolve/main/Adamus-7B-slerp.f16.gguf) | f16 | 14.6 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some 
other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
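For a quick local test of one of the GGUF files listed above, a minimal llama-cpp-python sketch (the quant choice, context size, and prompt are assumptions; pick any file from the table):

```python
from llama_cpp import Llama

# Assumption: Adamus-7B-slerp.Q4_K_M.gguf (from the table above) was downloaded locally.
llm = Llama(model_path="Adamus-7B-slerp.Q4_K_M.gguf", n_ctx=4096)

out = llm("Q: What does a slerp merge do? A:", max_tokens=96)
print(out["choices"][0]["text"])
```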
Zoyd/nyunai_nyun-llama3-62B-3_5bpw_exl2
Zoyd
2024-06-04T04:20:09Z
5
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "exl2", "region:us" ]
text-generation
2024-06-03T17:18:31Z
--- license: llama3 --- **Exllamav2** quant (**exl2** / **3.5 bpw**) made with ExLlamaV2 v0.1.3 Other EXL2 quants: | **Quant** | **Model Size** | **lm_head** | | ----- | ---------- | ------- | |<center>**[2.2](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_2bpw_exl2)**</center> | <center>18625 MB</center> | <center>6</center> | |<center>**[2.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_5bpw_exl2)**</center> | <center>20645 MB</center> | <center>6</center> | |<center>**[3.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_0bpw_exl2)**</center> | <center>24211 MB</center> | <center>6</center> | |<center>**[3.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_5bpw_exl2)**</center> | <center>27784 MB</center> | <center>6</center> | |<center>**[3.75](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_75bpw_exl2)**</center> | <center>29572 MB</center> | <center>6</center> | |<center>**[4.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_0bpw_exl2)**</center> | <center>31359 MB</center> | <center>6</center> | |<center>**[4.25](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_25bpw_exl2)**</center> | <center>33139 MB</center> | <center>6</center> | |<center>**[5.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-5_0bpw_exl2)**</center> | <center>38500 MB</center> | <center>6</center> | |<center>**[6.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_0bpw_exl2)**</center> | <center>45805 MB</center> | <center>8</center> | |<center>**[6.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_5bpw_exl2)**</center> | <center>49410 MB</center> | <center>8</center> | |<center>**[8.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-8_0bpw_exl2)**</center> | <center>54655 MB</center> | <center>8</center> |
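A minimal sketch for fetching one of the quant repos above with huggingface_hub (the local directory name is an assumption):

```python
from huggingface_hub import snapshot_download

# Download the 3.5 bpw EXL2 quant listed above into a local folder (path is an assumption).
local_dir = snapshot_download(
    repo_id="Zoyd/nyunai_nyun-llama3-62B-3_5bpw_exl2",
    local_dir="nyun-llama3-62B-3_5bpw",
)
print(local_dir)
```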
SEHYONG/Llama-3-Open-Ko-8B-Instruct-kookmin7
SEHYONG
2024-06-04T04:11:42Z
7
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "conversational", "en", "base_model:SEHYONG/Llama-3-Open-Ko-8B-Instruct-kookmin6", "base_model:finetune:SEHYONG/Llama-3-Open-Ko-8B-Instruct-kookmin6", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T04:05:37Z
--- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl base_model: SEHYONG/Llama-3-Open-Ko-8B-Instruct-kookmin6 --- # Uploaded model - **Developed by:** SEHYONG - **License:** apache-2.0 - **Finetuned from model :** SEHYONG/Llama-3-Open-Ko-8B-Instruct-kookmin6 This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
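A minimal loading sketch with Unsloth's `FastLanguageModel` (the sequence length and 4-bit flag are assumptions, not values from the card):

```python
from unsloth import FastLanguageModel

# Assumptions: 2048 context and 4-bit loading; the card does not specify either.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="SEHYONG/Llama-3-Open-Ko-8B-Instruct-kookmin7",
    max_seq_length=2048,
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch Unsloth into its fast inference mode
```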
Rudra360/Emoji_Suggester
Rudra360
2024-06-04T04:09:27Z
0
0
spacy
[ "spacy", "en", "region:us" ]
null
2024-06-03T14:17:44Z
--- language: - en library_name: spacy --- # Emoji Suggester Emoji Suggester is a tool designed to recommend relevant emojis based on incoming messages from social media apps, enhancing expressiveness and engagement in your conversations. The suggestions are powered by a model trained on a dataset of Twitter messages. ## Table of Contents - [Installation](#installation) - [Usage](#usage) - [Contributing](#contributing) - [License](#license) - [Contact](#contact) ## Installation To install Emoji Suggester, clone the repository: ```bash git clone https://huggingface.co/Rudra360/Emoji_Suggester ``` or, over SSH: ```bash git clone git@huggingface.co:Rudra360/Emoji_Suggester.git ``` ## Usage 1. Change into the repository directory: ```bash cd Emoji_Suggester ``` 2. Then run the following script: ```python from util import predict message = "I'm so happy today!" suggested_emojis = predict(message) print(suggested_emojis) ```
hdve/Qwen-Qwen1.5-7B-1717473930
hdve
2024-06-04T04:08:43Z
7
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T04:06:13Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
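A minimal chat-generation sketch, assuming the repo behaves like a standard Qwen1.5 chat checkpoint (the prompt and generation length are placeholders):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "hdve/Qwen-Qwen1.5-7B-1717473930"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")

# Assumption: the tokenizer ships a Qwen1.5-style chat template.
messages = [{"role": "user", "content": "Give me a one-line haiku about autumn."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```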
andikazf15/IndoBERT-QA-product-pred
andikazf15
2024-06-04T04:08:06Z
36
0
transformers
[ "transformers", "tensorboard", "safetensors", "bert", "question-answering", "generated_from_trainer", "base_model:rizquuula/mBERT-IndoSQuADv2_1691852742-16-2e-06-0.01-5", "base_model:finetune:rizquuula/mBERT-IndoSQuADv2_1691852742-16-2e-06-0.01-5", "license:apache-2.0", "endpoints_compatible", "region:us" ]
question-answering
2024-06-03T03:02:51Z
--- license: apache-2.0 base_model: rizquuula/mBERT-IndoSQuADv2_1691852742-16-2e-06-0.01-5 tags: - generated_from_trainer model-index: - name: IndoBERT-QA-product-pred results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # IndoBERT-QA-product-pred This model is a fine-tuned version of [rizquuula/mBERT-IndoSQuADv2_1691852742-16-2e-06-0.01-5](https://huggingface.co/rizquuula/mBERT-IndoSQuADv2_1691852742-16-2e-06-0.01-5) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.41.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.2 - Tokenizers 0.19.1
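A minimal extractive-QA sketch with the 🤗 `pipeline` API (the Indonesian question/context pair is illustrative only, not from the unspecified training set):

```python
from transformers import pipeline

qa = pipeline("question-answering", model="andikazf15/IndoBERT-QA-product-pred")

# Illustrative Indonesian product question; answers are extracted spans from the context.
result = qa(
    question="Berapa kapasitas baterai produk ini?",
    context="Produk ini memiliki kapasitas baterai 5000 mAh dan layar 6,5 inci.",
)
print(result["answer"], result["score"])
```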
chainup244/Qwen-Qwen1.5-7B-1717473432
chainup244
2024-06-04T04:04:37Z
7
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T03:57:17Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
cgus/AlchemistCoder-DS-6.7B-exl2
cgus
2024-06-04T03:59:59Z
5
0
transformers
[ "transformers", "llama", "text-generation", "code generation", "conversational", "arxiv:2405.19265", "base_model:internlm/AlchemistCoder-DS-6.7B", "base_model:quantized:internlm/AlchemistCoder-DS-6.7B", "license:apache-2.0", "autotrain_compatible", "4-bit", "exl2", "region:us" ]
text-generation
2024-06-03T23:59:24Z
--- license: apache-2.0 base_model: internlm/AlchemistCoder-DS-6.7B inference: false tags: - code generation --- # AlchemistCoder-DS-6.7B-exl2 Original model: [AlchemistCoder-DS-6.7B](https://huggingface.co/internlm/AlchemistCoder-DS-6.7B) Model creator: [InternLM](https://huggingface.co/internlm) ## Quants [4bpw h6 (main)](https://huggingface.co/cgus/AlchemistCoder-DS-6.7B-exl2/tree/main) [4.25bpw h6](https://huggingface.co/cgus/AlchemistCoder-DS-6.7B-exl2/tree/4.25bpw-h6) [4.65bpw h6](https://huggingface.co/cgus/AlchemistCoder-DS-6.7B-exl2/tree/4.65bpw-h6) [5bpw h6](https://huggingface.co/cgus/AlchemistCoder-DS-6.7B-exl2/tree/5bpw-h6) [6bpw h6](https://huggingface.co/cgus/AlchemistCoder-DS-6.7B-exl2/tree/6bpw-h6) [8bpw h8](https://huggingface.co/cgus/AlchemistCoder-DS-6.7B-exl2/tree/8bpw-h8) ## Quantization notes Made with Exllamav2 0.1.3 using its default calibration dataset. ## How to run This model is meant to be used with the Exllamav2 loader, which requires the model to be fully loaded into GPU VRAM. It primarily requires an Nvidia RTX card on Windows/Linux or an AMD card on Linux. If you want to use this model but your system doesn't meet these requirements, you should look for GGUF versions of the model. It can be used with apps like: [Text Generation Webui](https://github.com/oobabooga/text-generation-webui) [KoboldAI](https://github.com/henk717/KoboldAI) [ExUI](https://github.com/turboderp/exui) [lollms-webui](https://github.com/ParisNeo/lollms-webui) # Original model card # AlchemistCoder: Harmonizing and Eliciting Code Capability by Hindsight Tuning on Multi-source Data [[🤗 HuggingFace](https://huggingface.co/internlm/AlchemistCoder-DS-6.7B)] [[📃 Paper](https://arxiv.org/abs/2405.19265)] [[🌐 Project Page](https://internlm.github.io/AlchemistCoder/)] ## ✨ Highlights > **Abstract:** *Open-source Large Language Models (LLMs) and their specialized variants, particularly Code LLMs, have recently delivered impressive performance. However, previous Code LLMs are typically fine-tuned on single-source data with limited quality and diversity, which may insufficiently elicit the potential of pre-trained Code LLMs. In this paper, we present AlchemistCoder, a series of Code LLMs with enhanced code generation and generalization capabilities fine-tuned on multi-source data. To achieve this, we pioneer to unveil inherent conflicts among the various styles and qualities in multi-source code corpora and introduce data-specific prompts with hindsight relabeling, termed AlchemistPrompts, to harmonize different data sources and instruction-response pairs. Additionally, we propose incorporating the data construction process into the fine-tuning data as code comprehension tasks, including instruction evolution, data filtering, and code review. Extensive experiments demonstrate that AlchemistCoder holds a clear lead among all models of the same size (6.7B/7B) and rivals or even surpasses larger models (15B/33B/70B), showcasing the efficacy of our method in refining instruction-following capabilities and advancing the boundaries of code intelligence.* - **AlchemistPrompts**: Designed as data-specific prompts for harmonizing inherent conflicts in multi-source data and mitigating the instruction/response misalignment at a fine-grained level. - **Code Comprehension Tasks**: Sourced from the process of data construction, consisting of instruction evolution, data filtering, and code review. - **Harmonized Multi-source Data**: Instruction tuned on 200M tokens, including 6 types of high-quality data. 
- **Superior Model Performance**: Surpassing all the open-source models of the same size (6.7/7B), and rivaling or even beating larger models (15B/33B/70B/ChatGPT) on 6 code benchmarks. - **Advanced generic capabilities**: Demonstrated by the significant improvements on MMLU, BBH, and GSM8K. ## 🚀 Quick Start ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("internlm/AlchemistCoder-DS-6.7B", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained("internlm/AlchemistCoder-DS-6.7B", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda() model = model.eval() input_text = "Implement the Dijkstra algorithm in Python" inputs = tokenizer(input_text, return_tensors="pt").to(model.device) outputs = model.generate(**inputs, max_length=128) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` The above code will generate the following result: ```python import heapq def dijkstra(graph, start, end): """ Find the shortest path between two nodes in a graph using Dijkstra's algorithm. Args: graph (dict): A dictionary representing the graph, where the keys are nodes and the values are dictionaries containing the neighboring nodes and their edge weights. start: The starting node. end: The target node. Returns: list: The shortest path from the start node to the target node as a list of nodes. Raises: ValueError: If either the start or end node is not present in the graph. """ # Check if start and end nodes are in the graph if start not in graph: raise ValueError("Start node is not present in the graph.") if end not in graph: raise ValueError("End node is not present in the graph.") # Initialize the distance dictionary with infinite distances for all nodes distances = {node: float('inf') for node in graph} # Set the distance of the start node to 0 distances[start] = 0 # Initialize the heap with the start node heap = [(0, start)] # Initialize the previous dictionary to keep track of the path previous = {} while heap: # Pop the node with the smallest distance from the heap current_distance, current_node = heapq.heappop(heap) # If the current node is the end node, we have found the shortest path if current_node == end: # Reconstruct the path by following the previous nodes path = [] while current_node in previous: path.append(current_node) current_node = previous[current_node] path.append(start) # Reverse the path to get the correct order return path[::-1] # Iterate over the neighboring nodes and update their distances for neighbor, weight in graph[current_node].items(): new_distance = current_distance + weight # If a shorter path is found, update the distance and previous node if new_distance < distances[neighbor]: distances[neighbor] = new_distance previous[neighbor] = current_node heapq.heappush(heap, (new_distance, neighbor)) # If there is no path between the start and end nodes, return an empty list return [] ``` > The `dijkstra` function takes three arguments: `graph`, `start`, and `end`. The `graph` argument is a dictionary representing the graph, where the keys are nodes and the values are dictionaries containing the neighboring nodes and their edge weights. The `start` argument is the starting node, and the `end` argument is the target node. > The function first checks if the start and end nodes are present in the graph. If either node is not present, a `ValueError` is raised. > The function then initializes a `distances` dictionary with infinite distances for all nodes. 
It sets the distance of the start node to 0. It also initializes a heap with the start node and a `previous` dictionary to keep track of the path. > The algorithm then iterates over the nodes in the heap. For each node, it checks if it is the end node. If it is, the function reconstructs the path by following the previous nodes and returns the shortest path as a list of nodes in the correct order. > If the current node is not the end node, the algorithm iterates over its neighboring nodes and updates their distances if a shorter path is found. It also updates the `previous` dictionary to keep track of the path. > If there is no path between the start and end nodes, the function returns an empty list. > Note that this implementation assumes that the graph is a directed graph, and it uses a heap data structure to efficiently select the node with the smallest distance at each step. ## 🧪 Evaluation and Fine-tune Please refer to [**AlchemistCoder**](https://github.com/InternLM/AlchemistCoder) and [**InternLM**](https://github.com/InternLM/InternLM/tree/main). ## 😃 Acknowledgments *AlchemistCoder* is built with [**InternLM**](https://github.com/InternLM) and [**OpenCompass**](https://github.com/open-compass). Thanks for their awesome work! ## 📧 Contact If you have any questions, please create an issue on this repository or contact us at: - sugger@tongji.edu.cn - zhangwenwei@pjlab.org.cn ## 🌟 Citation If you find our work useful, please consider citing: ```bibtex @misc{song2024alchemistcoder, title={AlchemistCoder: Harmonizing and Eliciting Code Capability by Hindsight Tuning on Multi-source Data}, author={Zifan Song and Yudong Wang and Wenwei Zhang and Kuikun Liu and Chengqi Lyu and Demin Song and Qipeng Guo and Hang Yan and Dahua Lin and Kai Chen and Cairong Zhao}, year={2024}, eprint={2405.19265}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
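Tying back to the "How to run" notes above, a minimal ExLlamaV2 generation sketch (the local directory name and sampler defaults are assumptions, and exact constructor signatures can vary between ExLlamaV2 releases):

```python
from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Cache, ExLlamaV2Tokenizer
from exllamav2.generator import ExLlamaV2BaseGenerator, ExLlamaV2Sampler

# Assumption: one of the quant branches above was downloaded to this local folder.
config = ExLlamaV2Config()
config.model_dir = "AlchemistCoder-DS-6.7B-exl2"
config.prepare()

model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache)  # split layers across available GPU VRAM
tokenizer = ExLlamaV2Tokenizer(config)

generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)
settings = ExLlamaV2Sampler.Settings()  # default sampling parameters

print(generator.generate_simple("def quicksort(arr):", settings, 128))
```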
Zoyd/nyunai_nyun-llama3-62B-4_0bpw_exl2
Zoyd
2024-06-04T03:48:58Z
8
1
transformers
[ "transformers", "safetensors", "llama", "text-generation", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "exl2", "region:us" ]
text-generation
2024-06-03T19:56:00Z
--- license: llama3 --- **Exllamav2** quant (**exl2** / **4.0 bpw**) made with ExLlamaV2 v0.1.3 Other EXL2 quants: | **Quant** | **Model Size** | **lm_head** | | ----- | ---------- | ------- | |<center>**[2.2](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_2bpw_exl2)**</center> | <center>18625 MB</center> | <center>6</center> | |<center>**[2.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_5bpw_exl2)**</center> | <center>20645 MB</center> | <center>6</center> | |<center>**[3.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_0bpw_exl2)**</center> | <center>24211 MB</center> | <center>6</center> | |<center>**[3.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_5bpw_exl2)**</center> | <center>27784 MB</center> | <center>6</center> | |<center>**[3.75](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_75bpw_exl2)**</center> | <center>29572 MB</center> | <center>6</center> | |<center>**[4.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_0bpw_exl2)**</center> | <center>31359 MB</center> | <center>6</center> | |<center>**[4.25](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_25bpw_exl2)**</center> | <center>33139 MB</center> | <center>6</center> | |<center>**[5.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-5_0bpw_exl2)**</center> | <center>38500 MB</center> | <center>6</center> | |<center>**[6.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_0bpw_exl2)**</center> | <center>45805 MB</center> | <center>8</center> | |<center>**[6.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_5bpw_exl2)**</center> | <center>49410 MB</center> | <center>8</center> | |<center>**[8.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-8_0bpw_exl2)**</center> | <center>54655 MB</center> | <center>8</center> |
deewuok/sentiment-lora
deewuok
2024-06-04T03:44:18Z
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-06-04T03:43:16Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Zoyd/nyunai_nyun-llama3-62B-2_5bpw_exl2
Zoyd
2024-06-04T03:41:15Z
5
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "exl2", "region:us" ]
text-generation
2024-06-03T14:47:50Z
--- license: llama3 --- **Exllamav2** quant (**exl2** / **2.5 bpw**) made with ExLlamaV2 v0.1.3 Other EXL2 quants: | **Quant** | **Model Size** | **lm_head** | | ----- | ---------- | ------- | |<center>**[2.2](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_2bpw_exl2)**</center> | <center>18625 MB</center> | <center>6</center> | |<center>**[2.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_5bpw_exl2)**</center> | <center>20645 MB</center> | <center>6</center> | |<center>**[3.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_0bpw_exl2)**</center> | <center>24211 MB</center> | <center>6</center> | |<center>**[3.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_5bpw_exl2)**</center> | <center>27784 MB</center> | <center>6</center> | |<center>**[3.75](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_75bpw_exl2)**</center> | <center>29572 MB</center> | <center>6</center> | |<center>**[4.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_0bpw_exl2)**</center> | <center>31359 MB</center> | <center>6</center> | |<center>**[4.25](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_25bpw_exl2)**</center> | <center>33139 MB</center> | <center>6</center> | |<center>**[5.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-5_0bpw_exl2)**</center> | <center>38500 MB</center> | <center>6</center> | |<center>**[6.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_0bpw_exl2)**</center> | <center>45805 MB</center> | <center>8</center> | |<center>**[6.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_5bpw_exl2)**</center> | <center>49410 MB</center> | <center>8</center> | |<center>**[8.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-8_0bpw_exl2)**</center> | <center>54655 MB</center> | <center>8</center> |
srbdtwentyfour/mystery-llama-3-8b-v2
srbdtwentyfour
2024-06-04T03:39:26Z
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-Instruct-bnb-4bit", "base_model:finetune:unsloth/llama-3-8b-Instruct-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-06-03T08:18:31Z
--- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl base_model: unsloth/llama-3-8b-Instruct-bnb-4bit --- # Uploaded model - **Developed by:** srbdtwentyfour - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-Instruct-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
srbdtwentyfour/mystery-llama-3-8b-v1
srbdtwentyfour
2024-06-04T03:39:03Z
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-Instruct-bnb-4bit", "base_model:finetune:unsloth/llama-3-8b-Instruct-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-06-04T03:37:30Z
--- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl base_model: unsloth/llama-3-8b-Instruct-bnb-4bit --- # Uploaded model - **Developed by:** srbdtwentyfour - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-Instruct-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
hienbm/llama-3-8b-bnb-4bit_mtast
hienbm
2024-06-04T03:34:23Z
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "base_model:finetune:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-05-28T05:26:48Z
--- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl base_model: unsloth/llama-3-8b-bnb-4bit --- # Uploaded model - **Developed by:** hienbm - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
Zoyd/nyunai_nyun-llama3-62B-8_0bpw_exl2
Zoyd
2024-06-04T03:33:44Z
6
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "8-bit", "exl2", "region:us" ]
text-generation
2024-06-04T02:43:52Z
--- license: llama3 --- **Exllamav2** quant (**exl2** / **8.0 bpw**) made with ExLlamaV2 v0.1.3 Other EXL2 quants: | **Quant** | **Model Size** | **lm_head** | | ----- | ---------- | ------- | |<center>**[2.2](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_2bpw_exl2)**</center> | <center>18625 MB</center> | <center>6</center> | |<center>**[2.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_5bpw_exl2)**</center> | <center>20645 MB</center> | <center>6</center> | |<center>**[3.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_0bpw_exl2)**</center> | <center>24211 MB</center> | <center>6</center> | |<center>**[3.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_5bpw_exl2)**</center> | <center>27784 MB</center> | <center>6</center> | |<center>**[3.75](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_75bpw_exl2)**</center> | <center>29572 MB</center> | <center>6</center> | |<center>**[4.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_0bpw_exl2)**</center> | <center>31359 MB</center> | <center>6</center> | |<center>**[4.25](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_25bpw_exl2)**</center> | <center>33139 MB</center> | <center>6</center> | |<center>**[5.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-5_0bpw_exl2)**</center> | <center>38500 MB</center> | <center>6</center> | |<center>**[6.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_0bpw_exl2)**</center> | <center>45805 MB</center> | <center>8</center> | |<center>**[6.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_5bpw_exl2)**</center> | <center>49410 MB</center> | <center>8</center> | |<center>**[8.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-8_0bpw_exl2)**</center> | <center>54655 MB</center> | <center>8</center> |
smfire/my-custom-bert
smfire
2024-06-04T03:29:23Z
161
0
transformers
[ "transformers", "safetensors", "bert", "feature-extraction", "custom", "custom-bert", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
feature-extraction
2024-06-04T02:21:12Z
--- library_name: transformers tags: - custom - custom-bert --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
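Given the `custom`/`custom-bert` tags, a minimal feature-extraction sketch (the `trust_remote_code` flag is an assumption in case the checkpoint ships custom modeling code; the input sentence is a placeholder):

```python
from transformers import pipeline

# Assumption: custom code may be bundled with the repo, hence trust_remote_code=True.
extractor = pipeline(
    "feature-extraction", model="smfire/my-custom-bert", trust_remote_code=True
)

embedding = extractor("An example sentence to embed.")
print(len(embedding[0]), len(embedding[0][0]))  # token count x hidden size
```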
bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF
bartowski
2024-06-04T03:26:56Z
150
0
null
[ "gguf", "text-generation", "en", "license:llama3", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2024-06-04T03:09:56Z
---
language:
- en
license: llama3
quantized_by: bartowski
pipeline_tag: text-generation
---

## Llamacpp imatrix Quantizations of Llama-3-Instruct-8B-SimPO-ExPO

Using <a href="https://github.com/ggerganov/llama.cpp/">llama.cpp</a> release <a href="https://github.com/ggerganov/llama.cpp/releases/tag/b3070">b3070</a> for quantization.

Original model: https://huggingface.co/chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO

All quants made using imatrix option with dataset from [here](https://gist.github.com/bartowski1182/eb213dccb3571f863da82e99418f81e8)

## Prompt format

```
<|begin_of_text|><|start_header_id|>system<|end_header_id|>

{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>

{prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

```

## Download a file (not the whole branch) from below:

| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q8_0.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q8_0.gguf) | Q8_0 | 8.54GB | Extremely high quality, generally unneeded but max available quant. |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q6_K.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q6_K.gguf) | Q6_K | 6.59GB | Very high quality, near perfect, *recommended*. |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q5_K_M.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q5_K_M.gguf) | Q5_K_M | 5.73GB | High quality, *recommended*. |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q5_K_S.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q5_K_S.gguf) | Q5_K_S | 5.59GB | High quality, *recommended*. |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q4_K_M.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q4_K_M.gguf) | Q4_K_M | 4.92GB | Good quality, uses about 4.83 bits per weight, *recommended*. |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q4_K_S.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q4_K_S.gguf) | Q4_K_S | 4.69GB | Slightly lower quality with more space savings, *recommended*. |
| [Llama-3-Instruct-8B-SimPO-ExPO-IQ4_XS.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-IQ4_XS.gguf) | IQ4_XS | 4.44GB | Decent quality, smaller than Q4_K_S with similar performance, *recommended*. |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q3_K_L.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q3_K_L.gguf) | Q3_K_L | 4.32GB | Lower quality but usable, good for low RAM availability. |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q3_K_M.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q3_K_M.gguf) | Q3_K_M | 4.01GB | Even lower quality. |
| [Llama-3-Instruct-8B-SimPO-ExPO-IQ3_M.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-IQ3_M.gguf) | IQ3_M | 3.78GB | Medium-low quality, new method with decent performance comparable to Q3_K_M. |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q3_K_S.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q3_K_S.gguf) | Q3_K_S | 3.66GB | Low quality, not recommended. |
| [Llama-3-Instruct-8B-SimPO-ExPO-IQ3_XS.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-IQ3_XS.gguf) | IQ3_XS | 3.51GB | Lower quality, new method with decent performance, slightly better than Q3_K_S. |
| [Llama-3-Instruct-8B-SimPO-ExPO-IQ3_XXS.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-IQ3_XXS.gguf) | IQ3_XXS | 3.27GB | Lower quality, new method with decent performance, comparable to Q3 quants. |
| [Llama-3-Instruct-8B-SimPO-ExPO-Q2_K.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-Q2_K.gguf) | Q2_K | 3.17GB | Very low quality but surprisingly usable. |
| [Llama-3-Instruct-8B-SimPO-ExPO-IQ2_M.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-IQ2_M.gguf) | IQ2_M | 2.94GB | Very low quality, uses SOTA techniques to also be surprisingly usable. |
| [Llama-3-Instruct-8B-SimPO-ExPO-IQ2_S.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-IQ2_S.gguf) | IQ2_S | 2.75GB | Very low quality, uses SOTA techniques to be usable. |
| [Llama-3-Instruct-8B-SimPO-ExPO-IQ2_XS.gguf](https://huggingface.co/bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF/blob/main/Llama-3-Instruct-8B-SimPO-ExPO-IQ2_XS.gguf) | IQ2_XS | 2.60GB | Very low quality, uses SOTA techniques to be usable. |

## Downloading using huggingface-cli

First, make sure you have huggingface-cli installed:

```
pip install -U "huggingface_hub[cli]"
```

Then, you can target the specific file you want:

```
huggingface-cli download bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF --include "Llama-3-Instruct-8B-SimPO-ExPO-Q4_K_M.gguf" --local-dir ./
```

If the model is bigger than 50GB, it will have been split into multiple files. To download them all to a local folder, run:

```
huggingface-cli download bartowski/Llama-3-Instruct-8B-SimPO-ExPO-GGUF --include "Llama-3-Instruct-8B-SimPO-ExPO-Q8_0.gguf/*" --local-dir Llama-3-Instruct-8B-SimPO-ExPO-Q8_0
```

You can either specify a new local-dir (Llama-3-Instruct-8B-SimPO-ExPO-Q8_0) or download them all in place (./)

## Which file should I choose?

A great write-up with charts comparing the performance of the various quants is provided by Artefact2 [here](https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9)

The first thing to figure out is how big a model you can run. To do this, you'll need to figure out how much RAM and/or VRAM you have.

If you want your model running as FAST as possible, you'll want to fit the whole thing on your GPU's VRAM. Aim for a quant with a file size 1-2GB smaller than your GPU's total VRAM. For example, on an 8GB card, the Q6_K file at 6.59GB fits comfortably.

If you want the absolute maximum quality, add both your system RAM and your GPU's VRAM together, then similarly grab a quant with a file size 1-2GB smaller than that total.

Next, you'll need to decide if you want to use an 'I-quant' or a 'K-quant'.

If you don't want to think too much, grab one of the K-quants. These are in format 'QX_K_X', like Q5_K_M.

If you want to get more into the weeds, you can check out this extremely useful feature chart: [llama.cpp feature matrix](https://github.com/ggerganov/llama.cpp/wiki/Feature-matrix)

But basically, if you're aiming for below Q4, and you're running cuBLAS (Nvidia) or rocBLAS (AMD), you should look towards the I-quants. These are in format IQX_X, like IQ3_M. These are newer and offer better performance for their size.

These I-quants can also be used on CPU and Apple Metal, but will be slower than their K-quant equivalents, so speed vs. performance is a tradeoff you'll have to decide.

The I-quants are *not* compatible with Vulkan, which also targets AMD cards, so if you have an AMD card, double-check whether you're using the rocBLAS build or the Vulkan build. At the time of writing, LM Studio has a preview with ROCm support, and other inference engines have specific builds for ROCm.

Want to support my work? Visit my ko-fi page here: https://ko-fi.com/bartowski
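As a quick illustration of the prompt format above, here is a minimal sketch using the `llama-cpp-python` bindings — an assumption, since any llama.cpp-based runtime works; the chosen file name and sampling values are illustrative, not from the original card:

```python
from llama_cpp import Llama

# Point this at whichever quant you downloaded, e.g. the Q4_K_M file
llm = Llama(model_path="Llama-3-Instruct-8B-SimPO-ExPO-Q4_K_M.gguf", n_ctx=8192)

# Llama-3 instruct prompt format, as documented in the card above
prompt = (
    "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
    "You are a helpful assistant.<|eot_id|>"
    "<|start_header_id|>user<|end_header_id|>\n\n"
    "Explain imatrix quantization in one sentence.<|eot_id|>"
    "<|start_header_id|>assistant<|end_header_id|>\n\n"
)

# Stop on the end-of-turn token so generation ends after the assistant reply
out = llm(prompt, max_tokens=256, stop=["<|eot_id|>"])
print(out["choices"][0]["text"])
```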
Zoyd/nyunai_nyun-llama3-62B-6_0bpw_exl2
Zoyd
2024-06-04T03:19:34Z
5
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "6-bit", "exl2", "region:us" ]
text-generation
2024-06-03T23:58:04Z
---
license: llama3
---

**Exllamav2** quant (**exl2** / **6.0 bpw**) made with ExLlamaV2 v0.1.3

Other EXL2 quants:

| **Quant** | **Model Size** | **lm_head** |
| ----- | ---------- | ------- |
|<center>**[2.2](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_2bpw_exl2)**</center> | <center>18625 MB</center> | <center>6</center> |
|<center>**[2.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_5bpw_exl2)**</center> | <center>20645 MB</center> | <center>6</center> |
|<center>**[3.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_0bpw_exl2)**</center> | <center>24211 MB</center> | <center>6</center> |
|<center>**[3.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_5bpw_exl2)**</center> | <center>27784 MB</center> | <center>6</center> |
|<center>**[3.75](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_75bpw_exl2)**</center> | <center>29572 MB</center> | <center>6</center> |
|<center>**[4.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_0bpw_exl2)**</center> | <center>31359 MB</center> | <center>6</center> |
|<center>**[4.25](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_25bpw_exl2)**</center> | <center>33139 MB</center> | <center>6</center> |
|<center>**[5.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-5_0bpw_exl2)**</center> | <center>38500 MB</center> | <center>6</center> |
|<center>**[6.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_0bpw_exl2)**</center> | <center>45805 MB</center> | <center>8</center> |
|<center>**[6.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_5bpw_exl2)**</center> | <center>49410 MB</center> | <center>8</center> |
|<center>**[8.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-8_0bpw_exl2)**</center> | <center>54655 MB</center> | <center>8</center> |
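The card gives no usage snippet; below is a minimal loading sketch under the assumption that the `exllamav2` Python package (the same project used for quantization) follows its published example API — the model directory, sampling settings, and prompt are illustrative:

```python
from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Cache, ExLlamaV2Tokenizer
from exllamav2.generator import ExLlamaV2BaseGenerator, ExLlamaV2Sampler

# Directory holding the downloaded 6.0 bpw quant
config = ExLlamaV2Config()
config.model_dir = "nyunai_nyun-llama3-62B-6_0bpw_exl2"
config.prepare()

model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache)  # split the ~45GB of weights across available GPUs

tokenizer = ExLlamaV2Tokenizer(config)
generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)

settings = ExLlamaV2Sampler.Settings()
settings.temperature = 0.7

# Generate up to 64 new tokens from an illustrative prompt
print(generator.generate_simple("The capital of France is", settings, 64))
```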
k707peepee/llama-3-8b-bnb-4bit
k707peepee
2024-06-04T03:15:55Z
4
0
transformers
[ "transformers", "gguf", "llama", "text-generation-inference", "unsloth", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "base_model:quantized:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-06-04T03:07:55Z
---
language:
- en
license: apache-2.0
tags:
- text-generation-inference
- transformers
- unsloth
- llama
- gguf
base_model: unsloth/llama-3-8b-bnb-4bit
---

# Uploaded model

- **Developed by:** k707peepee
- **License:** apache-2.0
- **Finetuned from model:** unsloth/llama-3-8b-bnb-4bit

This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
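The repository is tagged `gguf` (those files target llama.cpp-style runtimes); if it also hosts 4-bit safetensors, the standard Unsloth loading pattern would be a sketch like the following — `max_seq_length` is an assumption, not stated in the card:

```python
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="k707peepee/llama-3-8b-bnb-4bit",
    max_seq_length=2048,  # assumed; set to your working context length
    dtype=None,           # auto-detect
    load_in_4bit=True,    # assumes 4-bit safetensors are present in the repo
)
FastLanguageModel.for_inference(model)  # enable Unsloth's faster inference path
```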
MubarakB/VRJfqhGhBufxd2DZbN38
MubarakB
2024-06-04T03:10:29Z
0
0
peft
[ "peft", "safetensors", "arxiv:1910.09700", "base_model:NousResearch/Llama-2-7b-chat-hf", "base_model:adapter:NousResearch/Llama-2-7b-chat-hf", "region:us" ]
null
2024-06-04T03:10:25Z
--- library_name: peft base_model: NousResearch/Llama-2-7b-chat-hf --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ### Framework versions - PEFT 0.11.1
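The card's "How to Get Started" section is left as [More Information Needed]; given the base model declared in the front matter (`NousResearch/Llama-2-7b-chat-hf`), a minimal hedged sketch of the standard PEFT adapter-loading pattern would be:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NousResearch/Llama-2-7b-chat-hf"   # from the card's front matter
adapter_id = "MubarakB/VRJfqhGhBufxd2DZbN38"  # this repository

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attach the adapter weights

inputs = tokenizer("Hello, how are you?", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
```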
ALI-B/phi3-mini
ALI-B
2024-06-04T03:10:28Z
77
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "unsloth", "trl", "sft", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T03:07:14Z
--- library_name: transformers tags: - unsloth - trl - sft --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
HuggingFaceFW/ablation-exp-dedup-global_minhash-350BT
HuggingFaceFW
2024-06-04T03:10:19Z
5
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-03T23:35:11Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Zoyd/nyunai_nyun-llama3-62B-4_25bpw_exl2
Zoyd
2024-06-04T03:08:17Z
5
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "exl2", "region:us" ]
text-generation
2024-06-03T21:15:12Z
---
license: llama3
---

**Exllamav2** quant (**exl2** / **4.25 bpw**) made with ExLlamaV2 v0.1.3

Other EXL2 quants:

| **Quant** | **Model Size** | **lm_head** |
| ----- | ---------- | ------- |
|<center>**[2.2](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_2bpw_exl2)**</center> | <center>18625 MB</center> | <center>6</center> |
|<center>**[2.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-2_5bpw_exl2)**</center> | <center>20645 MB</center> | <center>6</center> |
|<center>**[3.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_0bpw_exl2)**</center> | <center>24211 MB</center> | <center>6</center> |
|<center>**[3.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_5bpw_exl2)**</center> | <center>27784 MB</center> | <center>6</center> |
|<center>**[3.75](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-3_75bpw_exl2)**</center> | <center>29572 MB</center> | <center>6</center> |
|<center>**[4.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_0bpw_exl2)**</center> | <center>31359 MB</center> | <center>6</center> |
|<center>**[4.25](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-4_25bpw_exl2)**</center> | <center>33139 MB</center> | <center>6</center> |
|<center>**[5.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-5_0bpw_exl2)**</center> | <center>38500 MB</center> | <center>6</center> |
|<center>**[6.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_0bpw_exl2)**</center> | <center>45805 MB</center> | <center>8</center> |
|<center>**[6.5](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-6_5bpw_exl2)**</center> | <center>49410 MB</center> | <center>8</center> |
|<center>**[8.0](https://huggingface.co/Zoyd/nyunai_nyun-llama3-62B-8_0bpw_exl2)**</center> | <center>54655 MB</center> | <center>8</center> |
Abhinay45/outputs
Abhinay45
2024-06-04T03:08:04Z
0
0
peft
[ "peft", "tensorboard", "safetensors", "trl", "sft", "unsloth", "generated_from_trainer", "dataset:yahma/alpaca-cleaned", "base_model:unsloth/llama-3-8b-bnb-4bit", "base_model:adapter:unsloth/llama-3-8b-bnb-4bit", "license:llama2", "region:us" ]
null
2024-06-04T03:05:36Z
---
license: llama2
library_name: peft
tags:
- trl
- sft
- unsloth
- generated_from_trainer
base_model: unsloth/llama-3-8b-bnb-4bit
datasets:
- yahma/alpaca-cleaned
model-index:
- name: Alpaca + Llama-3 8b Unsloth 2x faster finetuning.
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# Alpaca + Llama-3 8b Unsloth 2x faster finetuning.

This model is a fine-tuned version of [unsloth/llama-3-8b-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-bnb-4bit) on the alpaca dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 2
- eval_batch_size: 8
- seed: 3407
- gradient_accumulation_steps: 4
- total_train_batch_size: 8
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 5
- training_steps: 60
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- PEFT 0.11.1
- Transformers 4.41.1
- Pytorch 2.3.0+cu121
- Datasets 2.19.2
- Tokenizers 0.19.1
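For illustration (not from the original card): the hyperparameters above map onto `transformers.TrainingArguments` roughly as sketched below; `output_dir` and the Trainer/PEFT wiring are assumptions, and the Adam betas/epsilon listed in the card are the optimizer defaults:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="outputs",            # assumed; matches the repo name
    learning_rate=2e-4,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,   # effective total train batch size of 8
    seed=3407,
    lr_scheduler_type="linear",
    warmup_steps=5,
    max_steps=60,
    fp16=True,                       # "Native AMP" mixed precision; bf16=True on Ampere+
)
```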
RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf
RichardErkhov
2024-06-04T02:59:56Z
69
0
null
[ "gguf", "arxiv:2405.04324", "region:us" ]
null
2024-06-04T01:18:38Z
Quantization made by Richard Erkhov.

[Github](https://github.com/RichardErkhov)

[Discord](https://discord.gg/pvy7H8DZMG)

[Request more models](https://github.com/RichardErkhov/quant_request)

granite-20b-code-base - GGUF
- Model creator: https://huggingface.co/ibm-granite/
- Original model: https://huggingface.co/ibm-granite/granite-20b-code-base/

| Name | Quant method | Size |
| ---- | ---- | ---- |
| [granite-20b-code-base.Q2_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q2_K.gguf) | Q2_K | 7.38GB |
| [granite-20b-code-base.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.IQ3_XS.gguf) | IQ3_XS | 8.06GB |
| [granite-20b-code-base.IQ3_S.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.IQ3_S.gguf) | IQ3_S | 1.45GB |
| [granite-20b-code-base.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q3_K_S.gguf) | Q3_K_S | 8.32GB |
| [granite-20b-code-base.IQ3_M.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.IQ3_M.gguf) | IQ3_M | 2.21GB |
| [granite-20b-code-base.Q3_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q3_K.gguf) | Q3_K | 1.42GB |
| [granite-20b-code-base.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q3_K_M.gguf) | Q3_K_M | 0.87GB |
| [granite-20b-code-base.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q3_K_L.gguf) | Q3_K_L | 0.51GB |
| [granite-20b-code-base.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.IQ4_XS.gguf) | IQ4_XS | 0.11GB |
| [granite-20b-code-base.Q4_0.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q4_0.gguf) | Q4_0 | 0.1GB |
| [granite-20b-code-base.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.IQ4_NL.gguf) | IQ4_NL | 0.02GB |
| [granite-20b-code-base.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q4_K_S.gguf) | Q4_K_S | 0.01GB |
| [granite-20b-code-base.Q4_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q4_K.gguf) | Q4_K | 0.01GB |
| [granite-20b-code-base.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q4_K_M.gguf) | Q4_K_M | 0.0GB |
| [granite-20b-code-base.Q4_1.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q4_1.gguf) | Q4_1 | 0.0GB |
| [granite-20b-code-base.Q5_0.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q5_0.gguf) | Q5_0 | 0.0GB |
| [granite-20b-code-base.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q5_K_S.gguf) | Q5_K_S | 0.0GB |
| [granite-20b-code-base.Q5_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q5_K.gguf) | Q5_K | 0.0GB |
| [granite-20b-code-base.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q5_K_M.gguf) | Q5_K_M | 0.0GB |
| [granite-20b-code-base.Q5_1.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q5_1.gguf) | Q5_1 | 0.0GB |
| [granite-20b-code-base.Q6_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q6_K.gguf) | Q6_K | 0.0GB |
| [granite-20b-code-base.Q8_0.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf/blob/main/granite-20b-code-base.Q8_0.gguf) | Q8_0 | 0.0GB |

Original model description:
---
pipeline_tag: text-generation
inference: true
license: apache-2.0
datasets:
- codeparrot/github-code-clean
- bigcode/starcoderdata
# - Stackexchange
# - CommonCrawl
- open-web-math/open-web-math
- math-ai/StackMathQA
# - Arxiv
# - Wikipedia
# - conceptofmind/FLAN_2022 # Original link is broken, we used IBM's filtered version | Phase 2
metrics:
- code_eval
library_name: transformers
tags:
- code
- granite
model-index:
- name: granite-20b-code-base
  results:
  - task:
      type: text-generation
    dataset:
      type: mbpp
      name: MBPP
    metrics:
    - name: pass@1
      type: pass@1
      value: 43.8
      verified: false
  - task:
      type: text-generation
    dataset:
      type: evalplus/mbppplus
      name: MBPP+
    metrics:
    - name: pass@1
      type: pass@1
      value: 51.6
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalSynthesis(Python)
    metrics:
    - name: pass@1
      type: pass@1
      value: 48.2
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalSynthesis(JavaScript)
    metrics:
    - name: pass@1
      type: pass@1
      value: 50.0
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalSynthesis(Java)
    metrics:
    - name: pass@1
      type: pass@1
      value: 59.1
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalSynthesis(Go)
    metrics:
    - name: pass@1
      type: pass@1
      value: 32.3
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalSynthesis(C++)
    metrics:
    - name: pass@1
      type: pass@1
      value: 40.9
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalSynthesis(Rust)
    metrics:
    - name: pass@1
      type: pass@1
      value: 35.4
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalExplain(Python)
    metrics:
    - name: pass@1
      type: pass@1
      value: 17.1
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalExplain(JavaScript)
    metrics:
    - name: pass@1
      type: pass@1
      value: 18.3
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalExplain(Java)
    metrics:
    - name: pass@1
      type: pass@1
      value: 23.2
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalExplain(Go)
    metrics:
    - name: pass@1
      type: pass@1
      value: 10.4
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalExplain(C++)
    metrics:
    - name: pass@1
      type: pass@1
      value: 25.6
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalExplain(Rust)
    metrics:
    - name: pass@1
      type: pass@1
      value: 18.3
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalFix(Python)
    metrics:
    - name: pass@1
      type: pass@1
      value: 23.2
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalFix(JavaScript)
    metrics:
    - name: pass@1
      type: pass@1
      value: 23.8
      verified: false # Check
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalFix(Java)
    metrics:
    - name: pass@1
      type: pass@1
      value: 14.6
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalFix(Go)
    metrics:
    - name: pass@1
      type: pass@1
      value: 26.2
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalFix(C++)
    metrics:
    - name: pass@1
      type: pass@1
      value: 15.2
      verified: false
  - task:
      type: text-generation
    dataset:
      type: bigcode/humanevalpack
      name: HumanEvalFix(Rust)
    metrics:
    - name: pass@1
      type: pass@1
      value: 3.0
      verified: false
---

![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cd5057674cdb524450093d/1hzxoPwqkBJXshKVVe6_9.png)

# Granite-20B-Code-Base

## Model Summary

**Granite-20B-Code-Base** is a decoder-only code model designed for code generative tasks (e.g., code generation, code explanation, code fixing, etc.). It is trained from scratch with a two-phase training strategy. In phase 1, our model is trained on 3 trillion tokens sourced from 116 programming languages, ensuring a comprehensive understanding of programming languages and syntax. In phase 2, our model is trained on 500 billion tokens with a carefully designed mixture of high-quality data from code and natural language domains to improve the models' ability to reason and follow instructions.

- **Developers:** IBM Research
- **GitHub Repository:** [ibm-granite/granite-code-models](https://github.com/ibm-granite/granite-code-models)
- **Paper:** [Granite Code Models: A Family of Open Foundation Models for Code Intelligence](https://arxiv.org/abs/2405.04324)
- **Release Date**: May 6th, 2024
- **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0).

## Usage

### Intended use

Prominent enterprise use cases of LLMs in software engineering productivity include code generation, code explanation, code fixing, generating unit tests, generating documentation, addressing technical debt issues, vulnerability detection, code translation, and more. All Granite Code Base models, including the **20B parameter model**, are able to handle these tasks, as they were trained on a large amount of code data from 116 programming languages.

### Generation

This is a simple example of how to use the **Granite-20B-Code-Base** model.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"  # or "cpu"
model_path = "ibm-granite/granite-20b-code-base"
tokenizer = AutoTokenizer.from_pretrained(model_path)
# drop device_map if running on CPU
model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device)
model.eval()
# change input text as desired
input_text = "def generate():"
# tokenize the text
input_tokens = tokenizer(input_text, return_tensors="pt")
# transfer tokenized inputs to the device
for i in input_tokens:
    input_tokens[i] = input_tokens[i].to(device)
# generate output tokens
output = model.generate(**input_tokens)
# decode output tokens into text
output = tokenizer.batch_decode(output)
# loop over the batch to print, in this example the batch size is 1
for i in output:
    print(i)
```

## Training Data

- **Data Collection and Filtering:** Pretraining code data is sourced from a combination of publicly available datasets (e.g., [GitHub Code Clean](https://huggingface.co/datasets/codeparrot/github-code-clean), [Starcoder data](https://huggingface.co/datasets/bigcode/starcoderdata)), and additional public code repositories and issues from GitHub. We filter raw data to retain a list of 116 programming languages. After language filtering, we also filter out low-quality code.
- **Exact and Fuzzy Deduplication:** We adopt an aggressive deduplication strategy that includes both exact and fuzzy deduplication to remove documents having (near) identical code content.
- **HAP, PII, Malware Filtering:** We apply a HAP content filter that reduces models' likelihood of generating hateful, abusive, or profane language. We also make sure to redact Personally Identifiable Information (PII) by replacing PII content (e.g., names, email addresses, keys, passwords) with corresponding tokens (e.g., ⟨NAME⟩, ⟨EMAIL⟩, ⟨KEY⟩, ⟨PASSWORD⟩). Moreover, we scan all datasets using [ClamAV](https://www.clamav.net/) to identify and remove instances of malware in the source code.
- **Natural Language Datasets:** In addition to collecting code data for model training, we curate several publicly available high-quality natural language datasets to improve models' proficiency in language understanding and mathematical reasoning. Unlike the code data, we do not deduplicate these datasets.

## Infrastructure

We train the Granite Code models using two of IBM's supercomputing clusters, namely Vela and Blue Vela, outfitted with NVIDIA A100 and H100 GPUs, respectively. These clusters provide a scalable and efficient infrastructure for training our models over thousands of GPUs.

## Ethical Considerations and Limitations

The use of Large Language Models involves risks and ethical considerations people must be aware of. Regarding code generation, caution is urged against complete reliance on specific code models for crucial decisions or impactful information, as the generated code is not guaranteed to work as intended. The **Granite-20B-Code-Base** model is no exception in this regard. Even though this model is suited for multiple code-related tasks, it has not undergone any safety alignment, so it may produce problematic outputs. Additionally, it remains uncertain whether smaller models might exhibit increased susceptibility to hallucination in generation scenarios by copying source code verbatim from the training dataset due to their reduced sizes and memorization capacities. This aspect is currently an active area of research, and we anticipate more rigorous exploration, comprehension, and mitigations in this domain. Regarding ethics, a latent risk associated with all Large Language Models is their malicious utilization. We urge the community to use the **Granite-20B-Code-Base** model with ethical intentions and in a responsible way.
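The card lists the GGUF files but gives no download snippet; as a convenience, individual files can be fetched with the Hugging Face Hub client — the chosen quant below is just an example:

```python
from huggingface_hub import hf_hub_download

# Download one quant from the table above into the current directory
path = hf_hub_download(
    repo_id="RichardErkhov/ibm-granite_-_granite-20b-code-base-gguf",
    filename="granite-20b-code-base.Q4_K_M.gguf",
    local_dir="./",
)
print(path)
```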
0xfaskety/Qwen-Qwen1.5-7B-1717469455
0xfaskety
2024-06-04T02:57:47Z
10
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T02:50:57Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
GuiTap/xlm-roberta-base-finetuned-ner-lenerBr
GuiTap
2024-06-04T02:57:09Z
3
0
transformers
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "token-classification", "generated_from_trainer", "dataset:lener_br", "base_model:FacebookAI/xlm-roberta-base", "base_model:finetune:FacebookAI/xlm-roberta-base", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2024-06-03T00:48:45Z
---
license: mit
base_model: FacebookAI/xlm-roberta-base
tags:
- generated_from_trainer
datasets:
- lener_br
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: xlm-roberta-base-finetuned-ner-lenerBr
  results:
  - task:
      name: Token Classification
      type: token-classification
    dataset:
      name: lener_br
      type: lener_br
      config: lener_br
      split: validation
      args: lener_br
    metrics:
    - name: Precision
      type: precision
      value: 0.7397260273972602
    - name: Recall
      type: recall
      value: 0.9211682605324373
    - name: F1
      type: f1
      value: 0.8205364337515828
    - name: Accuracy
      type: accuracy
      value: 0.970340819101409
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# xlm-roberta-base-finetuned-ner-lenerBr

This model is a fine-tuned version of [FacebookAI/xlm-roberta-base](https://huggingface.co/FacebookAI/xlm-roberta-base) on the lener_br dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1294
- Precision: 0.7397
- Recall: 0.9212
- F1: 0.8205
- Accuracy: 0.9703

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log        | 1.0   | 245  | 0.1569          | 0.7358    | 0.7788 | 0.7567 | 0.9534   |
| No log        | 2.0   | 490  | 0.1310          | 0.6909    | 0.8927 | 0.7790 | 0.9632   |
| 0.1674        | 3.0   | 735  | 0.1148          | 0.7174    | 0.9119 | 0.8030 | 0.9677   |
| 0.1674        | 4.0   | 980  | 0.1550          | 0.7209    | 0.8979 | 0.7997 | 0.9658   |
| 0.0276        | 5.0   | 1225 | 0.1441          | 0.7183    | 0.9173 | 0.8057 | 0.9682   |
| 0.0276        | 6.0   | 1470 | 0.1482          | 0.7326    | 0.8752 | 0.7976 | 0.9665   |
| 0.0154        | 7.0   | 1715 | 0.1209          | 0.7418    | 0.9284 | 0.8247 | 0.9710   |
| 0.0154        | 8.0   | 1960 | 0.1266          | 0.7375    | 0.9243 | 0.8204 | 0.9708   |
| 0.0096        | 9.0   | 2205 | 0.1394          | 0.7356    | 0.9147 | 0.8154 | 0.9690   |
| 0.0096        | 10.0  | 2450 | 0.1294          | 0.7397    | 0.9212 | 0.8205 | 0.9703   |

### Framework versions

- Transformers 4.41.1
- Pytorch 2.1.2
- Datasets 2.19.1
- Tokenizers 0.19.1
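The card gives no inference snippet; a minimal hedged sketch using the standard `transformers` pipeline follows — the example sentence is illustrative Portuguese legal text, since LeNER-Br is a Brazilian legal NER corpus:

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="GuiTap/xlm-roberta-base-finetuned-ner-lenerBr",
    aggregation_strategy="simple",  # merge subword pieces into full entity spans
)

# Illustrative input; any Portuguese legal text works
print(ner("Dispõe sobre o processo no Supremo Tribunal Federal, conforme a Lei 8.038/1990."))
```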
chainup244/Qwen-Qwen1.5-1.8B-1717469101
chainup244
2024-06-04T02:47:57Z
141
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T02:45:03Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
martinsinnona/visdecode_vega_2
martinsinnona
2024-06-04T02:42:03Z
51
0
transformers
[ "transformers", "safetensors", "pix2struct", "image-text-to-text", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
image-text-to-text
2024-06-04T02:01:16Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Chanakan5591/llama-3-typhoon-v1.5-8b-nf4
Chanakan5591
2024-06-04T02:40:51Z
79
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
text-generation
2024-06-04T02:36:06Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
ehottl/distilbert-base-uncased-finetuned-clinc
ehottl
2024-06-04T02:36:06Z
113
0
transformers
[ "transformers", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2024-06-04T02:24:15Z
--- license: apache-2.0 base_model: distilbert-base-uncased tags: - generated_from_trainer metrics: - accuracy model-index: - name: distilbert-base-uncased-finetuned-clinc results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-clinc This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8020 - Accuracy: 0.9158 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 4.3069 | 1.0 | 318 | 3.3020 | 0.7177 | | 2.6569 | 2.0 | 636 | 1.9007 | 0.8468 | | 1.5836 | 3.0 | 954 | 1.1867 | 0.8881 | | 1.0474 | 4.0 | 1272 | 0.8876 | 0.9116 | | 0.8287 | 5.0 | 1590 | 0.8020 | 0.9158 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2.post303 - Datasets 2.19.1 - Tokenizers 0.15.2
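A quick-start sketch for the checkpoint above (not part of the original card; the repo id is inferred from the model's Hub path, and the label set depends on the undocumented fine-tuning data, presumably CLINC intents):

```python
# Minimal inference sketch: text classification with the fine-tuned checkpoint.
# The repo id below is assumed from the model's Hub path; the returned labels
# come from the (unknown) fine-tuning dataset.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="ehottl/distilbert-base-uncased-finetuned-clinc",
)
print(classifier("Please transfer $100 from checking to savings."))
```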
RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf
RichardErkhov
2024-06-04T02:30:40Z
157
0
null
[ "gguf", "arxiv:2405.04324", "region:us" ]
null
2024-06-04T01:24:21Z
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) granite-20b-code-instruct - GGUF - Model creator: https://huggingface.co/ibm-granite/ - Original model: https://huggingface.co/ibm-granite/granite-20b-code-instruct/ | Name | Quant method | Size | | ---- | ---- | ---- | | [granite-20b-code-instruct.Q2_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q2_K.gguf) | Q2_K | 7.38GB | | [granite-20b-code-instruct.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.IQ3_XS.gguf) | IQ3_XS | 8.06GB | | [granite-20b-code-instruct.IQ3_S.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.IQ3_S.gguf) | IQ3_S | 0.79GB | | [granite-20b-code-instruct.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q3_K_S.gguf) | Q3_K_S | 0.56GB | | [granite-20b-code-instruct.IQ3_M.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.IQ3_M.gguf) | IQ3_M | 0.06GB | | [granite-20b-code-instruct.Q3_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q3_K.gguf) | Q3_K | 0.04GB | | [granite-20b-code-instruct.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q3_K_M.gguf) | Q3_K_M | 0.0GB | | [granite-20b-code-instruct.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q3_K_L.gguf) | Q3_K_L | 0.0GB | | [granite-20b-code-instruct.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.IQ4_XS.gguf) | IQ4_XS | 0.0GB | | [granite-20b-code-instruct.Q4_0.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q4_0.gguf) | Q4_0 | 0.0GB | | [granite-20b-code-instruct.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.IQ4_NL.gguf) | IQ4_NL | 0.0GB | | [granite-20b-code-instruct.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q4_K_S.gguf) | Q4_K_S | 0.0GB | | [granite-20b-code-instruct.Q4_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q4_K.gguf) | Q4_K | 0.0GB | | [granite-20b-code-instruct.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q4_K_M.gguf) | Q4_K_M | 0.0GB | | [granite-20b-code-instruct.Q4_1.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q4_1.gguf) | Q4_1 | 0.0GB | | [granite-20b-code-instruct.Q5_0.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q5_0.gguf) | Q5_0 | 0.0GB | | 
[granite-20b-code-instruct.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q5_K_S.gguf) | Q5_K_S | 0.0GB | | [granite-20b-code-instruct.Q5_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q5_K.gguf) | Q5_K | 0.0GB | | [granite-20b-code-instruct.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q5_K_M.gguf) | Q5_K_M | 0.0GB | | [granite-20b-code-instruct.Q5_1.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q5_1.gguf) | Q5_1 | 0.0GB | | [granite-20b-code-instruct.Q6_K.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q6_K.gguf) | Q6_K | 0.0GB | | [granite-20b-code-instruct.Q8_0.gguf](https://huggingface.co/RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf/blob/main/granite-20b-code-instruct.Q8_0.gguf) | Q8_0 | 0.0GB | Original model description: --- pipeline_tag: text-generation base_model: ibm-granite/granite-20b-code-base inference: true license: apache-2.0 datasets: - bigcode/commitpackft - TIGER-Lab/MathInstruct - meta-math/MetaMathQA - glaiveai/glaive-code-assistant-v3 - glaive-function-calling-v2 - bugdaryan/sql-create-context-instruction - garage-bAInd/Open-Platypus - nvidia/HelpSteer metrics: - code_eval library_name: transformers tags: - code - granite model-index: - name: granite-20b-code-instruct results: - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalSynthesis(Python) metrics: - name: pass@1 type: pass@1 value: 60.4 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalSynthesis(JavaScript) metrics: - name: pass@1 type: pass@1 value: 53.7 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalSynthesis(Java) metrics: - name: pass@1 type: pass@1 value: 58.5 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalSynthesis(Go) metrics: - name: pass@1 type: pass@1 value: 42.1 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalSynthesis(C++) metrics: - name: pass@1 type: pass@1 value: 45.7 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalSynthesis(Rust) metrics: - name: pass@1 type: pass@1 value: 42.7 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalExplain(Python) metrics: - name: pass@1 type: pass@1 value: 44.5 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalExplain(JavaScript) metrics: - name: pass@1 type: pass@1 value: 42.7 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalExplain(Java) metrics: - name: pass@1 type: pass@1 value: 49.4 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalExplain(Go) metrics: - name: pass@1 type: pass@1 value: 32.3 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalExplain(C++) metrics: - name: pass@1 type: pass@1 value: 42.1 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalExplain(Rust) metrics: - name: 
pass@1 type: pass@1 value: 18.3 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalFix(Python) metrics: - name: pass@1 type: pass@1 value: 43.9 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalFix(JavaScript) metrics: - name: pass@1 type: pass@1 value: 43.9 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalFix(Java) metrics: - name: pass@1 type: pass@1 value: 45.7 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalFix(Go) metrics: - name: pass@1 type: pass@1 value: 41.5 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalFix(C++) metrics: - name: pass@1 type: pass@1 value: 41.5 verified: false - task: type: text-generation dataset: type: bigcode/humanevalpack name: HumanEvalFix(Rust) metrics: - name: pass@1 type: pass@1 value: 29.9 verified: false --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cd5057674cdb524450093d/1hzxoPwqkBJXshKVVe6_9.png) # Granite-20B-Code-Instruct ## Model Summary **Granite-20B-Code-Instruct** is a 20B parameter model fine-tuned from *Granite-20B-Code-Base* on a combination of **permissively licensed** instruction data to enhance instruction-following capabilities, including logical reasoning and problem-solving skills. - **Developers:** IBM Research - **GitHub Repository:** [ibm-granite/granite-code-models](https://github.com/ibm-granite/granite-code-models) - **Paper:** [Granite Code Models: A Family of Open Foundation Models for Code Intelligence](https://arxiv.org/abs/2405.04324) - **Release Date**: May 6th, 2024 - **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0). ## Usage ### Intended use The model is designed to respond to coding-related instructions and can be used to build coding assistants. <!-- TO DO: Check starcoder2 instruct code example that includes the template https://huggingface.co/bigcode/starcoder2-15b-instruct-v0.1 --> ### Generation This is a simple example of how to use the **Granite-20B-Code-Instruct** model. ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer device = "cuda" # or "cpu" model_path = "ibm-granite/granite-20b-code-instruct" tokenizer = AutoTokenizer.from_pretrained(model_path) # drop device_map if running on CPU model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device) model.eval() # change input text as desired chat = [ { "role": "user", "content": "Write a code to find the maximum value in a list of numbers." }, ] chat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True) # tokenize the text input_tokens = tokenizer(chat, return_tensors="pt") # transfer tokenized inputs to the device for i in input_tokens: input_tokens[i] = input_tokens[i].to(device) # generate output tokens output = model.generate(**input_tokens, max_new_tokens=100) # decode output tokens into text output = tokenizer.batch_decode(output) # loop over the batch to print, in this example the batch size is 1 for i in output: print(i) ``` <!-- TO DO: Check this part --> ## Training Data Granite Code Instruct models are trained on the following types of data. * Code Commits Datasets: we sourced code commits data from the [CommitPackFT](https://huggingface.co/datasets/bigcode/commitpackft) dataset, a filtered version of the full CommitPack dataset. 
From the CommitPackFT dataset, we only consider data for 92 programming languages. Our inclusion criteria boil down to selecting programming languages common across CommitPackFT and the 116 languages that we considered to pretrain the code-base model (*Granite-20B-Code-Base*). * Math Datasets: We consider two high-quality math datasets, [MathInstruct](https://huggingface.co/datasets/TIGER-Lab/MathInstruct) and [MetaMathQA](https://huggingface.co/datasets/meta-math/MetaMathQA). Due to license issues, we filtered out GSM8K-RFT and Camel-Math from the MathInstruct dataset. * Code Instruction Datasets: We use [Glaive-Code-Assistant-v3](https://huggingface.co/datasets/glaiveai/glaive-code-assistant-v3), [Glaive-Function-Calling-v2](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2), [NL2SQL11](https://huggingface.co/datasets/bugdaryan/sql-create-context-instruction) and a small collection of synthetic API calling datasets. * Language Instruction Datasets: We include high-quality datasets such as [HelpSteer](https://huggingface.co/datasets/nvidia/HelpSteer) and an open license-filtered version of [Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus). We also include a collection of hardcoded prompts to ensure our model generates correct outputs given inquiries about its name or developers. ## Infrastructure We train the Granite Code models using two of IBM's supercomputing clusters, namely Vela and Blue Vela, outfitted with NVIDIA A100 and H100 GPUs, respectively. These clusters provide a scalable and efficient infrastructure for training our models over thousands of GPUs. ## Ethical Considerations and Limitations Granite Code Instruct models are primarily fine-tuned using instruction-response pairs across a specific set of programming languages, so their performance may be limited on out-of-domain programming languages. In this situation, it is beneficial to provide few-shot examples to steer the model's output. Moreover, developers should perform safety testing and target-specific tuning before deploying these models in critical applications. The model also inherits ethical considerations and limitations from its base model. For more information, please refer to the *[Granite-20B-Code-Base](https://huggingface.co/ibm-granite/granite-20b-code-base)* model card.
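For the GGUF quantizations listed at the top of this card, a hypothetical local-inference sketch follows. The card does not prescribe a runtime, so llama-cpp-python is an assumption here, and the chosen quant filename is taken from the table above:

```python
# Hedged sketch: download one quant from the table above and run it locally.
# llama-cpp-python is just one of several GGUF-capable runtimes; the raw
# prompt below is illustrative and does not apply the model's chat template.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

path = hf_hub_download(
    repo_id="RichardErkhov/ibm-granite_-_granite-20b-code-instruct-gguf",
    filename="granite-20b-code-instruct.Q4_K_M.gguf",
)
llm = Llama(model_path=path, n_ctx=4096)
out = llm("Write a function that reverses a string.", max_tokens=128)
print(out["choices"][0]["text"])
```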
apwic/nerui-lora-r16-3
apwic
2024-06-04T02:30:22Z
0
0
null
[ "tensorboard", "generated_from_trainer", "id", "base_model:indolem/indobert-base-uncased", "base_model:finetune:indolem/indobert-base-uncased", "license:mit", "region:us" ]
null
2024-05-28T14:17:21Z
--- language: - id license: mit base_model: indolem/indobert-base-uncased tags: - generated_from_trainer model-index: - name: nerui-lora-r16-3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nerui-lora-r16-3 This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0458 - Location Precision: 0.9022 - Location Recall: 0.9651 - Location F1: 0.9326 - Location Number: 86 - Organization Precision: 0.9314 - Organization Recall: 0.9157 - Organization F1: 0.9235 - Organization Number: 178 - Person Precision: 0.9843 - Person Recall: 0.9766 - Person F1: 0.9804 - Person Number: 128 - Overall Precision: 0.9416 - Overall Recall: 0.9464 - Overall F1: 0.9440 - Overall Accuracy: 0.9884 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Location Precision | Location Recall | Location F1 | Location Number | Organization Precision | Organization Recall | Organization F1 | Organization Number | Person Precision | Person Recall | Person F1 | Person Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------------------:|:---------------:|:-----------:|:---------------:|:----------------------:|:-------------------:|:---------------:|:-------------------:|:----------------:|:-------------:|:---------:|:-------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 1.0611 | 1.0 | 96 | 0.6536 | 0.0 | 0.0 | 0.0 | 86 | 0.0 | 0.0 | 0.0 | 178 | 0.0 | 0.0 | 0.0 | 128 | 0.0 | 0.0 | 0.0 | 0.8435 | | 0.6324 | 2.0 | 192 | 0.5023 | 0.0 | 0.0 | 0.0 | 86 | 0.5556 | 0.0281 | 0.0535 | 178 | 0.0 | 0.0 | 0.0 | 128 | 0.4167 | 0.0128 | 0.0248 | 0.8448 | | 0.4878 | 3.0 | 288 | 0.3482 | 0.25 | 0.0233 | 0.0426 | 86 | 0.3936 | 0.2079 | 0.2721 | 178 | 0.3543 | 0.3516 | 0.3529 | 128 | 0.3668 | 0.2143 | 0.2705 | 0.8799 | | 0.341 | 4.0 | 384 | 0.2386 | 0.5185 | 0.3256 | 0.4 | 86 | 0.5308 | 0.6292 | 0.5758 | 178 | 0.5767 | 0.7344 | 0.6460 | 128 | 0.5467 | 0.5969 | 0.5707 | 0.9296 | | 0.2391 | 5.0 | 480 | 0.1745 | 0.7179 | 0.6512 | 0.6829 | 86 | 0.6603 | 0.7753 | 0.7132 | 178 | 0.8151 | 0.9297 | 0.8686 | 128 | 0.7229 | 0.7985 | 0.7588 | 0.9547 | | 0.1867 | 6.0 | 576 | 0.1380 | 0.7396 | 0.8256 | 0.7802 | 86 | 0.7385 | 0.8090 | 0.7721 | 178 | 0.9118 | 0.9688 | 0.9394 | 128 | 0.7939 | 0.8648 | 0.8278 | 0.9655 | | 0.1578 | 7.0 | 672 | 0.1150 | 0.75 | 0.8372 | 0.7912 | 86 | 0.7755 | 0.8539 | 0.8128 | 178 | 0.9058 | 0.9766 | 0.9398 | 128 | 0.8116 | 0.8903 | 0.8491 | 0.9690 | | 0.1374 | 8.0 | 768 | 0.0980 | 0.7766 | 0.8488 | 0.8111 | 86 | 0.8105 | 0.8652 | 0.8370 | 178 | 0.9191 | 0.9766 | 0.9470 | 128 | 0.8381 | 0.8980 | 0.8670 | 0.9730 | | 0.1267 | 9.0 | 864 | 0.0882 | 0.77 | 0.8953 | 0.8280 | 86 | 0.8511 | 0.8989 | 0.8743 | 178 | 0.9328 | 0.9766 | 0.9542 | 128 | 0.8578 | 0.9235 
| 0.8894 | 0.9749 | | 0.115 | 10.0 | 960 | 0.0822 | 0.8061 | 0.9186 | 0.8587 | 86 | 0.8474 | 0.9045 | 0.8750 | 178 | 0.9328 | 0.9766 | 0.9542 | 128 | 0.8649 | 0.9311 | 0.8968 | 0.9765 | | 0.1082 | 11.0 | 1056 | 0.0755 | 0.7835 | 0.8837 | 0.8306 | 86 | 0.8495 | 0.8876 | 0.8681 | 178 | 0.9466 | 0.9688 | 0.9575 | 128 | 0.8647 | 0.9133 | 0.8883 | 0.9768 | | 0.1032 | 12.0 | 1152 | 0.0724 | 0.8495 | 0.9186 | 0.8827 | 86 | 0.8579 | 0.9157 | 0.8859 | 178 | 0.9323 | 0.9688 | 0.9502 | 128 | 0.8798 | 0.9337 | 0.9059 | 0.9781 | | 0.0944 | 13.0 | 1248 | 0.0646 | 0.8778 | 0.9186 | 0.8977 | 86 | 0.875 | 0.9045 | 0.8895 | 178 | 0.9466 | 0.9688 | 0.9575 | 128 | 0.8988 | 0.9286 | 0.9134 | 0.9800 | | 0.0923 | 14.0 | 1344 | 0.0638 | 0.8242 | 0.8721 | 0.8475 | 86 | 0.8743 | 0.8989 | 0.8864 | 178 | 0.9538 | 0.9688 | 0.9612 | 128 | 0.8886 | 0.9158 | 0.9020 | 0.9798 | | 0.0918 | 15.0 | 1440 | 0.0623 | 0.8571 | 0.9070 | 0.8814 | 86 | 0.8859 | 0.9157 | 0.9006 | 178 | 0.9615 | 0.9766 | 0.9690 | 128 | 0.9037 | 0.9337 | 0.9184 | 0.9806 | | 0.0848 | 16.0 | 1536 | 0.0615 | 0.8298 | 0.9070 | 0.8667 | 86 | 0.8696 | 0.8989 | 0.8840 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.8941 | 0.9260 | 0.9098 | 0.9798 | | 0.0818 | 17.0 | 1632 | 0.0594 | 0.8495 | 0.9186 | 0.8827 | 86 | 0.8840 | 0.8989 | 0.8914 | 178 | 0.9690 | 0.9766 | 0.9728 | 128 | 0.9032 | 0.9286 | 0.9157 | 0.9814 | | 0.0797 | 18.0 | 1728 | 0.0577 | 0.8764 | 0.9070 | 0.8914 | 86 | 0.8840 | 0.8989 | 0.8914 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9121 | 0.9260 | 0.9190 | 0.9814 | | 0.0745 | 19.0 | 1824 | 0.0573 | 0.8667 | 0.9070 | 0.8864 | 86 | 0.8852 | 0.9101 | 0.8975 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9102 | 0.9311 | 0.9206 | 0.9814 | | 0.0747 | 20.0 | 1920 | 0.0554 | 0.8478 | 0.9070 | 0.8764 | 86 | 0.8907 | 0.9157 | 0.9030 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9082 | 0.9337 | 0.9208 | 0.9816 | | 0.0702 | 21.0 | 2016 | 0.0560 | 0.8387 | 0.9070 | 0.8715 | 86 | 0.8876 | 0.8876 | 0.8876 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9048 | 0.9209 | 0.9128 | 0.9811 | | 0.0701 | 22.0 | 2112 | 0.0550 | 0.8316 | 0.9186 | 0.8729 | 86 | 0.8871 | 0.9270 | 0.9066 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9022 | 0.9413 | 0.9213 | 0.9822 | | 0.0663 | 23.0 | 2208 | 0.0520 | 0.8478 | 0.9070 | 0.8764 | 86 | 0.9045 | 0.9045 | 0.9045 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9146 | 0.9286 | 0.9215 | 0.9833 | | 0.0666 | 24.0 | 2304 | 0.0543 | 0.8211 | 0.9070 | 0.8619 | 86 | 0.8870 | 0.8820 | 0.8845 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9 | 0.9184 | 0.9091 | 0.9806 | | 0.0635 | 25.0 | 2400 | 0.0524 | 0.8316 | 0.9186 | 0.8729 | 86 | 0.8927 | 0.8876 | 0.8901 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.905 | 0.9235 | 0.9141 | 0.9822 | | 0.0632 | 26.0 | 2496 | 0.0519 | 0.8421 | 0.9302 | 0.8840 | 86 | 0.8927 | 0.8876 | 0.8901 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9075 | 0.9260 | 0.9167 | 0.9825 | | 0.0596 | 27.0 | 2592 | 0.0489 | 0.8495 | 0.9186 | 0.8827 | 86 | 0.8840 | 0.8989 | 0.8914 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9055 | 0.9286 | 0.9169 | 0.9830 | | 0.0608 | 28.0 | 2688 | 0.0508 | 0.8316 | 0.9186 | 0.8729 | 86 | 0.8927 | 0.8876 | 0.8901 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9073 | 0.9235 | 0.9153 | 0.9825 | | 0.0591 | 29.0 | 2784 | 0.0464 | 0.8966 | 0.9070 | 0.9017 | 86 | 0.8962 | 0.9213 | 0.9086 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9221 | 0.9362 | 0.9291 | 0.9843 | | 0.0582 | 30.0 | 2880 | 0.0472 | 0.8864 | 0.9070 | 0.8966 | 86 | 0.9126 | 0.9382 | 0.9252 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9273 | 0.9439 | 0.9355 | 0.9857 | | 0.0567 | 
31.0 | 2976 | 0.0518 | 0.8333 | 0.9302 | 0.8791 | 86 | 0.8971 | 0.8820 | 0.8895 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9073 | 0.9235 | 0.9153 | 0.9825 | | 0.0545 | 32.0 | 3072 | 0.0493 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.8956 | 0.9157 | 0.9056 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9152 | 0.9362 | 0.9256 | 0.9841 | | 0.0526 | 33.0 | 3168 | 0.0488 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.8944 | 0.9045 | 0.8994 | 178 | 0.9688 | 0.9688 | 0.9688 | 128 | 0.9125 | 0.9311 | 0.9217 | 0.9843 | | 0.0536 | 34.0 | 3264 | 0.0481 | 0.8989 | 0.9302 | 0.9143 | 86 | 0.9213 | 0.9213 | 0.9213 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9342 | 0.9413 | 0.9377 | 0.9843 | | 0.0501 | 35.0 | 3360 | 0.0482 | 0.8889 | 0.9302 | 0.9091 | 86 | 0.9157 | 0.9157 | 0.9157 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9293 | 0.9388 | 0.9340 | 0.9846 | | 0.0541 | 36.0 | 3456 | 0.0481 | 0.8889 | 0.9302 | 0.9091 | 86 | 0.9270 | 0.9270 | 0.9270 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9343 | 0.9439 | 0.9391 | 0.9857 | | 0.0513 | 37.0 | 3552 | 0.0475 | 0.8778 | 0.9186 | 0.8977 | 86 | 0.9162 | 0.9213 | 0.9188 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9270 | 0.9388 | 0.9328 | 0.9857 | | 0.0506 | 38.0 | 3648 | 0.0483 | 0.8602 | 0.9302 | 0.8939 | 86 | 0.9045 | 0.9045 | 0.9045 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9196 | 0.9337 | 0.9266 | 0.9846 | | 0.0483 | 39.0 | 3744 | 0.0498 | 0.8617 | 0.9419 | 0.9000 | 86 | 0.9213 | 0.9213 | 0.9213 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9273 | 0.9439 | 0.9355 | 0.9854 | | 0.0481 | 40.0 | 3840 | 0.0467 | 0.8876 | 0.9186 | 0.9029 | 86 | 0.8950 | 0.9101 | 0.9025 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9219 | 0.9337 | 0.9278 | 0.9846 | | 0.0463 | 41.0 | 3936 | 0.0471 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9050 | 0.9101 | 0.9076 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9221 | 0.9362 | 0.9291 | 0.9846 | | 0.0461 | 42.0 | 4032 | 0.0456 | 0.8977 | 0.9186 | 0.9080 | 86 | 0.9282 | 0.9438 | 0.9359 | 178 | 0.9764 | 0.9688 | 0.9725 | 128 | 0.9369 | 0.9464 | 0.9416 | 0.9870 | | 0.0454 | 43.0 | 4128 | 0.0459 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9111 | 0.9213 | 0.9162 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9271 | 0.9413 | 0.9342 | 0.9860 | | 0.0459 | 44.0 | 4224 | 0.0470 | 0.8804 | 0.9419 | 0.9101 | 86 | 0.9282 | 0.9438 | 0.9359 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.935 | 0.9541 | 0.9444 | 0.9873 | | 0.0465 | 45.0 | 4320 | 0.0464 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9322 | 0.9270 | 0.9296 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9343 | 0.9439 | 0.9391 | 0.9870 | | 0.0468 | 46.0 | 4416 | 0.0483 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9157 | 0.9157 | 0.9157 | 178 | 0.9764 | 0.9688 | 0.9725 | 128 | 0.9293 | 0.9388 | 0.9340 | 0.9854 | | 0.0432 | 47.0 | 4512 | 0.0477 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9157 | 0.9157 | 0.9157 | 178 | 0.9764 | 0.9688 | 0.9725 | 128 | 0.9293 | 0.9388 | 0.9340 | 0.9857 | | 0.0434 | 48.0 | 4608 | 0.0450 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9270 | 0.9270 | 0.9270 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9868 | | 0.0442 | 49.0 | 4704 | 0.0464 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9253 | 0.9045 | 0.9148 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9289 | 0.9337 | 0.9313 | 0.9849 | | 0.0421 | 50.0 | 4800 | 0.0474 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9218 | 0.9270 | 0.9244 | 178 | 0.9764 | 0.9688 | 0.9725 | 128 | 0.9271 | 0.9413 | 0.9342 | 0.9865 | | 0.0421 | 51.0 | 4896 | 0.0462 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9106 | 0.9157 | 0.9132 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9271 | 0.9413 | 0.9342 | 0.9862 | | 0.0415 | 52.0 | 4992 | 0.0461 | 
0.8602 | 0.9302 | 0.8939 | 86 | 0.9056 | 0.9157 | 0.9106 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.92 | 0.9388 | 0.9293 | 0.9865 | | 0.0418 | 53.0 | 5088 | 0.0455 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9050 | 0.9101 | 0.9076 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9198 | 0.9362 | 0.9279 | 0.9857 | | 0.0416 | 54.0 | 5184 | 0.0450 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9162 | 0.9213 | 0.9188 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9271 | 0.9413 | 0.9342 | 0.9865 | | 0.0403 | 55.0 | 5280 | 0.0456 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9153 | 0.9101 | 0.9127 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9316 | 0.9388 | 0.9352 | 0.9862 | | 0.0424 | 56.0 | 5376 | 0.0458 | 0.8804 | 0.9419 | 0.9101 | 86 | 0.9195 | 0.8989 | 0.9091 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9289 | 0.9337 | 0.9313 | 0.9843 | | 0.0391 | 57.0 | 5472 | 0.0450 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9111 | 0.9213 | 0.9162 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9273 | 0.9439 | 0.9355 | 0.9868 | | 0.039 | 58.0 | 5568 | 0.0462 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9101 | 0.9101 | 0.9101 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9270 | 0.9388 | 0.9328 | 0.9857 | | 0.0367 | 59.0 | 5664 | 0.0457 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.8939 | 0.8989 | 0.8964 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9148 | 0.9311 | 0.9229 | 0.9860 | | 0.0396 | 60.0 | 5760 | 0.0450 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9101 | 0.9101 | 0.9101 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9221 | 0.9362 | 0.9291 | 0.9865 | | 0.038 | 61.0 | 5856 | 0.0451 | 0.8913 | 0.9535 | 0.9213 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9343 | 0.9439 | 0.9391 | 0.9876 | | 0.0359 | 62.0 | 5952 | 0.0451 | 0.8913 | 0.9535 | 0.9213 | 86 | 0.9261 | 0.9157 | 0.9209 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9367 | 0.9439 | 0.9403 | 0.9870 | | 0.0366 | 63.0 | 6048 | 0.0456 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9213 | 0.9213 | 0.9213 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9370 | 0.9490 | 0.9430 | 0.9881 | | 0.0346 | 64.0 | 6144 | 0.0452 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9157 | 0.9157 | 0.9157 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9876 | | 0.0338 | 65.0 | 6240 | 0.0457 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9162 | 0.9213 | 0.9188 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9347 | 0.9490 | 0.9418 | 0.9881 | | 0.0352 | 66.0 | 6336 | 0.0455 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9101 | 0.9101 | 0.9101 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9244 | 0.9362 | 0.9303 | 0.9873 | | 0.0351 | 67.0 | 6432 | 0.0456 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9040 | 0.8989 | 0.9014 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9217 | 0.9311 | 0.9264 | 0.9857 | | 0.0333 | 68.0 | 6528 | 0.0462 | 0.8817 | 0.9535 | 0.9162 | 86 | 0.9116 | 0.9270 | 0.9192 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9277 | 0.9490 | 0.9382 | 0.9881 | | 0.0356 | 69.0 | 6624 | 0.0452 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9091 | 0.8989 | 0.9040 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9289 | 0.9337 | 0.9313 | 0.9862 | | 0.0336 | 70.0 | 6720 | 0.0455 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9266 | 0.9213 | 0.9239 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9879 | | 0.0331 | 71.0 | 6816 | 0.0459 | 0.8710 | 0.9419 | 0.9050 | 86 | 0.9116 | 0.9270 | 0.9192 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9252 | 0.9464 | 0.9357 | 0.9876 | | 0.0351 | 72.0 | 6912 | 0.0469 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.8989 | 0.8989 | 0.8989 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9194 | 0.9311 | 0.9252 | 0.9852 | | 0.0333 | 73.0 | 7008 | 0.0466 | 0.8817 | 0.9535 | 0.9162 | 86 
| 0.9261 | 0.9157 | 0.9209 | 178 | 0.9764 | 0.9688 | 0.9725 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9876 | | 0.0345 | 74.0 | 7104 | 0.0455 | 0.8817 | 0.9535 | 0.9162 | 86 | 0.9101 | 0.9101 | 0.9101 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9271 | 0.9413 | 0.9342 | 0.9873 | | 0.033 | 75.0 | 7200 | 0.0458 | 0.8710 | 0.9419 | 0.9050 | 86 | 0.9261 | 0.9157 | 0.9209 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9879 | | 0.0334 | 76.0 | 7296 | 0.0455 | 0.8913 | 0.9535 | 0.9213 | 86 | 0.9205 | 0.9101 | 0.9153 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9342 | 0.9413 | 0.9377 | 0.9881 | | 0.0332 | 77.0 | 7392 | 0.0442 | 0.8710 | 0.9419 | 0.9050 | 86 | 0.9213 | 0.9213 | 0.9213 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9296 | 0.9439 | 0.9367 | 0.9884 | | 0.0337 | 78.0 | 7488 | 0.0470 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9314 | 0.9157 | 0.9235 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9416 | 0.9464 | 0.9440 | 0.9881 | | 0.0334 | 79.0 | 7584 | 0.0465 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9369 | 0.9464 | 0.9416 | 0.9873 | | 0.0319 | 80.0 | 7680 | 0.0455 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9318 | 0.9213 | 0.9266 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9418 | 0.9490 | 0.9454 | 0.9879 | | 0.032 | 81.0 | 7776 | 0.0465 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9318 | 0.9213 | 0.9266 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9418 | 0.9490 | 0.9454 | 0.9876 | | 0.0328 | 82.0 | 7872 | 0.0450 | 0.8817 | 0.9535 | 0.9162 | 86 | 0.9106 | 0.9157 | 0.9132 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9273 | 0.9439 | 0.9355 | 0.9884 | | 0.032 | 83.0 | 7968 | 0.0449 | 0.8817 | 0.9535 | 0.9162 | 86 | 0.9106 | 0.9157 | 0.9132 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9273 | 0.9439 | 0.9355 | 0.9881 | | 0.0309 | 84.0 | 8064 | 0.0451 | 0.8817 | 0.9535 | 0.9162 | 86 | 0.9106 | 0.9157 | 0.9132 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9273 | 0.9439 | 0.9355 | 0.9879 | | 0.0315 | 85.0 | 8160 | 0.0455 | 0.8913 | 0.9535 | 0.9213 | 86 | 0.9205 | 0.9101 | 0.9153 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9342 | 0.9413 | 0.9377 | 0.9879 | | 0.0305 | 86.0 | 8256 | 0.0456 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9266 | 0.9213 | 0.9239 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9394 | 0.9490 | 0.9442 | 0.9879 | | 0.0318 | 87.0 | 8352 | 0.0457 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9369 | 0.9464 | 0.9416 | 0.9873 | | 0.0317 | 88.0 | 8448 | 0.0459 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9369 | 0.9464 | 0.9416 | 0.9873 | | 0.0319 | 89.0 | 8544 | 0.0463 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9261 | 0.9157 | 0.9209 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9392 | 0.9464 | 0.9428 | 0.9876 | | 0.0311 | 90.0 | 8640 | 0.0465 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9314 | 0.9157 | 0.9235 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9416 | 0.9464 | 0.9440 | 0.9870 | | 0.0297 | 91.0 | 8736 | 0.0460 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9257 | 0.9101 | 0.9178 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9391 | 0.9439 | 0.9415 | 0.9876 | | 0.0306 | 92.0 | 8832 | 0.0462 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9257 | 0.9101 | 0.9178 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9391 | 0.9439 | 0.9415 | 0.9876 | | 0.0335 | 93.0 | 8928 | 0.0460 | 0.8913 | 0.9535 | 0.9213 | 86 | 0.92 | 0.9045 | 0.9122 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9340 | 0.9388 | 0.9364 | 0.9870 | | 0.0288 | 94.0 | 9024 | 0.0462 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9318 | 0.9213 | 0.9266 | 
178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9418 | 0.9490 | 0.9454 | 0.9881 | | 0.0296 | 95.0 | 9120 | 0.0459 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9368 | 0.9157 | 0.9261 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9440 | 0.9464 | 0.9452 | 0.9881 | | 0.0317 | 96.0 | 9216 | 0.0455 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9314 | 0.9157 | 0.9235 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9416 | 0.9464 | 0.9440 | 0.9884 | | 0.0298 | 97.0 | 9312 | 0.0457 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9314 | 0.9157 | 0.9235 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9416 | 0.9464 | 0.9440 | 0.9884 | | 0.0295 | 98.0 | 9408 | 0.0456 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9314 | 0.9157 | 0.9235 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9416 | 0.9464 | 0.9440 | 0.9884 | | 0.0303 | 99.0 | 9504 | 0.0458 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9314 | 0.9157 | 0.9235 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9416 | 0.9464 | 0.9440 | 0.9884 | | 0.0304 | 100.0 | 9600 | 0.0458 | 0.9022 | 0.9651 | 0.9326 | 86 | 0.9314 | 0.9157 | 0.9235 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9416 | 0.9464 | 0.9440 | 0.9884 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.3.0+cu121 - Datasets 2.19.1 - Tokenizers 0.15.2
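A speculative loading sketch for the adapter described above. Whether the repo ships a PEFT-format LoRA adapter, and the exact label set, are assumptions; `num_labels=7` guesses a BIO scheme over the three entity types reported in the evaluation (O plus B-/I- tags for LOC, ORG, PER):

```python
# Hedged sketch: attach the LoRA adapter (r=16) to the indobert base model
# for token classification. num_labels=7 is an assumption, not from the card.
from transformers import AutoModelForTokenClassification, AutoTokenizer
from peft import PeftModel

base = AutoModelForTokenClassification.from_pretrained(
    "indolem/indobert-base-uncased", num_labels=7
)
tokenizer = AutoTokenizer.from_pretrained("indolem/indobert-base-uncased")
model = PeftModel.from_pretrained(base, "apwic/nerui-lora-r16-3")
model.eval()
```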
flammenai/Mahou-1.3a-mistral-7B-GGUF
flammenai
2024-06-04T02:26:13Z
0
1
transformers
[ "transformers", "gguf", "dataset:flammenai/MahouMix-v1", "base_model:flammenai/Mahou-1.3a-mistral-7B", "base_model:quantized:flammenai/Mahou-1.3a-mistral-7B", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2024-06-02T03:39:19Z
--- library_name: transformers license: apache-2.0 base_model: - flammenai/Mahou-1.3a-mistral-7B datasets: - flammenai/MahouMix-v1 --- ![image/png](https://huggingface.co/flammenai/Mahou-1.0-mistral-7B/resolve/main/mahou1.png) # Mahou-1.3a-mistral-7B Mahou is designed to provide short messages in a conversational context. It is capable of casual conversation and character roleplay. ### Chat Format This model has been trained to use the ChatML format. ``` <|im_start|>system {{system}}<|im_end|> <|im_start|>{{char}} {{message}}<|im_end|> <|im_start|>{{user}} {{message}}<|im_end|> ``` ### Roleplay Format - Speech without quotes. - Actions in `*asterisks*` ``` *leans against wall coolly* so like, i just casted a super strong spell at magician academy today, not gonna lie, felt badass. ``` ### SillyTavern Settings 1. Use ChatML for the Context Template. 2. Enable Instruct Mode. 3. Use the [Mahou preset](https://huggingface.co/datasets/flammenai/Mahou-ST-ChatML-Instruct/raw/main/Mahou.json). 4. *Recommended* Additional stopping strings: `["\n", "<|", "</"]` ### Method DPO fine-tuned for 6 epochs using an A100 on Google Colab. [Fine-tune a Mistral-7b model with Direct Preference Optimization](https://towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac) - [Maxime Labonne](https://huggingface.co/mlabonne)
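To make the ChatML layout above concrete, here is a small prompt-building helper. It is purely illustrative: the role placeholders (`{{char}}`, `{{user}}`) come from the card, while the user name "Hiro" is made up for the example.

```python
# Build a ChatML prompt matching the documented format, then cue the model
# to answer as the character by opening its turn.
def chatml(messages):
    """messages: list of (role, text) pairs, e.g. [("system", ...), ("user", ...)]"""
    return "".join(f"<|im_start|>{role}\n{text}<|im_end|>\n" for role, text in messages)

prompt = chatml([
    ("system", "You are Mahou, a playful roleplay character."),
    ("Hiro", "*waves* how was magician academy today?"),
]) + "<|im_start|>Mahou\n"  # leave the character's turn open for generation
```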
Ariffiq99/CRAB_COPA_KUCI_xlm_roberta_large_finetuned
Ariffiq99
2024-06-04T02:25:44Z
6
0
transformers
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "multiple-choice", "generated_from_trainer", "base_model:Ariffiq99/COPA_KUCI_xlm_roberta_large_finetuned", "base_model:finetune:Ariffiq99/COPA_KUCI_xlm_roberta_large_finetuned", "license:mit", "endpoints_compatible", "region:us" ]
multiple-choice
2024-06-04T00:05:24Z
--- license: mit base_model: Ariffiq99/COPA_KUCI_xlm_roberta_large_finetuned tags: - generated_from_trainer metrics: - f1 model-index: - name: CRAB_COPA_KUCI_xlm_roberta_large_finetuned results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CRAB_COPA_KUCI_xlm_roberta_large_finetuned This model is a fine-tuned version of [Ariffiq99/COPA_KUCI_xlm_roberta_large_finetuned](https://huggingface.co/Ariffiq99/COPA_KUCI_xlm_roberta_large_finetuned) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2852 - F1: 0.7250 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 1.1412 | 1.0 | 2880 | 1.4904 | 0.675 | | 1.0659 | 2.0 | 5760 | 1.7656 | 0.6986 | | 0.9118 | 3.0 | 8640 | 1.4802 | 0.7083 | | 0.8833 | 4.0 | 11520 | 0.9360 | 0.7208 | | 0.9054 | 5.0 | 14400 | 1.3935 | 0.7111 | | 0.8062 | 6.0 | 17280 | 1.1927 | 0.7194 | | 0.8188 | 7.0 | 20160 | 1.1275 | 0.7278 | | 0.7608 | 8.0 | 23040 | 1.2852 | 0.7250 | ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.19.2 - Tokenizers 0.19.1
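A hedged inference sketch for the checkpoint above. COPA-style causal reasoning is assumed from the model lineage, and the premise/choice pair below is invented for illustration:

```python
# Score each candidate continuation against the same premise with a
# multiple-choice head and pick the argmax.
import torch
from transformers import AutoModelForMultipleChoice, AutoTokenizer

repo = "Ariffiq99/CRAB_COPA_KUCI_xlm_roberta_large_finetuned"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForMultipleChoice.from_pretrained(repo)

premise = "The man broke his toe. What was the cause?"
choices = ["He got a hole in his sock.", "He dropped a hammer on his foot."]
enc = tokenizer([premise] * len(choices), choices, return_tensors="pt", padding=True)
enc = {k: v.unsqueeze(0) for k, v in enc.items()}  # shape: (1, num_choices, seq_len)
with torch.no_grad():
    logits = model(**enc).logits
print(choices[logits.argmax(dim=-1).item()])
```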
TTTXXX01/LS-zephyr-7b-sft-full
TTTXXX01
2024-06-04T02:25:00Z
8
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "alignment-handbook", "trl", "dpo", "generated_from_trainer", "conversational", "dataset:HuggingFaceH4/ultrafeedback_binarized", "base_model:alignment-handbook/zephyr-7b-sft-full", "base_model:finetune:alignment-handbook/zephyr-7b-sft-full", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-03T18:08:07Z
--- license: apache-2.0 base_model: alignment-handbook/zephyr-7b-sft-full tags: - alignment-handbook - trl - dpo - generated_from_trainer datasets: - HuggingFaceH4/ultrafeedback_binarized model-index: - name: LS-zephyr-7b-sft-full results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # LS-zephyr-7b-sft-full This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the HuggingFaceH4/ultrafeedback_binarized dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 3 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 3 - total_train_batch_size: 9 - total_eval_batch_size: 12 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.19.1 - Tokenizers 0.19.1
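A schematic of the training recipe this card implies (DPO on `ultrafeedback_binarized` from the `zephyr-7b-sft-full` base). The trl API has shifted across versions, so treat this as a sketch rather than the exact script used:

```python
# Hedged DPO training sketch mirroring the listed hyperparameters.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base = "alignment-handbook/zephyr-7b-sft-full"
model = AutoModelForCausalLM.from_pretrained(base)
tokenizer = AutoTokenizer.from_pretrained(base)
train_ds = load_dataset("HuggingFaceH4/ultrafeedback_binarized", split="train_prefs")

args = DPOConfig(
    output_dir="LS-zephyr-7b-sft-full",
    learning_rate=5e-7,               # matches the card
    num_train_epochs=1,
    per_device_train_batch_size=3,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
)
trainer = DPOTrainer(model=model, args=args, train_dataset=train_ds,
                     tokenizer=tokenizer)  # newer trl versions rename this to processing_class
trainer.train()
```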
Larbz-7/swin-tiny-patch4-window7-224-finetuned-eurosat
Larbz-7
2024-06-04T02:22:07Z
219
0
transformers
[ "transformers", "tensorboard", "safetensors", "swin", "image-classification", "generated_from_trainer", "base_model:microsoft/swin-tiny-patch4-window7-224", "base_model:finetune:microsoft/swin-tiny-patch4-window7-224", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2024-06-03T23:03:14Z
--- license: apache-2.0 base_model: microsoft/swin-tiny-patch4-window7-224 tags: - generated_from_trainer metrics: - accuracy model-index: - name: swin-tiny-patch4-window7-224-finetuned-eurosat results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.1335 - Accuracy: 0.5414 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:------:|:----:|:---------------:|:--------:| | 2.3862 | 0.9994 | 788 | 2.2541 | 0.5365 | | 2.1651 | 2.0 | 1577 | 2.1688 | 0.5395 | | 2.1559 | 2.9981 | 2364 | 2.1335 | 0.5414 | ### Framework versions - Transformers 4.41.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.2 - Tokenizers 0.19.1
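An assumed quick-start for the image classifier above (the repo id comes from the model path; the label set is whatever the unknown fine-tuning dataset used):

```python
# Minimal image-classification sketch with the fine-tuned Swin checkpoint.
from transformers import pipeline

clf = pipeline(
    "image-classification",
    model="Larbz-7/swin-tiny-patch4-window7-224-finetuned-eurosat",
)
print(clf("example.jpg"))  # local path or URL to an input image
```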
apwic/nerui-lora-r8-3
apwic
2024-06-04T02:20:47Z
0
0
null
[ "tensorboard", "generated_from_trainer", "id", "base_model:indolem/indobert-base-uncased", "base_model:finetune:indolem/indobert-base-uncased", "license:mit", "region:us" ]
null
2024-05-28T13:59:10Z
--- language: - id license: mit base_model: indolem/indobert-base-uncased tags: - generated_from_trainer model-index: - name: nerui-lora-r8-3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nerui-lora-r8-3 This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0484 - Location Precision: 0.9 - Location Recall: 0.9419 - Location F1: 0.9205 - Location Number: 86 - Organization Precision: 0.9364 - Organization Recall: 0.9101 - Organization F1: 0.9231 - Organization Number: 178 - Person Precision: 0.9843 - Person Recall: 0.9766 - Person F1: 0.9804 - Person Number: 128 - Overall Precision: 0.9436 - Overall Recall: 0.9388 - Overall F1: 0.9412 - Overall Accuracy: 0.9846 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Location Precision | Location Recall | Location F1 | Location Number | Organization Precision | Organization Recall | Organization F1 | Organization Number | Person Precision | Person Recall | Person F1 | Person Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------------------:|:---------------:|:-----------:|:---------------:|:----------------------:|:-------------------:|:---------------:|:-------------------:|:----------------:|:-------------:|:---------:|:-------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 1.1489 | 1.0 | 96 | 0.6808 | 0.0 | 0.0 | 0.0 | 86 | 0.0 | 0.0 | 0.0 | 178 | 0.0 | 0.0 | 0.0 | 128 | 0.0 | 0.0 | 0.0 | 0.8435 | | 0.6648 | 2.0 | 192 | 0.5508 | 0.0 | 0.0 | 0.0 | 86 | 0.5 | 0.0056 | 0.0111 | 178 | 0.0 | 0.0 | 0.0 | 128 | 0.3333 | 0.0026 | 0.0051 | 0.8437 | | 0.5545 | 3.0 | 288 | 0.4324 | 0.0 | 0.0 | 0.0 | 86 | 0.3793 | 0.0618 | 0.1063 | 178 | 0.3714 | 0.1016 | 0.1595 | 128 | 0.3636 | 0.0612 | 0.1048 | 0.8543 | | 0.4347 | 4.0 | 384 | 0.3185 | 0.3077 | 0.0465 | 0.0808 | 86 | 0.3876 | 0.2809 | 0.3257 | 178 | 0.4167 | 0.5078 | 0.4577 | 128 | 0.3993 | 0.3036 | 0.3449 | 0.8910 | | 0.3178 | 5.0 | 480 | 0.2349 | 0.5714 | 0.3721 | 0.4507 | 86 | 0.5476 | 0.6461 | 0.5928 | 178 | 0.5890 | 0.75 | 0.6598 | 128 | 0.5664 | 0.6199 | 0.5920 | 0.9320 | | 0.2406 | 6.0 | 576 | 0.1835 | 0.7407 | 0.6977 | 0.7186 | 86 | 0.6716 | 0.7584 | 0.7124 | 178 | 0.7467 | 0.875 | 0.8058 | 128 | 0.7106 | 0.7832 | 0.7451 | 0.9536 | | 0.1942 | 7.0 | 672 | 0.1519 | 0.7701 | 0.7791 | 0.7746 | 86 | 0.7114 | 0.8034 | 0.7546 | 178 | 0.8786 | 0.9609 | 0.9179 | 128 | 0.7780 | 0.8495 | 0.8122 | 0.9625 | | 0.1647 | 8.0 | 768 | 0.1279 | 0.7882 | 0.7791 | 0.7836 | 86 | 0.7487 | 0.8034 | 0.7751 | 178 | 0.8986 | 0.9688 | 0.9323 | 128 | 0.8068 | 0.8520 | 0.8288 | 0.9660 | | 0.1479 | 9.0 | 864 | 0.1130 | 0.7978 | 0.8256 | 0.8114 | 86 | 0.7602 | 0.8371 | 0.7968 | 178 | 0.9118 | 0.9688 | 0.9394 | 128 | 0.8171 | 0.8776 | 0.8462 | 
0.9690 | | 0.135 | 10.0 | 960 | 0.1037 | 0.7660 | 0.8372 | 0.8 | 86 | 0.7755 | 0.8539 | 0.8128 | 178 | 0.9179 | 0.9609 | 0.9389 | 128 | 0.8184 | 0.8852 | 0.8505 | 0.9682 | | 0.1317 | 11.0 | 1056 | 0.0951 | 0.7935 | 0.8488 | 0.8202 | 86 | 0.8182 | 0.8596 | 0.8384 | 178 | 0.9466 | 0.9688 | 0.9575 | 128 | 0.8537 | 0.8929 | 0.8728 | 0.9733 | | 0.1196 | 12.0 | 1152 | 0.0904 | 0.7708 | 0.8605 | 0.8132 | 86 | 0.8404 | 0.8876 | 0.8634 | 178 | 0.9328 | 0.9766 | 0.9542 | 128 | 0.8541 | 0.9107 | 0.8815 | 0.9749 | | 0.1108 | 13.0 | 1248 | 0.0824 | 0.7979 | 0.8721 | 0.8333 | 86 | 0.8466 | 0.8989 | 0.8719 | 178 | 0.9466 | 0.9688 | 0.9575 | 128 | 0.8671 | 0.9158 | 0.8908 | 0.9768 | | 0.107 | 14.0 | 1344 | 0.0797 | 0.8 | 0.8837 | 0.8398 | 86 | 0.8729 | 0.8876 | 0.8802 | 178 | 0.9394 | 0.9688 | 0.9538 | 128 | 0.8775 | 0.9133 | 0.895 | 0.9781 | | 0.1063 | 15.0 | 1440 | 0.0760 | 0.7872 | 0.8605 | 0.8222 | 86 | 0.8610 | 0.9045 | 0.8822 | 178 | 0.9394 | 0.9688 | 0.9538 | 128 | 0.8692 | 0.9158 | 0.8919 | 0.9776 | | 0.1 | 16.0 | 1536 | 0.0724 | 0.8462 | 0.8953 | 0.8701 | 86 | 0.8703 | 0.9045 | 0.8871 | 178 | 0.9538 | 0.9688 | 0.9612 | 128 | 0.8916 | 0.9235 | 0.9073 | 0.9795 | | 0.095 | 17.0 | 1632 | 0.0705 | 0.8261 | 0.8837 | 0.8539 | 86 | 0.8710 | 0.9101 | 0.8901 | 178 | 0.9466 | 0.9688 | 0.9575 | 128 | 0.8851 | 0.9235 | 0.9039 | 0.9789 | | 0.0932 | 18.0 | 1728 | 0.0698 | 0.8370 | 0.8953 | 0.8652 | 86 | 0.8944 | 0.9045 | 0.8994 | 178 | 0.9466 | 0.9688 | 0.9575 | 128 | 0.8983 | 0.9235 | 0.9107 | 0.9803 | | 0.0871 | 19.0 | 1824 | 0.0672 | 0.8387 | 0.9070 | 0.8715 | 86 | 0.8944 | 0.9045 | 0.8994 | 178 | 0.9466 | 0.9688 | 0.9575 | 128 | 0.8985 | 0.9260 | 0.9121 | 0.9800 | | 0.0883 | 20.0 | 1920 | 0.0650 | 0.8298 | 0.9070 | 0.8667 | 86 | 0.8944 | 0.9045 | 0.8994 | 178 | 0.9612 | 0.9688 | 0.9650 | 128 | 0.9007 | 0.9260 | 0.9132 | 0.9803 | | 0.0832 | 21.0 | 2016 | 0.0651 | 0.8298 | 0.9070 | 0.8667 | 86 | 0.8994 | 0.9045 | 0.9020 | 178 | 0.9612 | 0.9688 | 0.9650 | 128 | 0.9030 | 0.9260 | 0.9144 | 0.9811 | | 0.0829 | 22.0 | 2112 | 0.0645 | 0.8125 | 0.9070 | 0.8571 | 86 | 0.8663 | 0.9101 | 0.8877 | 178 | 0.9466 | 0.9688 | 0.9575 | 128 | 0.8792 | 0.9286 | 0.9032 | 0.9787 | | 0.0789 | 23.0 | 2208 | 0.0601 | 0.8211 | 0.9070 | 0.8619 | 86 | 0.8994 | 0.9045 | 0.9020 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9055 | 0.9286 | 0.9169 | 0.9819 | | 0.078 | 24.0 | 2304 | 0.0612 | 0.8211 | 0.9070 | 0.8619 | 86 | 0.8927 | 0.8876 | 0.8901 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9025 | 0.9209 | 0.9116 | 0.9806 | | 0.0756 | 25.0 | 2400 | 0.0594 | 0.8298 | 0.9070 | 0.8667 | 86 | 0.9045 | 0.9045 | 0.9045 | 178 | 0.9615 | 0.9766 | 0.9690 | 128 | 0.9055 | 0.9286 | 0.9169 | 0.9806 | | 0.0767 | 26.0 | 2496 | 0.0588 | 0.7822 | 0.9186 | 0.8449 | 86 | 0.8960 | 0.8708 | 0.8832 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.8930 | 0.9158 | 0.9043 | 0.9800 | | 0.0721 | 27.0 | 2592 | 0.0561 | 0.8125 | 0.9070 | 0.8571 | 86 | 0.8852 | 0.9101 | 0.8975 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.8968 | 0.9311 | 0.9136 | 0.9814 | | 0.0719 | 28.0 | 2688 | 0.0559 | 0.8404 | 0.9186 | 0.8778 | 86 | 0.9040 | 0.8989 | 0.9014 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9123 | 0.9286 | 0.9204 | 0.9819 | | 0.0702 | 29.0 | 2784 | 0.0543 | 0.8478 | 0.9070 | 0.8764 | 86 | 0.9016 | 0.9270 | 0.9141 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9132 | 0.9388 | 0.9258 | 0.9816 | | 0.0711 | 30.0 | 2880 | 0.0539 | 0.8667 | 0.9070 | 0.8864 | 86 | 0.9066 | 0.9270 | 0.9167 | 178 | 0.9690 | 0.9766 | 0.9728 | 128 | 0.9177 | 0.9388 | 0.9281 | 0.9819 | | 0.067 | 31.0 | 2976 | 0.0576 | 
0.8061 | 0.9186 | 0.8587 | 86 | 0.9101 | 0.9101 | 0.9101 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9059 | 0.9337 | 0.9196 | 0.9819 | | 0.0664 | 32.0 | 3072 | 0.0567 | 0.8211 | 0.9070 | 0.8619 | 86 | 0.9011 | 0.9213 | 0.9111 | 178 | 0.9690 | 0.9766 | 0.9728 | 128 | 0.9039 | 0.9362 | 0.9198 | 0.9814 | | 0.0642 | 33.0 | 3168 | 0.0558 | 0.8316 | 0.9186 | 0.8729 | 86 | 0.9096 | 0.9045 | 0.9070 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9125 | 0.9311 | 0.9217 | 0.9825 | | 0.0642 | 34.0 | 3264 | 0.0545 | 0.8587 | 0.9186 | 0.8876 | 86 | 0.9157 | 0.9157 | 0.9157 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9221 | 0.9362 | 0.9291 | 0.9835 | | 0.0624 | 35.0 | 3360 | 0.0542 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9111 | 0.9213 | 0.9162 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9223 | 0.9388 | 0.9305 | 0.9830 | | 0.0651 | 36.0 | 3456 | 0.0535 | 0.8778 | 0.9186 | 0.8977 | 86 | 0.9213 | 0.9213 | 0.9213 | 178 | 0.9690 | 0.9766 | 0.9728 | 128 | 0.9270 | 0.9388 | 0.9328 | 0.9833 | | 0.0635 | 37.0 | 3552 | 0.0523 | 0.8864 | 0.9070 | 0.8966 | 86 | 0.9111 | 0.9213 | 0.9162 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9268 | 0.9362 | 0.9315 | 0.9833 | | 0.0617 | 38.0 | 3648 | 0.0528 | 0.8587 | 0.9186 | 0.8876 | 86 | 0.9157 | 0.9157 | 0.9157 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9221 | 0.9362 | 0.9291 | 0.9838 | | 0.0581 | 39.0 | 3744 | 0.0548 | 0.8061 | 0.9186 | 0.8587 | 86 | 0.9091 | 0.8989 | 0.9040 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9055 | 0.9286 | 0.9169 | 0.9827 | | 0.0597 | 40.0 | 3840 | 0.0510 | 0.8778 | 0.9186 | 0.8977 | 86 | 0.9270 | 0.9270 | 0.9270 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9846 | | 0.0569 | 41.0 | 3936 | 0.0505 | 0.8778 | 0.9186 | 0.8977 | 86 | 0.9270 | 0.9270 | 0.9270 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9849 | | 0.0579 | 42.0 | 4032 | 0.0504 | 0.8778 | 0.9186 | 0.8977 | 86 | 0.9270 | 0.9270 | 0.9270 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9843 | | 0.0564 | 43.0 | 4128 | 0.0506 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9106 | 0.9157 | 0.9132 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9244 | 0.9362 | 0.9303 | 0.9843 | | 0.0572 | 44.0 | 4224 | 0.0499 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9116 | 0.9270 | 0.9192 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9248 | 0.9413 | 0.9330 | 0.9849 | | 0.0563 | 45.0 | 4320 | 0.0488 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9213 | 0.9213 | 0.9213 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9293 | 0.9388 | 0.9340 | 0.9843 | | 0.0594 | 46.0 | 4416 | 0.0507 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9167 | 0.9270 | 0.9218 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9248 | 0.9413 | 0.9330 | 0.9841 | | 0.0545 | 47.0 | 4512 | 0.0497 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9162 | 0.9213 | 0.9188 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9246 | 0.9388 | 0.9316 | 0.9846 | | 0.0536 | 48.0 | 4608 | 0.0487 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9162 | 0.9213 | 0.9188 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9246 | 0.9388 | 0.9316 | 0.9849 | | 0.0556 | 49.0 | 4704 | 0.0501 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9096 | 0.9045 | 0.9070 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9217 | 0.9311 | 0.9264 | 0.9833 | | 0.0522 | 50.0 | 4800 | 0.0506 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9162 | 0.9213 | 0.9188 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9271 | 0.9413 | 0.9342 | 0.9854 | | 0.0527 | 51.0 | 4896 | 0.0496 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9318 | 0.9213 | 0.9266 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9342 | 0.9413 | 0.9377 | 0.9852 | | 0.0529 | 52.0 | 4992 | 0.0490 | 0.8791 | 0.9302 | 0.9040 | 
86 | 0.9266 | 0.9213 | 0.9239 | 178 | 0.9688 | 0.9688 | 0.9688 | 128 | 0.9293 | 0.9388 | 0.9340 | 0.9852 | | 0.0522 | 53.0 | 5088 | 0.0494 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9157 | 0.9157 | 0.9157 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9270 | 0.9388 | 0.9328 | 0.9846 | | 0.0525 | 54.0 | 5184 | 0.0482 | 0.8889 | 0.9302 | 0.9091 | 86 | 0.9270 | 0.9270 | 0.9270 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9343 | 0.9439 | 0.9391 | 0.9860 | | 0.0512 | 55.0 | 5280 | 0.0488 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9318 | 0.9213 | 0.9266 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9854 | | 0.053 | 56.0 | 5376 | 0.0487 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9205 | 0.9101 | 0.9153 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9291 | 0.9362 | 0.9327 | 0.9849 | | 0.0498 | 57.0 | 5472 | 0.0486 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9293 | 0.9388 | 0.9340 | 0.9846 | | 0.0504 | 58.0 | 5568 | 0.0489 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9318 | 0.9213 | 0.9266 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9854 | | 0.0456 | 59.0 | 5664 | 0.0492 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9148 | 0.9045 | 0.9096 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9242 | 0.9337 | 0.9289 | 0.9846 | | 0.0504 | 60.0 | 5760 | 0.0475 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9153 | 0.9101 | 0.9127 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9242 | 0.9337 | 0.9289 | 0.9849 | | 0.0494 | 61.0 | 5856 | 0.0476 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9314 | 0.9157 | 0.9235 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9315 | 0.9362 | 0.9338 | 0.9852 | | 0.046 | 62.0 | 5952 | 0.0478 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9318 | 0.9213 | 0.9266 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9391 | 0.9439 | 0.9415 | 0.9860 | | 0.0463 | 63.0 | 6048 | 0.0485 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9162 | 0.9213 | 0.9188 | 178 | 0.9688 | 0.9688 | 0.9688 | 128 | 0.9223 | 0.9388 | 0.9305 | 0.9849 | | 0.0452 | 64.0 | 6144 | 0.0482 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9213 | 0.9213 | 0.9213 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9295 | 0.9413 | 0.9354 | 0.9852 | | 0.0446 | 65.0 | 6240 | 0.0492 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9111 | 0.9213 | 0.9162 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9271 | 0.9413 | 0.9342 | 0.9854 | | 0.0463 | 66.0 | 6336 | 0.0495 | 0.8587 | 0.9186 | 0.8876 | 86 | 0.9101 | 0.9101 | 0.9101 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9196 | 0.9337 | 0.9266 | 0.9843 | | 0.0466 | 67.0 | 6432 | 0.0491 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9101 | 0.9101 | 0.9101 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9244 | 0.9362 | 0.9303 | 0.9846 | | 0.0451 | 68.0 | 6528 | 0.0499 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9111 | 0.9213 | 0.9162 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9248 | 0.9413 | 0.9330 | 0.9852 | | 0.047 | 69.0 | 6624 | 0.0493 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9270 | 0.9388 | 0.9328 | 0.9852 | | 0.0435 | 70.0 | 6720 | 0.0485 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9157 | 0.9157 | 0.9157 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9270 | 0.9388 | 0.9328 | 0.9849 | | 0.045 | 71.0 | 6816 | 0.0490 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9111 | 0.9213 | 0.9162 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9248 | 0.9413 | 0.9330 | 0.9852 | | 0.0458 | 72.0 | 6912 | 0.0497 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9257 | 0.9101 | 0.9178 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9340 | 0.9388 | 0.9364 | 0.9849 | | 0.0442 | 73.0 | 7008 | 0.0495 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9157 | 0.9157 | 0.9157 
| 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9295 | 0.9413 | 0.9354 | 0.9854 | | 0.0442 | 74.0 | 7104 | 0.0490 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9153 | 0.9101 | 0.9127 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9293 | 0.9388 | 0.9340 | 0.9852 | | 0.0437 | 75.0 | 7200 | 0.0487 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9268 | 0.9362 | 0.9315 | 0.9841 | | 0.0458 | 76.0 | 7296 | 0.0493 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9316 | 0.9388 | 0.9352 | 0.9843 | | 0.0448 | 77.0 | 7392 | 0.0487 | 0.8681 | 0.9186 | 0.8927 | 86 | 0.9153 | 0.9101 | 0.9127 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9266 | 0.9337 | 0.9301 | 0.9838 | | 0.0451 | 78.0 | 7488 | 0.0495 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9293 | 0.9388 | 0.9340 | 0.9843 | | 0.0449 | 79.0 | 7584 | 0.0498 | 0.8791 | 0.9302 | 0.9040 | 86 | 0.9213 | 0.9213 | 0.9213 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9846 | | 0.0436 | 80.0 | 7680 | 0.0493 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9205 | 0.9101 | 0.9153 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9291 | 0.9362 | 0.9327 | 0.9843 | | 0.044 | 81.0 | 7776 | 0.0494 | 0.8804 | 0.9419 | 0.9101 | 86 | 0.9209 | 0.9157 | 0.9183 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9318 | 0.9413 | 0.9365 | 0.9852 | | 0.0438 | 82.0 | 7872 | 0.0485 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9257 | 0.9101 | 0.9178 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9291 | 0.9362 | 0.9327 | 0.9846 | | 0.0434 | 83.0 | 7968 | 0.0482 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9162 | 0.9213 | 0.9188 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9296 | 0.9439 | 0.9367 | 0.9857 | | 0.0418 | 84.0 | 8064 | 0.0485 | 0.8696 | 0.9302 | 0.8989 | 86 | 0.9101 | 0.9101 | 0.9101 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9221 | 0.9362 | 0.9291 | 0.9846 | | 0.0424 | 85.0 | 8160 | 0.0484 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9310 | 0.9101 | 0.9205 | 178 | 0.9766 | 0.9766 | 0.9766 | 128 | 0.9364 | 0.9388 | 0.9376 | 0.9849 | | 0.042 | 86.0 | 8256 | 0.0482 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9266 | 0.9213 | 0.9239 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9367 | 0.9439 | 0.9403 | 0.9857 | | 0.0431 | 87.0 | 8352 | 0.0482 | 0.8804 | 0.9419 | 0.9101 | 86 | 0.9257 | 0.9101 | 0.9178 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9340 | 0.9388 | 0.9364 | 0.9852 | | 0.0417 | 88.0 | 8448 | 0.0482 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9257 | 0.9101 | 0.9178 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9364 | 0.9388 | 0.9376 | 0.9849 | | 0.0421 | 89.0 | 8544 | 0.0482 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9261 | 0.9157 | 0.9209 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9365 | 0.9413 | 0.9389 | 0.9854 | | 0.0412 | 90.0 | 8640 | 0.0485 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9257 | 0.9101 | 0.9178 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9364 | 0.9388 | 0.9376 | 0.9852 | | 0.0407 | 91.0 | 8736 | 0.0484 | 0.8901 | 0.9419 | 0.9153 | 86 | 0.9310 | 0.9101 | 0.9205 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9388 | 0.9388 | 0.9388 | 0.9849 | | 0.0405 | 92.0 | 8832 | 0.0487 | 0.9 | 0.9419 | 0.9205 | 86 | 0.9364 | 0.9101 | 0.9231 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9436 | 0.9388 | 0.9412 | 0.9846 | | 0.0447 | 93.0 | 8928 | 0.0487 | 0.9 | 0.9419 | 0.9205 | 86 | 0.9364 | 0.9101 | 0.9231 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9436 | 0.9388 | 0.9412 | 0.9846 | | 0.0402 | 94.0 | 9024 | 0.0487 | 0.9 | 0.9419 | 0.9205 | 86 | 0.9310 | 0.9101 | 0.9205 | 178 | 0.9843 | 0.9766 | 0.9804 | 
128 | 0.9412 | 0.9388 | 0.9400 | 0.9849 | | 0.0406 | 95.0 | 9120 | 0.0485 | 0.9 | 0.9419 | 0.9205 | 86 | 0.9364 | 0.9101 | 0.9231 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9436 | 0.9388 | 0.9412 | 0.9846 | | 0.0413 | 96.0 | 9216 | 0.0485 | 0.9 | 0.9419 | 0.9205 | 86 | 0.9364 | 0.9101 | 0.9231 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9436 | 0.9388 | 0.9412 | 0.9846 | | 0.0404 | 97.0 | 9312 | 0.0484 | 0.9 | 0.9419 | 0.9205 | 86 | 0.9368 | 0.9157 | 0.9261 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9437 | 0.9413 | 0.9425 | 0.9852 | | 0.0403 | 98.0 | 9408 | 0.0485 | 0.9 | 0.9419 | 0.9205 | 86 | 0.9364 | 0.9101 | 0.9231 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9436 | 0.9388 | 0.9412 | 0.9846 | | 0.0403 | 99.0 | 9504 | 0.0484 | 0.9 | 0.9419 | 0.9205 | 86 | 0.9364 | 0.9101 | 0.9231 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9436 | 0.9388 | 0.9412 | 0.9846 | | 0.0417 | 100.0 | 9600 | 0.0484 | 0.9 | 0.9419 | 0.9205 | 86 | 0.9364 | 0.9101 | 0.9231 | 178 | 0.9843 | 0.9766 | 0.9804 | 128 | 0.9436 | 0.9388 | 0.9412 | 0.9846 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.3.0+cu121 - Datasets 2.19.1 - Tokenizers 0.15.2
rubenamtz0/llama-3-8b-lora-law2entity
rubenamtz0
2024-06-04T02:19:37Z
15
1
peft
[ "peft", "safetensors", "gguf", "llama", "axolotl", "generated_from_trainer", "dataset:rubenamtz0/law_entity_recognition", "base_model:meta-llama/Meta-Llama-3-8B", "base_model:adapter:meta-llama/Meta-Llama-3-8B", "license:llama3", "8-bit", "bitsandbytes", "region:us" ]
null
2024-06-02T01:21:16Z
---
license: llama3
library_name: peft
tags:
- axolotl
- generated_from_trainer
base_model: meta-llama/Meta-Llama-3-8B
model-index:
- name: llama-3-8b-lora-law2entity
  results: []
datasets:
- rubenamtz0/law_entity_recognition
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.4.1`
```yaml
base_model: meta-llama/Meta-Llama-3-8B
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: true
load_in_4bit: false
strict: false
datasets:
  - path: rubenamtz0/law_entity_recognition
    type: alpaca
dataset_prepared_path:
val_set_size: 0.1
output_dir: ./outputs/lora-law
hub_model_id: rubenamtz0/llama-3-8b-lora-law2entity
sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true
adapter: lora
lora_model_dir:
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
wandb_project: entity-relationship-claim-ft
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 4
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false
gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
s2_attention:
warmup_steps: 10
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  pad_token: <|end_of_text|>
```

</details><br>

# llama-3-8b-lora-law2entity

This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the rubenamtz0/law_entity_recognition dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1490

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- distributed_type: multi-GPU
- num_devices: 3
- gradient_accumulation_steps: 4
- total_train_batch_size: 24
- total_eval_batch_size: 6
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 10
- num_epochs: 4

### Training results

| Training Loss | Epoch  | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| 0.2735        | 0.05   | 1    | 0.2923          |
| 0.2852        | 0.25   | 5    | 0.2742          |
| 0.2007        | 0.5    | 10   | 0.2015          |
| 0.1742        | 0.75   | 15   | 0.1807          |
| 0.1854        | 1.0    | 20   | 0.1688          |
| 0.159         | 1.1125 | 25   | 0.1630          |
| 0.1444        | 1.3625 | 30   | 0.1592          |
| 0.1479        | 1.6125 | 35   | 0.1565          |
| 0.1505        | 1.8625 | 40   | 0.1538          |
| 0.1369        | 2.1125 | 45   | 0.1518          |
| 0.1348        | 2.2125 | 50   | 0.1512          |
| 0.1287        | 2.4625 | 55   | 0.1510          |
| 0.1359        | 2.7125 | 60   | 0.1498          |
| 0.1367        | 2.9625 | 65   | 0.1491          |
| 0.1218        | 3.075  | 70   | 0.1491          |
| 0.1285        | 3.325  | 75   | 0.1493          |
| 0.1307        | 3.575  | 80   | 0.1490          |

### Framework versions

- PEFT 0.11.1
- Transformers 4.41.1
- Pytorch 2.1.2+cu118
- Datasets 2.19.1
- Tokenizers 0.19.1
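## Usage

The card stops at the training metrics, so here is a minimal inference sketch. It assumes the repository stores a standard PEFT LoRA adapter (consistent with `adapter: lora` and the `peft` library tag) and that prompts should mirror the Alpaca template implied by `type: alpaca` in the config above; the instruction wording and example input are purely illustrative.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Meta-Llama-3-8B"
adapter_id = "rubenamtz0/llama-3-8b-lora-law2entity"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA adapter

# Alpaca-style prompt, matching `type: alpaca` in the axolotl config above
# (the exact template wording is an assumption)
prompt = (
    "Below is an instruction that describes a task, paired with an input that provides "
    "further context. Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nExtract the legal entities mentioned in the text.\n\n"
    "### Input:\nThe plaintiff filed a motion before the district court.\n\n"
    "### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```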
amir1226/q-taxi-v3
amir1226
2024-06-04T02:16:11Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2024-06-04T02:16:09Z
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# `load_from_hub` is the pickle-loading helper from the Hugging Face Deep RL course;
# a sketch of it is given below this snippet.
model = load_from_hub(repo_id="amir1226/q-taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
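The snippet above calls `load_from_hub` without defining it. Below is a minimal sketch of that helper plus a greedy rollout, assuming the pickle follows the Deep RL course layout (a dict holding the Q-table under `qtable`; only the `env_id` key is confirmed by the snippet above).

```python
import pickle

import gym
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled model dict from the Hub and deserialize it."""
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(repo_id="amir1226/q-taxi-v3", filename="q-learning.pkl")
env = gym.make(model["env_id"])
qtable = model["qtable"]  # assumption: the course notebooks store the Q-table under this key

# Greedy rollout: always take the action with the highest Q-value.
# On gym>=0.26 / gymnasium, reset() returns (state, info) and step() returns five values.
state = env.reset()
total_reward, done = 0.0, False
while not done:
    action = int(qtable[state].argmax())
    state, reward, done, info = env.step(action)
    total_reward += reward
print(f"Episode return: {total_reward}")
```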
apwic/nerui-lora-r16-2
apwic
2024-06-04T02:11:49Z
0
0
null
[ "tensorboard", "generated_from_trainer", "id", "base_model:indolem/indobert-base-uncased", "base_model:finetune:indolem/indobert-base-uncased", "license:mit", "region:us" ]
null
2024-05-28T13:41:36Z
--- language: - id license: mit base_model: indolem/indobert-base-uncased tags: - generated_from_trainer model-index: - name: nerui-lora-r16-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nerui-lora-r16-2 This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0417 - Location Precision: 0.8713 - Location Recall: 0.9462 - Location F1: 0.9072 - Location Number: 93 - Organization Precision: 0.8909 - Organization Recall: 0.8855 - Organization F1: 0.8882 - Organization Number: 166 - Person Precision: 0.9787 - Person Recall: 0.9718 - Person F1: 0.9753 - Person Number: 142 - Overall Precision: 0.9165 - Overall Recall: 0.9302 - Overall F1: 0.9233 - Overall Accuracy: 0.9868 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Location Precision | Location Recall | Location F1 | Location Number | Organization Precision | Organization Recall | Organization F1 | Organization Number | Person Precision | Person Recall | Person F1 | Person Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------------------:|:---------------:|:-----------:|:---------------:|:----------------------:|:-------------------:|:---------------:|:-------------------:|:----------------:|:-------------:|:---------:|:-------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 1.0607 | 1.0 | 96 | 0.6772 | 0.0 | 0.0 | 0.0 | 93 | 0.0 | 0.0 | 0.0 | 166 | 0.0 | 0.0 | 0.0 | 142 | 0.0 | 0.0 | 0.0 | 0.8343 | | 0.6351 | 2.0 | 192 | 0.5251 | 0.0 | 0.0 | 0.0 | 93 | 0.5 | 0.0120 | 0.0235 | 166 | 0.0 | 0.0 | 0.0 | 142 | 0.3333 | 0.0050 | 0.0098 | 0.8348 | | 0.4897 | 3.0 | 288 | 0.3649 | 0.0 | 0.0 | 0.0 | 93 | 0.3529 | 0.2169 | 0.2687 | 166 | 0.3286 | 0.3239 | 0.3262 | 142 | 0.3267 | 0.2045 | 0.2515 | 0.8763 | | 0.335 | 4.0 | 384 | 0.2323 | 0.3684 | 0.3011 | 0.3314 | 93 | 0.5099 | 0.6205 | 0.5598 | 166 | 0.5683 | 0.7324 | 0.6400 | 142 | 0.5098 | 0.5860 | 0.5452 | 0.9289 | | 0.2342 | 5.0 | 480 | 0.1642 | 0.5895 | 0.6022 | 0.5957 | 93 | 0.6396 | 0.7590 | 0.6942 | 166 | 0.8269 | 0.9085 | 0.8658 | 142 | 0.6942 | 0.7756 | 0.7326 | 0.9564 | | 0.1832 | 6.0 | 576 | 0.1316 | 0.7027 | 0.8387 | 0.7647 | 93 | 0.7432 | 0.8193 | 0.7794 | 166 | 0.9257 | 0.9648 | 0.9448 | 142 | 0.7941 | 0.8753 | 0.8327 | 0.9657 | | 0.1526 | 7.0 | 672 | 0.1085 | 0.7692 | 0.8602 | 0.8122 | 93 | 0.7433 | 0.8373 | 0.7875 | 166 | 0.9079 | 0.9718 | 0.9388 | 142 | 0.8059 | 0.8903 | 0.8460 | 0.9690 | | 0.136 | 8.0 | 768 | 0.0910 | 0.75 | 0.8710 | 0.8060 | 93 | 0.8011 | 0.8494 | 0.8246 | 166 | 0.9262 | 0.9718 | 0.9485 | 142 | 0.8314 | 0.8978 | 0.8633 | 0.9734 | | 0.1234 | 9.0 | 864 | 0.0817 | 0.7981 | 0.8925 | 0.8426 | 93 | 0.8229 | 0.8675 | 0.8446 | 166 | 0.9133 | 0.9648 | 0.9384 | 142 | 0.8485 | 0.9077 | 
0.8771 | 0.9753 | | 0.1123 | 10.0 | 960 | 0.0774 | 0.7981 | 0.8925 | 0.8426 | 93 | 0.8207 | 0.9096 | 0.8629 | 166 | 0.9388 | 0.9718 | 0.9550 | 142 | 0.8552 | 0.9277 | 0.8900 | 0.9772 | | 0.1042 | 11.0 | 1056 | 0.0683 | 0.8039 | 0.8817 | 0.8410 | 93 | 0.8371 | 0.8976 | 0.8663 | 166 | 0.9448 | 0.9648 | 0.9547 | 142 | 0.8659 | 0.9177 | 0.8910 | 0.9789 | | 0.1 | 12.0 | 1152 | 0.0661 | 0.8317 | 0.9032 | 0.8660 | 93 | 0.8436 | 0.9096 | 0.8754 | 166 | 0.9514 | 0.9648 | 0.9580 | 142 | 0.8774 | 0.9277 | 0.9018 | 0.9800 | | 0.0949 | 13.0 | 1248 | 0.0622 | 0.8416 | 0.9140 | 0.8763 | 93 | 0.8571 | 0.9036 | 0.8798 | 166 | 0.9580 | 0.9648 | 0.9614 | 142 | 0.8878 | 0.9277 | 0.9073 | 0.9811 | | 0.091 | 14.0 | 1344 | 0.0597 | 0.8173 | 0.9140 | 0.8629 | 93 | 0.8788 | 0.8735 | 0.8761 | 166 | 0.9580 | 0.9648 | 0.9614 | 142 | 0.8908 | 0.9152 | 0.9028 | 0.9802 | | 0.0852 | 15.0 | 1440 | 0.0593 | 0.84 | 0.9032 | 0.8705 | 93 | 0.8306 | 0.9157 | 0.8711 | 166 | 0.9650 | 0.9718 | 0.9684 | 142 | 0.8779 | 0.9327 | 0.9045 | 0.9800 | | 0.0874 | 16.0 | 1536 | 0.0591 | 0.7838 | 0.9355 | 0.8529 | 93 | 0.8538 | 0.8795 | 0.8665 | 166 | 0.9514 | 0.9648 | 0.9580 | 142 | 0.8685 | 0.9227 | 0.8948 | 0.9797 | | 0.0817 | 17.0 | 1632 | 0.0538 | 0.8350 | 0.9247 | 0.8776 | 93 | 0.8876 | 0.9036 | 0.8955 | 166 | 0.9580 | 0.9648 | 0.9614 | 142 | 0.8988 | 0.9302 | 0.9142 | 0.9830 | | 0.0784 | 18.0 | 1728 | 0.0511 | 0.8350 | 0.9247 | 0.8776 | 93 | 0.8830 | 0.9096 | 0.8961 | 166 | 0.9580 | 0.9648 | 0.9614 | 142 | 0.8969 | 0.9327 | 0.9144 | 0.9833 | | 0.0764 | 19.0 | 1824 | 0.0523 | 0.7890 | 0.9247 | 0.8515 | 93 | 0.8841 | 0.8735 | 0.8788 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.8892 | 0.9202 | 0.9044 | 0.9822 | | 0.0735 | 20.0 | 1920 | 0.0524 | 0.8018 | 0.9570 | 0.8725 | 93 | 0.8889 | 0.8675 | 0.8780 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.8940 | 0.9252 | 0.9093 | 0.9819 | | 0.074 | 21.0 | 2016 | 0.0519 | 0.8 | 0.9462 | 0.8670 | 93 | 0.8788 | 0.8735 | 0.8761 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.8897 | 0.9252 | 0.9071 | 0.9822 | | 0.0695 | 22.0 | 2112 | 0.0529 | 0.7857 | 0.9462 | 0.8585 | 93 | 0.8353 | 0.8554 | 0.8452 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.8679 | 0.9177 | 0.8921 | 0.9805 | | 0.0673 | 23.0 | 2208 | 0.0519 | 0.8056 | 0.9355 | 0.8657 | 93 | 0.9045 | 0.8554 | 0.8793 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9017 | 0.9152 | 0.9084 | 0.9824 | | 0.0677 | 24.0 | 2304 | 0.0530 | 0.7982 | 0.9355 | 0.8614 | 93 | 0.9045 | 0.8554 | 0.8793 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.8995 | 0.9152 | 0.9073 | 0.9811 | | 0.0649 | 25.0 | 2400 | 0.0501 | 0.8018 | 0.9570 | 0.8725 | 93 | 0.8994 | 0.8614 | 0.8800 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.8981 | 0.9227 | 0.9102 | 0.9822 | | 0.0647 | 26.0 | 2496 | 0.0478 | 0.8365 | 0.9355 | 0.8832 | 93 | 0.9057 | 0.8675 | 0.8862 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9111 | 0.9202 | 0.9156 | 0.9838 | | 0.0579 | 27.0 | 2592 | 0.0466 | 0.8208 | 0.9355 | 0.8744 | 93 | 0.8963 | 0.8855 | 0.8909 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9029 | 0.9277 | 0.9151 | 0.9835 | | 0.0627 | 28.0 | 2688 | 0.0488 | 0.8131 | 0.9355 | 0.8700 | 93 | 0.8855 | 0.8855 | 0.8855 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.8964 | 0.9277 | 0.9118 | 0.9819 | | 0.0601 | 29.0 | 2784 | 0.0487 | 0.8131 | 0.9355 | 0.8700 | 93 | 0.8882 | 0.9096 | 0.8988 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.8974 | 0.9377 | 0.9171 | 0.9827 | | 0.0575 | 30.0 | 2880 | 0.0459 | 0.8286 | 0.9355 | 0.8788 | 93 | 0.8922 | 0.8976 | 0.8949 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9034 | 0.9327 | 0.9178 | 0.9833 | | 0.0569 | 31.0 | 
2976 | 0.0455 | 0.8073 | 0.9462 | 0.8713 | 93 | 0.8951 | 0.8735 | 0.8841 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.8983 | 0.9252 | 0.9115 | 0.9841 | | 0.0548 | 32.0 | 3072 | 0.0445 | 0.8224 | 0.9462 | 0.88 | 93 | 0.8889 | 0.8675 | 0.8780 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9002 | 0.9227 | 0.9113 | 0.9846 | | 0.0528 | 33.0 | 3168 | 0.0471 | 0.7946 | 0.9570 | 0.8683 | 93 | 0.8944 | 0.8675 | 0.8807 | 166 | 0.9858 | 0.9789 | 0.9823 | 142 | 0.8986 | 0.9277 | 0.9129 | 0.9827 | | 0.0533 | 34.0 | 3264 | 0.0445 | 0.8073 | 0.9462 | 0.8713 | 93 | 0.8802 | 0.8855 | 0.8829 | 166 | 0.9789 | 0.9789 | 0.9789 | 142 | 0.8947 | 0.9327 | 0.9133 | 0.9833 | | 0.0503 | 35.0 | 3360 | 0.0425 | 0.8286 | 0.9355 | 0.8788 | 93 | 0.8922 | 0.8976 | 0.8949 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9034 | 0.9327 | 0.9178 | 0.9852 | | 0.0531 | 36.0 | 3456 | 0.0447 | 0.7928 | 0.9462 | 0.8627 | 93 | 0.8957 | 0.8795 | 0.8875 | 166 | 0.9648 | 0.9648 | 0.9648 | 142 | 0.8918 | 0.9252 | 0.9082 | 0.9830 | | 0.0493 | 37.0 | 3552 | 0.0442 | 0.8365 | 0.9355 | 0.8832 | 93 | 0.8970 | 0.8916 | 0.8943 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9075 | 0.9302 | 0.9187 | 0.9841 | | 0.05 | 38.0 | 3648 | 0.0423 | 0.87 | 0.9355 | 0.9016 | 93 | 0.9042 | 0.9096 | 0.9069 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9193 | 0.9377 | 0.9284 | 0.9857 | | 0.0489 | 39.0 | 3744 | 0.0416 | 0.8529 | 0.9355 | 0.8923 | 93 | 0.8994 | 0.9157 | 0.9075 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9128 | 0.9401 | 0.9263 | 0.9855 | | 0.0481 | 40.0 | 3840 | 0.0411 | 0.8544 | 0.9462 | 0.8980 | 93 | 0.9068 | 0.8795 | 0.8930 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9163 | 0.9277 | 0.9219 | 0.9852 | | 0.0462 | 41.0 | 3936 | 0.0429 | 0.8286 | 0.9355 | 0.8788 | 93 | 0.9036 | 0.9036 | 0.9036 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9102 | 0.9352 | 0.9225 | 0.9855 | | 0.0468 | 42.0 | 4032 | 0.0435 | 0.8302 | 0.9462 | 0.8844 | 93 | 0.9030 | 0.8976 | 0.9003 | 166 | 0.9858 | 0.9789 | 0.9823 | 142 | 0.9126 | 0.9377 | 0.9250 | 0.9846 | | 0.0469 | 43.0 | 4128 | 0.0423 | 0.8878 | 0.9355 | 0.9110 | 93 | 0.8976 | 0.8976 | 0.8976 | 166 | 0.9858 | 0.9789 | 0.9823 | 142 | 0.9259 | 0.9352 | 0.9305 | 0.9860 | | 0.0472 | 44.0 | 4224 | 0.0460 | 0.8148 | 0.9462 | 0.8756 | 93 | 0.8938 | 0.8614 | 0.8773 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9 | 0.9202 | 0.9100 | 0.9830 | | 0.0468 | 45.0 | 4320 | 0.0420 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.9062 | 0.8735 | 0.8896 | 166 | 0.9858 | 0.9789 | 0.9823 | 142 | 0.9254 | 0.9277 | 0.9265 | 0.9852 | | 0.0453 | 46.0 | 4416 | 0.0425 | 0.8462 | 0.9462 | 0.8934 | 93 | 0.8994 | 0.8614 | 0.8800 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9111 | 0.9202 | 0.9156 | 0.9852 | | 0.0428 | 47.0 | 4512 | 0.0432 | 0.8788 | 0.9355 | 0.9062 | 93 | 0.8902 | 0.9277 | 0.9086 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9177 | 0.9451 | 0.9312 | 0.9855 | | 0.043 | 48.0 | 4608 | 0.0433 | 0.8381 | 0.9462 | 0.8889 | 93 | 0.8924 | 0.8494 | 0.8704 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9062 | 0.9152 | 0.9107 | 0.9841 | | 0.0443 | 49.0 | 4704 | 0.0437 | 0.8529 | 0.9355 | 0.8923 | 93 | 0.8929 | 0.9036 | 0.8982 | 166 | 0.9648 | 0.9648 | 0.9648 | 142 | 0.9078 | 0.9327 | 0.9200 | 0.9846 | | 0.0466 | 50.0 | 4800 | 0.0430 | 0.8627 | 0.9462 | 0.9026 | 93 | 0.8922 | 0.8976 | 0.8949 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9146 | 0.9352 | 0.9248 | 0.9860 | | 0.0419 | 51.0 | 4896 | 0.0430 | 0.8462 | 0.9462 | 0.8934 | 93 | 0.8951 | 0.8735 | 0.8841 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9115 | 0.9252 | 0.9183 | 0.9852 | | 0.0421 | 52.0 | 4992 | 0.0404 | 0.9158 | 0.9355 | 
0.9255 | 93 | 0.8953 | 0.9277 | 0.9112 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9289 | 0.9451 | 0.9370 | 0.9874 | | 0.0409 | 53.0 | 5088 | 0.0431 | 0.8462 | 0.9462 | 0.8934 | 93 | 0.8982 | 0.9036 | 0.9009 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9126 | 0.9377 | 0.9250 | 0.9857 | | 0.0391 | 54.0 | 5184 | 0.0417 | 0.8969 | 0.9355 | 0.9158 | 93 | 0.9012 | 0.9337 | 0.9172 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9268 | 0.9476 | 0.9371 | 0.9868 | | 0.0383 | 55.0 | 5280 | 0.0402 | 0.8980 | 0.9462 | 0.9215 | 93 | 0.9053 | 0.9217 | 0.9134 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9289 | 0.9451 | 0.9370 | 0.9877 | | 0.0399 | 56.0 | 5376 | 0.0431 | 0.8627 | 0.9462 | 0.9026 | 93 | 0.9048 | 0.9157 | 0.9102 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9197 | 0.9426 | 0.9310 | 0.9855 | | 0.04 | 57.0 | 5472 | 0.0425 | 0.8544 | 0.9462 | 0.8980 | 93 | 0.9024 | 0.8916 | 0.8970 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9167 | 0.9327 | 0.9246 | 0.9855 | | 0.04 | 58.0 | 5568 | 0.0422 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.9146 | 0.9036 | 0.9091 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9261 | 0.9377 | 0.9318 | 0.9868 | | 0.0372 | 59.0 | 5664 | 0.0425 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.9036 | 0.9036 | 0.9036 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9216 | 0.9377 | 0.9295 | 0.9863 | | 0.0384 | 60.0 | 5760 | 0.0422 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.9146 | 0.9036 | 0.9091 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9261 | 0.9377 | 0.9318 | 0.9866 | | 0.0379 | 61.0 | 5856 | 0.0402 | 0.8627 | 0.9462 | 0.9026 | 93 | 0.9091 | 0.9036 | 0.9063 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9216 | 0.9377 | 0.9295 | 0.9877 | | 0.0362 | 62.0 | 5952 | 0.0387 | 0.8889 | 0.9462 | 0.9167 | 93 | 0.9036 | 0.9036 | 0.9036 | 166 | 0.9648 | 0.9648 | 0.9648 | 142 | 0.9214 | 0.9352 | 0.9282 | 0.9871 | | 0.036 | 63.0 | 6048 | 0.0424 | 0.8381 | 0.9462 | 0.8889 | 93 | 0.9030 | 0.8976 | 0.9003 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9124 | 0.9352 | 0.9236 | 0.9852 | | 0.036 | 64.0 | 6144 | 0.0404 | 0.88 | 0.9462 | 0.9119 | 93 | 0.9024 | 0.8916 | 0.8970 | 166 | 0.9580 | 0.9648 | 0.9614 | 142 | 0.9165 | 0.9302 | 0.9233 | 0.9857 | | 0.033 | 65.0 | 6240 | 0.0419 | 0.8544 | 0.9462 | 0.8980 | 93 | 0.9030 | 0.8976 | 0.9003 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9169 | 0.9352 | 0.9259 | 0.9857 | | 0.0348 | 66.0 | 6336 | 0.0396 | 0.88 | 0.9462 | 0.9119 | 93 | 0.9024 | 0.8916 | 0.8970 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9235 | 0.9327 | 0.9280 | 0.9868 | | 0.0346 | 67.0 | 6432 | 0.0410 | 0.8627 | 0.9462 | 0.9026 | 93 | 0.8862 | 0.8916 | 0.8889 | 166 | 0.9648 | 0.9648 | 0.9648 | 142 | 0.9075 | 0.9302 | 0.9187 | 0.9849 | | 0.0337 | 68.0 | 6528 | 0.0416 | 0.8544 | 0.9462 | 0.8980 | 93 | 0.9030 | 0.8976 | 0.9003 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9169 | 0.9352 | 0.9259 | 0.9857 | | 0.0355 | 69.0 | 6624 | 0.0418 | 0.8627 | 0.9462 | 0.9026 | 93 | 0.8909 | 0.8855 | 0.8882 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9142 | 0.9302 | 0.9221 | 0.9855 | | 0.0337 | 70.0 | 6720 | 0.0408 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.9146 | 0.9036 | 0.9091 | 166 | 0.9718 | 0.9718 | 0.9718 | 142 | 0.9238 | 0.9377 | 0.9307 | 0.9863 | | 0.0351 | 71.0 | 6816 | 0.0411 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.9152 | 0.9096 | 0.9124 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9263 | 0.9401 | 0.9332 | 0.9860 | | 0.0337 | 72.0 | 6912 | 0.0411 | 0.9072 | 0.9462 | 0.9263 | 93 | 0.8929 | 0.9036 | 0.8982 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9261 | 0.9377 | 0.9318 | 0.9866 | | 0.0317 | 73.0 | 7008 | 0.0415 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.9036 | 0.9036 | 
0.9036 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9216 | 0.9377 | 0.9295 | 0.9860 | | 0.0308 | 74.0 | 7104 | 0.0442 | 0.8558 | 0.9570 | 0.9036 | 93 | 0.9202 | 0.9036 | 0.9119 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9240 | 0.9401 | 0.9320 | 0.9860 | | 0.0331 | 75.0 | 7200 | 0.0416 | 0.9072 | 0.9462 | 0.9263 | 93 | 0.9053 | 0.9217 | 0.9134 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9312 | 0.9451 | 0.9381 | 0.9879 | | 0.0307 | 76.0 | 7296 | 0.0426 | 0.8725 | 0.9570 | 0.9128 | 93 | 0.8963 | 0.8855 | 0.8909 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9189 | 0.9327 | 0.9257 | 0.9860 | | 0.0311 | 77.0 | 7392 | 0.0411 | 0.8889 | 0.9462 | 0.9167 | 93 | 0.8869 | 0.8976 | 0.8922 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9191 | 0.9352 | 0.9271 | 0.9871 | | 0.0321 | 78.0 | 7488 | 0.0421 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.8862 | 0.8916 | 0.8889 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9144 | 0.9327 | 0.9235 | 0.9863 | | 0.0314 | 79.0 | 7584 | 0.0419 | 0.88 | 0.9462 | 0.9119 | 93 | 0.8869 | 0.8976 | 0.8922 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9169 | 0.9352 | 0.9259 | 0.9866 | | 0.0327 | 80.0 | 7680 | 0.0420 | 0.88 | 0.9462 | 0.9119 | 93 | 0.9096 | 0.9096 | 0.9096 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9263 | 0.9401 | 0.9332 | 0.9868 | | 0.0338 | 81.0 | 7776 | 0.0423 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.9091 | 0.9036 | 0.9063 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9238 | 0.9377 | 0.9307 | 0.9871 | | 0.0326 | 82.0 | 7872 | 0.0430 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.9080 | 0.8916 | 0.8997 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9235 | 0.9327 | 0.9280 | 0.9857 | | 0.0311 | 83.0 | 7968 | 0.0420 | 0.8889 | 0.9462 | 0.9167 | 93 | 0.8970 | 0.8916 | 0.8943 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9235 | 0.9327 | 0.9280 | 0.9857 | | 0.0319 | 84.0 | 8064 | 0.0435 | 0.8462 | 0.9462 | 0.8934 | 93 | 0.8970 | 0.8916 | 0.8943 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9122 | 0.9327 | 0.9223 | 0.9855 | | 0.0312 | 85.0 | 8160 | 0.0414 | 0.88 | 0.9462 | 0.9119 | 93 | 0.8909 | 0.8855 | 0.8882 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9187 | 0.9302 | 0.9244 | 0.9863 | | 0.0313 | 86.0 | 8256 | 0.0418 | 0.88 | 0.9462 | 0.9119 | 93 | 0.8862 | 0.8916 | 0.8889 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9167 | 0.9327 | 0.9246 | 0.9866 | | 0.0315 | 87.0 | 8352 | 0.0414 | 0.88 | 0.9462 | 0.9119 | 93 | 0.8916 | 0.8916 | 0.8916 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9189 | 0.9327 | 0.9257 | 0.9868 | | 0.0314 | 88.0 | 8448 | 0.0415 | 0.88 | 0.9462 | 0.9119 | 93 | 0.9024 | 0.8916 | 0.8970 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9235 | 0.9327 | 0.9280 | 0.9866 | | 0.0301 | 89.0 | 8544 | 0.0416 | 0.88 | 0.9462 | 0.9119 | 93 | 0.8970 | 0.8916 | 0.8943 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9212 | 0.9327 | 0.9269 | 0.9868 | | 0.0303 | 90.0 | 8640 | 0.0410 | 0.88 | 0.9462 | 0.9119 | 93 | 0.9030 | 0.8976 | 0.9003 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9236 | 0.9352 | 0.9294 | 0.9866 | | 0.0292 | 91.0 | 8736 | 0.0412 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.8909 | 0.8855 | 0.8882 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9165 | 0.9302 | 0.9233 | 0.9863 | | 0.0292 | 92.0 | 8832 | 0.0424 | 0.88 | 0.9462 | 0.9119 | 93 | 0.9080 | 0.8916 | 0.8997 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9257 | 0.9327 | 0.9292 | 0.9868 | | 0.0295 | 93.0 | 8928 | 0.0426 | 0.88 | 0.9462 | 0.9119 | 93 | 0.9080 | 0.8916 | 0.8997 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9257 | 0.9327 | 0.9292 | 0.9866 | | 0.0304 | 94.0 | 9024 | 0.0422 | 0.88 | 0.9462 | 0.9119 | 93 | 0.8963 | 0.8855 | 0.8909 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 
0.9210 | 0.9302 | 0.9256 | 0.9866 | | 0.0304 | 95.0 | 9120 | 0.0415 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.8855 | 0.8855 | 0.8855 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9142 | 0.9302 | 0.9221 | 0.9866 | | 0.0312 | 96.0 | 9216 | 0.0415 | 0.88 | 0.9462 | 0.9119 | 93 | 0.8862 | 0.8916 | 0.8889 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9167 | 0.9327 | 0.9246 | 0.9868 | | 0.0291 | 97.0 | 9312 | 0.0418 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.8855 | 0.8855 | 0.8855 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9142 | 0.9302 | 0.9221 | 0.9866 | | 0.0306 | 98.0 | 9408 | 0.0417 | 0.88 | 0.9462 | 0.9119 | 93 | 0.8916 | 0.8916 | 0.8916 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9189 | 0.9327 | 0.9257 | 0.9871 | | 0.0293 | 99.0 | 9504 | 0.0417 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.8909 | 0.8855 | 0.8882 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9165 | 0.9302 | 0.9233 | 0.9868 | | 0.0302 | 100.0 | 9600 | 0.0417 | 0.8713 | 0.9462 | 0.9072 | 93 | 0.8909 | 0.8855 | 0.8882 | 166 | 0.9787 | 0.9718 | 0.9753 | 142 | 0.9165 | 0.9302 | 0.9233 | 0.9868 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.3.0+cu121 - Datasets 2.19.1 - Tokenizers 0.15.2
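These autogenerated cards include no inference example. The sketch below shows one plausible way to use the adapter, assuming the repository holds a PEFT LoRA adapter for token classification on top of the IndoBERT base; the seven BIO labels are an assumption inferred from the Location/Organization/Person metrics above, and the example sentence is illustrative.

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification
from peft import PeftModel

base_id = "indolem/indobert-base-uncased"
adapter_id = "apwic/nerui-lora-r16-2"

# Assumption: 7 BIO labels covering the Location/Organization/Person classes scored above
labels = ["O", "B-LOC", "I-LOC", "B-ORG", "I-ORG", "B-PER", "I-PER"]

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForTokenClassification.from_pretrained(base_id, num_labels=len(labels))
model = PeftModel.from_pretrained(base, adapter_id)  # attach the LoRA adapter

text = "Joko Widodo mengunjungi kantor Google di Jakarta."
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
predictions = logits.argmax(dim=-1)[0]
for token, pred in zip(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]), predictions):
    print(token, labels[int(pred)])
```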
zaynu/llama2-finetune
zaynu
2024-06-04T01:56:39Z
5
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T01:35:17Z
---
license: apache-2.0
---
apwic/nerui-lora-r16-1
apwic
2024-06-04T01:54:08Z
0
0
null
[ "tensorboard", "generated_from_trainer", "id", "base_model:indolem/indobert-base-uncased", "base_model:finetune:indolem/indobert-base-uncased", "license:mit", "region:us" ]
null
2024-05-28T13:06:00Z
--- language: - id license: mit base_model: indolem/indobert-base-uncased tags: - generated_from_trainer model-index: - name: nerui-lora-r16-1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nerui-lora-r16-1 This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0342 - Location Precision: 0.9316 - Location Recall: 0.9397 - Location F1: 0.9356 - Location Number: 116 - Organization Precision: 0.9484 - Organization Recall: 0.9304 - Organization F1: 0.9393 - Organization Number: 158 - Person Precision: 0.984 - Person Recall: 0.9919 - Person F1: 0.9880 - Person Number: 124 - Overall Precision: 0.9547 - Overall Recall: 0.9523 - Overall F1: 0.9535 - Overall Accuracy: 0.9896 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Location Precision | Location Recall | Location F1 | Location Number | Organization Precision | Organization Recall | Organization F1 | Organization Number | Person Precision | Person Recall | Person F1 | Person Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------------------:|:---------------:|:-----------:|:---------------:|:----------------------:|:-------------------:|:---------------:|:-------------------:|:----------------:|:-------------:|:---------:|:-------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 1.0545 | 1.0 | 96 | 0.6622 | 0.0 | 0.0 | 0.0 | 116 | 0.0 | 0.0 | 0.0 | 158 | 0.0 | 0.0 | 0.0 | 124 | 0.0 | 0.0 | 0.0 | 0.8394 | | 0.64 | 2.0 | 192 | 0.5206 | 0.0 | 0.0 | 0.0 | 116 | 0.5 | 0.0127 | 0.0247 | 158 | 0.0 | 0.0 | 0.0 | 124 | 0.3333 | 0.0050 | 0.0099 | 0.8400 | | 0.503 | 3.0 | 288 | 0.3728 | 0.0833 | 0.0086 | 0.0156 | 116 | 0.3625 | 0.1835 | 0.2437 | 158 | 0.36 | 0.2903 | 0.3214 | 124 | 0.3438 | 0.1658 | 0.2237 | 0.8718 | | 0.3537 | 4.0 | 384 | 0.2518 | 0.3947 | 0.2586 | 0.3125 | 116 | 0.4885 | 0.5380 | 0.5120 | 158 | 0.5521 | 0.7258 | 0.6272 | 124 | 0.4964 | 0.5151 | 0.5055 | 0.9198 | | 0.2513 | 5.0 | 480 | 0.1812 | 0.6111 | 0.5690 | 0.5893 | 116 | 0.5979 | 0.7342 | 0.6591 | 158 | 0.8028 | 0.9194 | 0.8571 | 124 | 0.6667 | 0.7437 | 0.7031 | 0.9498 | | 0.1948 | 6.0 | 576 | 0.1359 | 0.7438 | 0.7759 | 0.7595 | 116 | 0.7368 | 0.7975 | 0.7660 | 158 | 0.8905 | 0.9839 | 0.9349 | 124 | 0.7879 | 0.8492 | 0.8174 | 0.9657 | | 0.1623 | 7.0 | 672 | 0.1109 | 0.7917 | 0.8190 | 0.8051 | 116 | 0.7619 | 0.8101 | 0.7853 | 158 | 0.9104 | 0.9839 | 0.9457 | 124 | 0.8175 | 0.8668 | 0.8415 | 0.9701 | | 0.1397 | 8.0 | 768 | 0.0954 | 0.8083 | 0.8362 | 0.8220 | 116 | 0.7976 | 0.8481 | 0.8221 | 158 | 0.9389 | 0.9919 | 0.9647 | 124 | 0.8449 | 0.8894 | 0.8666 | 0.9739 | | 0.1266 | 9.0 | 864 | 0.0877 | 0.8189 | 0.8966 | 0.8560 | 116 | 0.8155 | 0.8671 | 0.8405 | 158 | 0.9318 | 0.9919 | 0.9609 | 124 | 
0.8525 | 0.9146 | 0.8824 | 0.9761 | | 0.1157 | 10.0 | 960 | 0.0731 | 0.8607 | 0.9052 | 0.8824 | 116 | 0.8519 | 0.8734 | 0.8625 | 158 | 0.9609 | 0.9919 | 0.9762 | 124 | 0.8883 | 0.9196 | 0.9037 | 0.9800 | | 0.1111 | 11.0 | 1056 | 0.0673 | 0.8760 | 0.9138 | 0.8945 | 116 | 0.8606 | 0.8987 | 0.8793 | 158 | 0.9685 | 0.9919 | 0.9801 | 124 | 0.8983 | 0.9322 | 0.9149 | 0.9813 | | 0.1044 | 12.0 | 1152 | 0.0635 | 0.8760 | 0.9138 | 0.8945 | 116 | 0.8554 | 0.8987 | 0.8765 | 158 | 0.9685 | 0.9919 | 0.9801 | 124 | 0.8961 | 0.9322 | 0.9138 | 0.9811 | | 0.098 | 13.0 | 1248 | 0.0578 | 0.8898 | 0.9052 | 0.8974 | 116 | 0.8589 | 0.8861 | 0.8723 | 158 | 0.9762 | 0.9919 | 0.9840 | 124 | 0.9042 | 0.9246 | 0.9143 | 0.9816 | | 0.0939 | 14.0 | 1344 | 0.0559 | 0.875 | 0.9052 | 0.8898 | 116 | 0.8642 | 0.8861 | 0.8750 | 158 | 0.9762 | 0.9919 | 0.9840 | 124 | 0.9020 | 0.9246 | 0.9132 | 0.9819 | | 0.091 | 15.0 | 1440 | 0.0558 | 0.8824 | 0.9052 | 0.8936 | 116 | 0.8402 | 0.8987 | 0.8685 | 158 | 0.9685 | 0.9919 | 0.9801 | 124 | 0.8916 | 0.9296 | 0.9102 | 0.9816 | | 0.088 | 16.0 | 1536 | 0.0555 | 0.875 | 0.9052 | 0.8898 | 116 | 0.8452 | 0.8987 | 0.8712 | 158 | 0.9535 | 0.9919 | 0.9723 | 124 | 0.8873 | 0.9296 | 0.9080 | 0.9811 | | 0.0857 | 17.0 | 1632 | 0.0523 | 0.8824 | 0.9052 | 0.8936 | 116 | 0.8868 | 0.8924 | 0.8896 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9156 | 0.9271 | 0.9213 | 0.9846 | | 0.0809 | 18.0 | 1728 | 0.0498 | 0.8678 | 0.9052 | 0.8861 | 116 | 0.8659 | 0.8987 | 0.8820 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9024 | 0.9296 | 0.9158 | 0.9833 | | 0.0773 | 19.0 | 1824 | 0.0482 | 0.8898 | 0.9052 | 0.8974 | 116 | 0.8827 | 0.9051 | 0.8938 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9160 | 0.9322 | 0.9240 | 0.9844 | | 0.0765 | 20.0 | 1920 | 0.0521 | 0.8833 | 0.9138 | 0.8983 | 116 | 0.8571 | 0.9114 | 0.8834 | 158 | 0.9685 | 0.9919 | 0.9801 | 124 | 0.8988 | 0.9372 | 0.9176 | 0.9822 | | 0.0754 | 21.0 | 2016 | 0.0484 | 0.875 | 0.9052 | 0.8898 | 116 | 0.8735 | 0.9177 | 0.8951 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9075 | 0.9372 | 0.9221 | 0.9841 | | 0.072 | 22.0 | 2112 | 0.0469 | 0.875 | 0.9052 | 0.8898 | 116 | 0.8606 | 0.8987 | 0.8793 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9024 | 0.9296 | 0.9158 | 0.9835 | | 0.0689 | 23.0 | 2208 | 0.0440 | 0.8898 | 0.9052 | 0.8974 | 116 | 0.8944 | 0.9114 | 0.9028 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9208 | 0.9347 | 0.9277 | 0.9844 | | 0.0697 | 24.0 | 2304 | 0.0456 | 0.8974 | 0.9052 | 0.9013 | 116 | 0.8968 | 0.8797 | 0.8882 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9244 | 0.9221 | 0.9233 | 0.9846 | | 0.0656 | 25.0 | 2400 | 0.0436 | 0.8983 | 0.9138 | 0.9060 | 116 | 0.8812 | 0.8924 | 0.8868 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9181 | 0.9296 | 0.9238 | 0.9846 | | 0.0658 | 26.0 | 2496 | 0.0427 | 0.8974 | 0.9052 | 0.9013 | 116 | 0.8704 | 0.8924 | 0.8812 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9134 | 0.9271 | 0.9202 | 0.9841 | | 0.065 | 27.0 | 2592 | 0.0421 | 0.9052 | 0.9052 | 0.9052 | 116 | 0.8834 | 0.9114 | 0.8972 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9208 | 0.9347 | 0.9277 | 0.9855 | | 0.0613 | 28.0 | 2688 | 0.0418 | 0.8833 | 0.9138 | 0.8983 | 116 | 0.8882 | 0.9051 | 0.8966 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9163 | 0.9347 | 0.9254 | 0.9855 | | 0.0591 | 29.0 | 2784 | 0.0398 | 0.9060 | 0.9138 | 0.9099 | 116 | 0.8882 | 0.9051 | 0.8966 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9231 | 0.9347 | 0.9288 | 0.9874 | | 0.06 | 30.0 | 2880 | 0.0395 | 0.9060 | 0.9138 | 0.9099 | 116 | 0.8994 | 0.9051 | 0.9022 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9277 | 0.9347 | 0.9312 | 
0.9865 | | 0.0566 | 31.0 | 2976 | 0.0386 | 0.8983 | 0.9138 | 0.9060 | 116 | 0.8827 | 0.9051 | 0.8938 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9185 | 0.9347 | 0.9265 | 0.9863 | | 0.0566 | 32.0 | 3072 | 0.0392 | 0.8889 | 0.8966 | 0.8927 | 116 | 0.9045 | 0.8987 | 0.9016 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9248 | 0.9271 | 0.9260 | 0.9857 | | 0.0566 | 33.0 | 3168 | 0.0398 | 0.8992 | 0.9224 | 0.9106 | 116 | 0.9045 | 0.8987 | 0.9016 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9277 | 0.9347 | 0.9312 | 0.9865 | | 0.0568 | 34.0 | 3264 | 0.0396 | 0.9224 | 0.9224 | 0.9224 | 116 | 0.8951 | 0.9177 | 0.9062 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9305 | 0.9422 | 0.9363 | 0.9871 | | 0.0532 | 35.0 | 3360 | 0.0379 | 0.8983 | 0.9138 | 0.9060 | 116 | 0.9051 | 0.9051 | 0.9051 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9277 | 0.9347 | 0.9312 | 0.9871 | | 0.052 | 36.0 | 3456 | 0.0403 | 0.9231 | 0.9310 | 0.9270 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9332 | 0.9472 | 0.9401 | 0.9879 | | 0.0516 | 37.0 | 3552 | 0.0386 | 0.8983 | 0.9138 | 0.9060 | 116 | 0.9 | 0.9114 | 0.9057 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9256 | 0.9372 | 0.9313 | 0.9874 | | 0.0497 | 38.0 | 3648 | 0.0378 | 0.8992 | 0.9224 | 0.9106 | 116 | 0.8994 | 0.9051 | 0.9022 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9256 | 0.9372 | 0.9313 | 0.9879 | | 0.052 | 39.0 | 3744 | 0.0366 | 0.9138 | 0.9138 | 0.9138 | 116 | 0.9006 | 0.9177 | 0.9091 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9303 | 0.9397 | 0.9350 | 0.9885 | | 0.0472 | 40.0 | 3840 | 0.0367 | 0.9138 | 0.9138 | 0.9138 | 116 | 0.8987 | 0.8987 | 0.8987 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9298 | 0.9322 | 0.9310 | 0.9868 | | 0.0486 | 41.0 | 3936 | 0.0388 | 0.9076 | 0.9310 | 0.9191 | 116 | 0.9074 | 0.9304 | 0.9187 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9310 | 0.9497 | 0.9403 | 0.9882 | | 0.047 | 42.0 | 4032 | 0.0375 | 0.9068 | 0.9224 | 0.9145 | 116 | 0.9161 | 0.8987 | 0.9073 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9347 | 0.9347 | 0.9347 | 0.9874 | | 0.0481 | 43.0 | 4128 | 0.0380 | 0.8983 | 0.9138 | 0.9060 | 116 | 0.9051 | 0.9051 | 0.9051 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9277 | 0.9347 | 0.9312 | 0.9860 | | 0.0468 | 44.0 | 4224 | 0.0391 | 0.9231 | 0.9310 | 0.9270 | 116 | 0.9062 | 0.9177 | 0.9119 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9353 | 0.9447 | 0.94 | 0.9876 | | 0.0473 | 45.0 | 4320 | 0.0366 | 0.8992 | 0.9224 | 0.9106 | 116 | 0.9045 | 0.8987 | 0.9016 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9277 | 0.9347 | 0.9312 | 0.9868 | | 0.0441 | 46.0 | 4416 | 0.0372 | 0.9 | 0.9310 | 0.9153 | 116 | 0.9006 | 0.9177 | 0.9091 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9261 | 0.9447 | 0.9353 | 0.9887 | | 0.0441 | 47.0 | 4512 | 0.0375 | 0.9224 | 0.9224 | 0.9224 | 116 | 0.9068 | 0.9241 | 0.9154 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9353 | 0.9447 | 0.94 | 0.9887 | | 0.0416 | 48.0 | 4608 | 0.0359 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9363 | 0.9304 | 0.9333 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9475 | 0.9523 | 0.9499 | 0.9898 | | 0.0446 | 49.0 | 4704 | 0.0355 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8931 | 0.8987 | 0.8959 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9279 | 0.9372 | 0.9325 | 0.9876 | | 0.0425 | 50.0 | 4800 | 0.0366 | 0.9160 | 0.9397 | 0.9277 | 116 | 0.9 | 0.9114 | 0.9057 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9307 | 0.9447 | 0.9377 | 0.9887 | | 0.0422 | 51.0 | 4896 | 0.0364 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9167 | 0.9051 | 0.9108 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9373 | 0.9397 | 0.9385 | 0.9871 | | 0.0409 | 52.0 | 4992 | 0.0357 | 
0.9145 | 0.9224 | 0.9185 | 116 | 0.9074 | 0.9304 | 0.9187 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9332 | 0.9472 | 0.9401 | 0.9896 | | 0.0414 | 53.0 | 5088 | 0.0359 | 0.9231 | 0.9310 | 0.9270 | 116 | 0.9136 | 0.9367 | 0.9250 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9381 | 0.9523 | 0.9451 | 0.9901 | | 0.0403 | 54.0 | 5184 | 0.0353 | 0.9231 | 0.9310 | 0.9270 | 116 | 0.8963 | 0.9304 | 0.9130 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9310 | 0.9497 | 0.9403 | 0.9896 | | 0.0393 | 55.0 | 5280 | 0.0352 | 0.9145 | 0.9224 | 0.9185 | 116 | 0.9136 | 0.9367 | 0.9250 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9356 | 0.9497 | 0.9426 | 0.9898 | | 0.0405 | 56.0 | 5376 | 0.0359 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9430 | 0.9430 | 0.9430 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9501 | 0.9573 | 0.9537 | 0.9901 | | 0.0404 | 57.0 | 5472 | 0.0370 | 0.9160 | 0.9397 | 0.9277 | 116 | 0.9371 | 0.9430 | 0.9401 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9454 | 0.9573 | 0.9513 | 0.9896 | | 0.0398 | 58.0 | 5568 | 0.0355 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9308 | 0.9367 | 0.9338 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9476 | 0.9548 | 0.9512 | 0.9904 | | 0.0382 | 59.0 | 5664 | 0.0355 | 0.9397 | 0.9397 | 0.9397 | 116 | 0.9551 | 0.9430 | 0.9490 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9597 | 0.9573 | 0.9585 | 0.9904 | | 0.0396 | 60.0 | 5760 | 0.0344 | 0.9160 | 0.9397 | 0.9277 | 116 | 0.9125 | 0.9241 | 0.9182 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9356 | 0.9497 | 0.9426 | 0.9893 | | 0.0362 | 61.0 | 5856 | 0.0356 | 0.9231 | 0.9310 | 0.9270 | 116 | 0.9226 | 0.9051 | 0.9137 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9421 | 0.9397 | 0.9409 | 0.9879 | | 0.037 | 62.0 | 5952 | 0.0360 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9167 | 0.9051 | 0.9108 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9398 | 0.9422 | 0.9410 | 0.9882 | | 0.0386 | 63.0 | 6048 | 0.0364 | 0.9310 | 0.9310 | 0.9310 | 116 | 0.9367 | 0.9367 | 0.9367 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9499 | 0.9523 | 0.9511 | 0.9896 | | 0.0365 | 64.0 | 6144 | 0.0360 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9412 | 0.9114 | 0.9260 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9470 | 0.9422 | 0.9446 | 0.9887 | | 0.0347 | 65.0 | 6240 | 0.0354 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9416 | 0.9177 | 0.9295 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9496 | 0.9472 | 0.9484 | 0.9887 | | 0.0393 | 66.0 | 6336 | 0.0366 | 0.9397 | 0.9397 | 0.9397 | 116 | 0.9355 | 0.9177 | 0.9265 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9520 | 0.9472 | 0.9496 | 0.9887 | | 0.0359 | 67.0 | 6432 | 0.0348 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9241 | 0.9241 | 0.9241 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.945 | 0.9497 | 0.9474 | 0.9893 | | 0.0331 | 68.0 | 6528 | 0.0347 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9177 | 0.9177 | 0.9177 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9425 | 0.9472 | 0.9449 | 0.9890 | | 0.0344 | 69.0 | 6624 | 0.0341 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9363 | 0.9304 | 0.9333 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9521 | 0.9497 | 0.9509 | 0.9898 | | 0.0349 | 70.0 | 6720 | 0.0345 | 0.9397 | 0.9397 | 0.9397 | 116 | 0.9427 | 0.9367 | 0.9397 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9548 | 0.9548 | 0.9548 | 0.9901 | | 0.0349 | 71.0 | 6816 | 0.0354 | 0.9310 | 0.9310 | 0.9310 | 116 | 0.9299 | 0.9241 | 0.9270 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9472 | 0.9472 | 0.9472 | 0.9885 | | 0.0342 | 72.0 | 6912 | 0.0343 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9299 | 0.9241 | 0.9270 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.945 | 0.9497 | 0.9474 | 0.9887 | | 0.0333 | 73.0 | 7008 | 0.0354 | 0.9391 | 0.9310 | 0.9351 | 
116 | 0.9241 | 0.9241 | 0.9241 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9472 | 0.9472 | 0.9472 | 0.9890 | | 0.0332 | 74.0 | 7104 | 0.0346 | 0.9231 | 0.9310 | 0.9270 | 116 | 0.9241 | 0.9241 | 0.9241 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9425 | 0.9472 | 0.9449 | 0.9893 | | 0.0346 | 75.0 | 7200 | 0.0342 | 0.9310 | 0.9310 | 0.9310 | 116 | 0.9245 | 0.9304 | 0.9274 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.945 | 0.9497 | 0.9474 | 0.9896 | | 0.0334 | 76.0 | 7296 | 0.0346 | 0.9224 | 0.9224 | 0.9224 | 116 | 0.925 | 0.9367 | 0.9308 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9426 | 0.9497 | 0.9462 | 0.9904 | | 0.034 | 77.0 | 7392 | 0.0350 | 0.9397 | 0.9397 | 0.9397 | 116 | 0.9299 | 0.9241 | 0.9270 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9497 | 0.9497 | 0.9497 | 0.9896 | | 0.0341 | 78.0 | 7488 | 0.0340 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9363 | 0.9304 | 0.9333 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9499 | 0.9523 | 0.9511 | 0.9904 | | 0.033 | 79.0 | 7584 | 0.0348 | 0.9304 | 0.9224 | 0.9264 | 116 | 0.925 | 0.9367 | 0.9308 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.945 | 0.9497 | 0.9474 | 0.9896 | | 0.0308 | 80.0 | 7680 | 0.0337 | 0.9138 | 0.9138 | 0.9138 | 116 | 0.9193 | 0.9367 | 0.9279 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9378 | 0.9472 | 0.9425 | 0.9898 | | 0.031 | 81.0 | 7776 | 0.0341 | 0.9224 | 0.9224 | 0.9224 | 116 | 0.9193 | 0.9367 | 0.9279 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9403 | 0.9497 | 0.9450 | 0.9901 | | 0.0315 | 82.0 | 7872 | 0.0340 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9363 | 0.9304 | 0.9333 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9475 | 0.9523 | 0.9499 | 0.9904 | | 0.0321 | 83.0 | 7968 | 0.0343 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9367 | 0.9367 | 0.9367 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9523 | 0.9523 | 0.9523 | 0.9901 | | 0.0317 | 84.0 | 8064 | 0.0340 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9367 | 0.9367 | 0.9367 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9523 | 0.9523 | 0.9523 | 0.9901 | | 0.0324 | 85.0 | 8160 | 0.0340 | 0.9145 | 0.9224 | 0.9185 | 116 | 0.9187 | 0.9304 | 0.9245 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9378 | 0.9472 | 0.9425 | 0.9893 | | 0.0317 | 86.0 | 8256 | 0.0339 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9423 | 0.9304 | 0.9363 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9523 | 0.9523 | 0.9523 | 0.9901 | | 0.0308 | 87.0 | 8352 | 0.0347 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9423 | 0.9304 | 0.9363 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9523 | 0.9523 | 0.9523 | 0.9898 | | 0.0311 | 88.0 | 8448 | 0.0344 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9367 | 0.9367 | 0.9367 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9523 | 0.9523 | 0.9523 | 0.9898 | | 0.0295 | 89.0 | 8544 | 0.0346 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9427 | 0.9367 | 0.9397 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9896 | | 0.0304 | 90.0 | 8640 | 0.0343 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9427 | 0.9367 | 0.9397 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9896 | | 0.0315 | 91.0 | 8736 | 0.0343 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9427 | 0.9367 | 0.9397 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9896 | | 0.0314 | 92.0 | 8832 | 0.0342 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9427 | 0.9367 | 0.9397 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9896 | | 0.0322 | 93.0 | 8928 | 0.0340 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9427 | 0.9367 | 0.9397 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9898 | | 0.0303 | 94.0 | 9024 | 0.0343 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9367 | 0.9367 | 0.9367 | 
158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9523 | 0.9523 | 0.9523 | 0.9898 | | 0.0316 | 95.0 | 9120 | 0.0343 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9367 | 0.9367 | 0.9367 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9523 | 0.9523 | 0.9523 | 0.9898 | | 0.0317 | 96.0 | 9216 | 0.0342 | 0.9391 | 0.9310 | 0.9351 | 116 | 0.9427 | 0.9367 | 0.9397 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9896 | | 0.0321 | 97.0 | 9312 | 0.0341 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9484 | 0.9304 | 0.9393 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9898 | | 0.0295 | 98.0 | 9408 | 0.0342 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9484 | 0.9304 | 0.9393 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9898 | | 0.031 | 99.0 | 9504 | 0.0341 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9484 | 0.9304 | 0.9393 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9898 | | 0.0299 | 100.0 | 9600 | 0.0342 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9484 | 0.9304 | 0.9393 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9547 | 0.9523 | 0.9535 | 0.9896 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.3.0+cu121 - Datasets 2.19.1 - Tokenizers 0.15.2
vaiv/GeM2-Llamion-14B-Chat
vaiv
2024-06-04T01:49:33Z
2,245
1
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-05-13T08:43:21Z
---
license: apache-2.0
---

# **GeM2-Llamion-14B**

We have released **Llamion** as **GeM 2.0**, the second series of generative models developed by VAIV Company to address our principal business needs.

**Llamion** (Llamafied Orion) is derived from transforming the [Orion model](https://huggingface.co/OrionStarAI/Orion-14B-Chat) into [the standard LLaMA architecture](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py) through parameter mapping and offline knowledge transfer. Further technical specifications and study results will be detailed in our upcoming paper, available on this page.

<!-- Note that this model has NOT been contaminated to artificially inflate its scores for the Open LLM Leaderboards, unlike some recent models which have been intentionally tainted. -->

![vaiv_png](./vaiv.png)

### Contributors

- VAIV Company AI Lab ([vaiv.kr](https://www.vaiv.kr/))
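No usage snippet ships with the card. Because the checkpoint follows the standard LLaMA architecture, it should work with the stock `transformers` text-generation API; a minimal sketch, assuming the tokenizer provides a chat template (if not, format the conversation manually):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "vaiv/GeM2-Llamion-14B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Introduce yourself briefly."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(inputs, max_new_tokens=256, do_sample=True, temperature=0.7)
print(tokenizer.decode(output[0][inputs.shape[1]:], skip_special_tokens=True))
```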
vaiv/GeM2-Llamion-14B-Base
vaiv
2024-06-04T01:49:19Z
3,505
6
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-05-13T08:42:16Z
---
license: apache-2.0
---

# **GeM2-Llamion-14B**

We have released **Llamion** as **GeM 2.0**, the second series of generative models developed by VAIV Company to address our principal business needs.

**Llamion** (Llamafied Orion) is derived from transforming the [Orion model](https://huggingface.co/OrionStarAI/Orion-14B-Base) into [the standard LLaMA architecture](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py) through parameter mapping and offline knowledge transfer. Further technical specifications and study results will be detailed in our upcoming paper, available on this page.

<!-- Note that this model has NOT been contaminated to artificially inflate its scores for the Open LLM Leaderboards, unlike some recent models which have been intentionally tainted. -->

![vaiv_png](./vaiv.png)

### Contributors

- VAIV Company AI Lab ([vaiv.kr](https://www.vaiv.kr/))
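Since the stated goal of the Llamafication is drop-in compatibility with LLaMA tooling, the base checkpoint should load directly through the plain `LlamaForCausalLM` class; a minimal sketch of that check (the sample prompt is illustrative):

```python
from transformers import AutoTokenizer, LlamaForCausalLM

model_id = "vaiv/GeM2-Llamion-14B-Base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Plain LLaMA class, no trust_remote_code needed if the mapping holds
model = LlamaForCausalLM.from_pretrained(model_id, device_map="auto")

inputs = tokenizer("The quick brown fox", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0], skip_special_tokens=True))
```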
apwic/nerui-lora-r8-1
apwic
2024-06-04T01:45:11Z
0
0
null
[ "tensorboard", "generated_from_trainer", "id", "base_model:indolem/indobert-base-uncased", "base_model:finetune:indolem/indobert-base-uncased", "license:mit", "region:us" ]
null
2024-05-28T12:48:14Z
--- language: - id license: mit base_model: indolem/indobert-base-uncased tags: - generated_from_trainer model-index: - name: nerui-lora-r8-1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nerui-lora-r8-1 This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0379 - Location Precision: 0.9153 - Location Recall: 0.9310 - Location F1: 0.9231 - Location Number: 116 - Organization Precision: 0.9012 - Organization Recall: 0.9241 - Organization F1: 0.9125 - Organization Number: 158 - Person Precision: 0.984 - Person Recall: 0.9919 - Person F1: 0.9880 - Person Number: 124 - Overall Precision: 0.9309 - Overall Recall: 0.9472 - Overall F1: 0.9390 - Overall Accuracy: 0.9868 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Location Precision | Location Recall | Location F1 | Location Number | Organization Precision | Organization Recall | Organization F1 | Organization Number | Person Precision | Person Recall | Person F1 | Person Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------------------:|:---------------:|:-----------:|:---------------:|:----------------------:|:-------------------:|:---------------:|:-------------------:|:----------------:|:-------------:|:---------:|:-------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 1.1502 | 1.0 | 96 | 0.6915 | 0.0 | 0.0 | 0.0 | 116 | 0.0 | 0.0 | 0.0 | 158 | 0.0 | 0.0 | 0.0 | 124 | 0.0 | 0.0 | 0.0 | 0.8394 | | 0.6681 | 2.0 | 192 | 0.5641 | 0.0 | 0.0 | 0.0 | 116 | 0.5 | 0.0063 | 0.0125 | 158 | 0.0 | 0.0 | 0.0 | 124 | 0.5 | 0.0025 | 0.005 | 0.8397 | | 0.5591 | 3.0 | 288 | 0.4474 | 0.0 | 0.0 | 0.0 | 116 | 0.4286 | 0.0570 | 0.1006 | 158 | 0.2727 | 0.0484 | 0.0822 | 124 | 0.3333 | 0.0377 | 0.0677 | 0.8471 | | 0.4414 | 4.0 | 384 | 0.3290 | 0.2692 | 0.0603 | 0.0986 | 116 | 0.3592 | 0.2342 | 0.2835 | 158 | 0.4071 | 0.4597 | 0.4318 | 124 | 0.3755 | 0.2538 | 0.3028 | 0.8847 | | 0.3301 | 5.0 | 480 | 0.2424 | 0.4459 | 0.2845 | 0.3474 | 116 | 0.4874 | 0.6139 | 0.5434 | 158 | 0.5669 | 0.7177 | 0.6335 | 124 | 0.5093 | 0.5503 | 0.5290 | 0.9256 | | 0.2536 | 6.0 | 576 | 0.1846 | 0.6372 | 0.6207 | 0.6288 | 116 | 0.6264 | 0.7215 | 0.6706 | 158 | 0.7347 | 0.8710 | 0.7970 | 124 | 0.6652 | 0.7387 | 0.7 | 0.9525 | | 0.2029 | 7.0 | 672 | 0.1468 | 0.7328 | 0.7328 | 0.7328 | 116 | 0.6778 | 0.7722 | 0.7219 | 158 | 0.8676 | 0.9516 | 0.9077 | 124 | 0.7523 | 0.8166 | 0.7831 | 0.9629 | | 0.1712 | 8.0 | 768 | 0.1217 | 0.7949 | 0.8017 | 0.7983 | 116 | 0.7356 | 0.8101 | 0.7711 | 158 | 0.9104 | 0.9839 | 0.9457 | 124 | 0.8071 | 0.8618 | 0.8335 | 0.9679 | | 0.1504 | 9.0 | 864 | 0.1066 | 0.8220 | 0.8362 | 0.8291 | 116 | 0.7630 | 0.8354 | 0.7976 | 158 | 0.9173 | 0.9839 | 0.9494 | 124 | 0.8278 | 0.8819 | 
0.8540 | 0.9717 | | 0.1356 | 10.0 | 960 | 0.0944 | 0.8305 | 0.8448 | 0.8376 | 116 | 0.7917 | 0.8418 | 0.8160 | 158 | 0.9173 | 0.9839 | 0.9494 | 124 | 0.8425 | 0.8869 | 0.8641 | 0.9734 | | 0.1276 | 11.0 | 1056 | 0.0848 | 0.8305 | 0.8448 | 0.8376 | 116 | 0.8084 | 0.8544 | 0.8308 | 158 | 0.9173 | 0.9839 | 0.9494 | 124 | 0.8493 | 0.8920 | 0.8701 | 0.9745 | | 0.1202 | 12.0 | 1152 | 0.0797 | 0.8739 | 0.8966 | 0.8851 | 116 | 0.8313 | 0.8734 | 0.8519 | 158 | 0.9173 | 0.9839 | 0.9494 | 124 | 0.8708 | 0.9146 | 0.8922 | 0.9769 | | 0.1131 | 13.0 | 1248 | 0.0725 | 0.8824 | 0.9052 | 0.8936 | 116 | 0.8274 | 0.8797 | 0.8528 | 158 | 0.9531 | 0.9839 | 0.9683 | 124 | 0.8819 | 0.9196 | 0.9004 | 0.9786 | | 0.1074 | 14.0 | 1344 | 0.0678 | 0.8983 | 0.9138 | 0.9060 | 116 | 0.8625 | 0.8734 | 0.8679 | 158 | 0.9457 | 0.9839 | 0.9644 | 124 | 0.8993 | 0.9196 | 0.9093 | 0.9797 | | 0.1046 | 15.0 | 1440 | 0.0671 | 0.8618 | 0.9138 | 0.8870 | 116 | 0.8383 | 0.8861 | 0.8615 | 158 | 0.9462 | 0.9919 | 0.9685 | 124 | 0.8786 | 0.9271 | 0.9022 | 0.9786 | | 0.0992 | 16.0 | 1536 | 0.0648 | 0.8833 | 0.9138 | 0.8983 | 116 | 0.8393 | 0.8924 | 0.8650 | 158 | 0.9535 | 0.9919 | 0.9723 | 124 | 0.8873 | 0.9296 | 0.9080 | 0.9800 | | 0.0972 | 17.0 | 1632 | 0.0611 | 0.9052 | 0.9052 | 0.9052 | 116 | 0.8805 | 0.8861 | 0.8833 | 158 | 0.9531 | 0.9839 | 0.9683 | 124 | 0.9107 | 0.9221 | 0.9164 | 0.9822 | | 0.0908 | 18.0 | 1728 | 0.0583 | 0.8678 | 0.9052 | 0.8861 | 116 | 0.8720 | 0.9051 | 0.8882 | 158 | 0.9683 | 0.9839 | 0.976 | 124 | 0.9002 | 0.9296 | 0.9147 | 0.9822 | | 0.089 | 19.0 | 1824 | 0.0568 | 0.8678 | 0.9052 | 0.8861 | 116 | 0.8805 | 0.8861 | 0.8833 | 158 | 0.9606 | 0.9839 | 0.9721 | 124 | 0.9017 | 0.9221 | 0.9118 | 0.9816 | | 0.0872 | 20.0 | 1920 | 0.0591 | 0.8824 | 0.9052 | 0.8936 | 116 | 0.8462 | 0.9051 | 0.8746 | 158 | 0.9535 | 0.9919 | 0.9723 | 124 | 0.8897 | 0.9322 | 0.9104 | 0.9805 | | 0.0863 | 21.0 | 2016 | 0.0565 | 0.8770 | 0.9224 | 0.8992 | 116 | 0.8421 | 0.9114 | 0.8754 | 158 | 0.9762 | 0.9919 | 0.9840 | 124 | 0.8926 | 0.9397 | 0.9155 | 0.9822 | | 0.0834 | 22.0 | 2112 | 0.0545 | 0.8833 | 0.9138 | 0.8983 | 116 | 0.8471 | 0.9114 | 0.8780 | 158 | 0.9762 | 0.9919 | 0.9840 | 124 | 0.8966 | 0.9372 | 0.9165 | 0.9822 | | 0.0795 | 23.0 | 2208 | 0.0511 | 0.8824 | 0.9052 | 0.8936 | 116 | 0.8667 | 0.9051 | 0.8854 | 158 | 0.9683 | 0.9839 | 0.976 | 124 | 0.9024 | 0.9296 | 0.9158 | 0.9835 | | 0.0815 | 24.0 | 2304 | 0.0501 | 0.8898 | 0.9052 | 0.8974 | 116 | 0.8861 | 0.8861 | 0.8861 | 158 | 0.9762 | 0.9919 | 0.9840 | 124 | 0.9154 | 0.9246 | 0.92 | 0.9824 | | 0.0764 | 25.0 | 2400 | 0.0491 | 0.8974 | 0.9052 | 0.9013 | 116 | 0.8727 | 0.9114 | 0.8916 | 158 | 0.9762 | 0.9919 | 0.9840 | 124 | 0.9118 | 0.9347 | 0.9231 | 0.9844 | | 0.077 | 26.0 | 2496 | 0.0477 | 0.9052 | 0.9052 | 0.9052 | 116 | 0.8788 | 0.9177 | 0.8978 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9187 | 0.9372 | 0.9279 | 0.9849 | | 0.0749 | 27.0 | 2592 | 0.0504 | 0.9060 | 0.9138 | 0.9099 | 116 | 0.8855 | 0.9304 | 0.9074 | 158 | 0.9762 | 0.9919 | 0.9840 | 124 | 0.9193 | 0.9447 | 0.9318 | 0.9844 | | 0.0728 | 28.0 | 2688 | 0.0490 | 0.8824 | 0.9052 | 0.8936 | 116 | 0.8667 | 0.9051 | 0.8854 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9071 | 0.9322 | 0.9195 | 0.9841 | | 0.0698 | 29.0 | 2784 | 0.0478 | 0.8824 | 0.9052 | 0.8936 | 116 | 0.8727 | 0.9114 | 0.8916 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9095 | 0.9347 | 0.9219 | 0.9841 | | 0.0694 | 30.0 | 2880 | 0.0466 | 0.9052 | 0.9052 | 0.9052 | 116 | 0.8683 | 0.9177 | 0.8923 | 158 | 0.9683 | 0.9839 | 0.976 | 124 | 0.9095 | 0.9347 | 0.9219 | 0.9846 
| | 0.0661 | 31.0 | 2976 | 0.0459 | 0.9060 | 0.9138 | 0.9099 | 116 | 0.8848 | 0.9241 | 0.9040 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9214 | 0.9422 | 0.9317 | 0.9855 | | 0.0672 | 32.0 | 3072 | 0.0454 | 0.8974 | 0.9052 | 0.9013 | 116 | 0.8659 | 0.8987 | 0.8820 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9113 | 0.9296 | 0.9204 | 0.9849 | | 0.0663 | 33.0 | 3168 | 0.0459 | 0.8974 | 0.9052 | 0.9013 | 116 | 0.8606 | 0.8987 | 0.8793 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9091 | 0.9296 | 0.9193 | 0.9846 | | 0.067 | 34.0 | 3264 | 0.0461 | 0.8824 | 0.9052 | 0.8936 | 116 | 0.8667 | 0.9051 | 0.8854 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9071 | 0.9322 | 0.9195 | 0.9841 | | 0.0628 | 35.0 | 3360 | 0.0449 | 0.9052 | 0.9052 | 0.9052 | 116 | 0.8675 | 0.9114 | 0.8889 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9140 | 0.9347 | 0.9242 | 0.9852 | | 0.0617 | 36.0 | 3456 | 0.0461 | 0.8992 | 0.9224 | 0.9106 | 116 | 0.8780 | 0.9114 | 0.8944 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9167 | 0.9397 | 0.9280 | 0.9852 | | 0.0617 | 37.0 | 3552 | 0.0432 | 0.8974 | 0.9052 | 0.9013 | 116 | 0.8788 | 0.9177 | 0.8978 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9165 | 0.9372 | 0.9267 | 0.9855 | | 0.0603 | 38.0 | 3648 | 0.0430 | 0.8992 | 0.9224 | 0.9106 | 116 | 0.8944 | 0.9114 | 0.9028 | 158 | 0.9683 | 0.9839 | 0.976 | 124 | 0.9187 | 0.9372 | 0.9279 | 0.9860 | | 0.0617 | 39.0 | 3744 | 0.0413 | 0.8974 | 0.9052 | 0.9013 | 116 | 0.8727 | 0.9114 | 0.8916 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9140 | 0.9347 | 0.9242 | 0.9852 | | 0.0563 | 40.0 | 3840 | 0.0410 | 0.8983 | 0.9138 | 0.9060 | 116 | 0.8827 | 0.9051 | 0.8938 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9185 | 0.9347 | 0.9265 | 0.9855 | | 0.0579 | 41.0 | 3936 | 0.0427 | 0.9008 | 0.9397 | 0.9198 | 116 | 0.8938 | 0.9051 | 0.8994 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9236 | 0.9422 | 0.9328 | 0.9857 | | 0.0566 | 42.0 | 4032 | 0.0413 | 0.8926 | 0.9310 | 0.9114 | 116 | 0.8875 | 0.8987 | 0.8931 | 158 | 0.9683 | 0.9839 | 0.976 | 124 | 0.9140 | 0.9347 | 0.9242 | 0.9855 | | 0.0578 | 43.0 | 4128 | 0.0422 | 0.9060 | 0.9138 | 0.9099 | 116 | 0.8944 | 0.9114 | 0.9028 | 158 | 0.9683 | 0.9839 | 0.976 | 124 | 0.9208 | 0.9347 | 0.9277 | 0.9860 | | 0.0567 | 44.0 | 4224 | 0.0414 | 0.9068 | 0.9224 | 0.9145 | 116 | 0.9125 | 0.9241 | 0.9182 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9330 | 0.9447 | 0.9388 | 0.9871 | | 0.0568 | 45.0 | 4320 | 0.0400 | 0.8926 | 0.9310 | 0.9114 | 116 | 0.8994 | 0.9051 | 0.9022 | 158 | 0.9683 | 0.9839 | 0.976 | 124 | 0.9187 | 0.9372 | 0.9279 | 0.9860 | | 0.053 | 46.0 | 4416 | 0.0409 | 0.9076 | 0.9310 | 0.9191 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9865 | | 0.0536 | 47.0 | 4512 | 0.0408 | 0.9068 | 0.9224 | 0.9145 | 116 | 0.9018 | 0.9304 | 0.9159 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9865 | | 0.0519 | 48.0 | 4608 | 0.0401 | 0.8917 | 0.9224 | 0.9068 | 116 | 0.8951 | 0.9177 | 0.9062 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9214 | 0.9422 | 0.9317 | 0.9865 | | 0.0539 | 49.0 | 4704 | 0.0401 | 0.9068 | 0.9224 | 0.9145 | 116 | 0.9125 | 0.9241 | 0.9182 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9330 | 0.9447 | 0.9388 | 0.9865 | | 0.0522 | 50.0 | 4800 | 0.0418 | 0.9008 | 0.9397 | 0.9198 | 116 | 0.9 | 0.9114 | 0.9057 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9261 | 0.9447 | 0.9353 | 0.9865 | | 0.0518 | 51.0 | 4896 | 0.0404 | 0.8992 | 0.9224 | 0.9106 | 116 | 0.9062 | 0.9177 | 0.9119 | 158 | 0.9683 | 0.9839 | 0.976 | 124 | 0.9235 | 0.9397 | 0.9315 | 0.9863 | | 0.0503 | 52.0 | 4992 | 
0.0393 | 0.9138 | 0.9138 | 0.9138 | 116 | 0.8916 | 0.9367 | 0.9136 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9263 | 0.9472 | 0.9366 | 0.9868 | | 0.0499 | 53.0 | 5088 | 0.0392 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.8963 | 0.9304 | 0.9130 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9312 | 0.9523 | 0.9416 | 0.9876 | | 0.0498 | 54.0 | 5184 | 0.0393 | 0.9068 | 0.9224 | 0.9145 | 116 | 0.9018 | 0.9304 | 0.9159 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9871 | | 0.0492 | 55.0 | 5280 | 0.0390 | 0.9068 | 0.9224 | 0.9145 | 116 | 0.9018 | 0.9304 | 0.9159 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9874 | | 0.0503 | 56.0 | 5376 | 0.0399 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9024 | 0.9367 | 0.9193 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9337 | 0.9548 | 0.9441 | 0.9876 | | 0.0491 | 57.0 | 5472 | 0.0408 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9024 | 0.9367 | 0.9193 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9337 | 0.9548 | 0.9441 | 0.9876 | | 0.0492 | 58.0 | 5568 | 0.0387 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9024 | 0.9367 | 0.9193 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9337 | 0.9548 | 0.9441 | 0.9882 | | 0.0477 | 59.0 | 5664 | 0.0390 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9136 | 0.9367 | 0.9250 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9383 | 0.9548 | 0.9465 | 0.9882 | | 0.0489 | 60.0 | 5760 | 0.0385 | 0.9244 | 0.9483 | 0.9362 | 116 | 0.9074 | 0.9304 | 0.9187 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9360 | 0.9548 | 0.9453 | 0.9879 | | 0.0446 | 61.0 | 5856 | 0.0391 | 0.9160 | 0.9397 | 0.9277 | 116 | 0.9177 | 0.9177 | 0.9177 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9378 | 0.9472 | 0.9425 | 0.9865 | | 0.0463 | 62.0 | 5952 | 0.0402 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9245 | 0.9304 | 0.9274 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9428 | 0.9523 | 0.9475 | 0.9871 | | 0.0482 | 63.0 | 6048 | 0.0401 | 0.9316 | 0.9397 | 0.9356 | 116 | 0.9130 | 0.9304 | 0.9216 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9404 | 0.9523 | 0.9463 | 0.9868 | | 0.0455 | 64.0 | 6144 | 0.0387 | 0.9068 | 0.9224 | 0.9145 | 116 | 0.9231 | 0.9114 | 0.9172 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9373 | 0.9397 | 0.9385 | 0.9863 | | 0.0432 | 65.0 | 6240 | 0.0392 | 0.9231 | 0.9310 | 0.9270 | 116 | 0.9125 | 0.9241 | 0.9182 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9378 | 0.9472 | 0.9425 | 0.9865 | | 0.0484 | 66.0 | 6336 | 0.0392 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9068 | 0.9241 | 0.9154 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9332 | 0.9472 | 0.9401 | 0.9865 | | 0.044 | 67.0 | 6432 | 0.0385 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9068 | 0.9241 | 0.9154 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9332 | 0.9472 | 0.9401 | 0.9865 | | 0.0425 | 68.0 | 6528 | 0.0386 | 0.9237 | 0.9397 | 0.9316 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9333 | 0.9497 | 0.9415 | 0.9871 | | 0.044 | 69.0 | 6624 | 0.0381 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9309 | 0.9472 | 0.9390 | 0.9865 | | 0.0447 | 70.0 | 6720 | 0.0381 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9074 | 0.9304 | 0.9187 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9333 | 0.9497 | 0.9415 | 0.9868 | | 0.0439 | 71.0 | 6816 | 0.0389 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9074 | 0.9304 | 0.9187 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9333 | 0.9497 | 0.9415 | 0.9871 | | 0.0426 | 72.0 | 6912 | 0.0383 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9006 | 0.9177 | 0.9091 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9307 | 0.9447 | 0.9377 | 0.9860 | | 0.0423 | 73.0 | 7008 | 0.0387 | 0.9153 | 0.9310 | 
0.9231 | 116 | 0.9074 | 0.9304 | 0.9187 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9333 | 0.9497 | 0.9415 | 0.9871 | | 0.0427 | 74.0 | 7104 | 0.0385 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9309 | 0.9472 | 0.9390 | 0.9871 | | 0.044 | 75.0 | 7200 | 0.0387 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9018 | 0.9304 | 0.9159 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9310 | 0.9497 | 0.9403 | 0.9871 | | 0.0415 | 76.0 | 7296 | 0.0386 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9018 | 0.9304 | 0.9159 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9310 | 0.9497 | 0.9403 | 0.9874 | | 0.0421 | 77.0 | 7392 | 0.0385 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9068 | 0.9241 | 0.9154 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9332 | 0.9472 | 0.9401 | 0.9874 | | 0.0428 | 78.0 | 7488 | 0.0380 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9125 | 0.9241 | 0.9182 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9355 | 0.9472 | 0.9413 | 0.9871 | | 0.0414 | 79.0 | 7584 | 0.0385 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8896 | 0.9177 | 0.9034 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9261 | 0.9447 | 0.9353 | 0.9865 | | 0.0394 | 80.0 | 7680 | 0.0380 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8841 | 0.9177 | 0.9006 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9238 | 0.9447 | 0.9342 | 0.9868 | | 0.0402 | 81.0 | 7776 | 0.0385 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8963 | 0.9304 | 0.9130 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9287 | 0.9497 | 0.9391 | 0.9876 | | 0.0404 | 82.0 | 7872 | 0.0377 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8957 | 0.9241 | 0.9097 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9874 | | 0.0407 | 83.0 | 7968 | 0.0381 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8957 | 0.9241 | 0.9097 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9871 | | 0.0406 | 84.0 | 8064 | 0.0380 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8902 | 0.9241 | 0.9068 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9263 | 0.9472 | 0.9366 | 0.9874 | | 0.0425 | 85.0 | 8160 | 0.0381 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8963 | 0.9304 | 0.9130 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9287 | 0.9497 | 0.9391 | 0.9876 | | 0.0402 | 86.0 | 8256 | 0.0374 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9062 | 0.9177 | 0.9119 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9330 | 0.9447 | 0.9388 | 0.9868 | | 0.0402 | 87.0 | 8352 | 0.0379 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9309 | 0.9472 | 0.9390 | 0.9871 | | 0.0407 | 88.0 | 8448 | 0.0380 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8902 | 0.9241 | 0.9068 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9263 | 0.9472 | 0.9366 | 0.9874 | | 0.0385 | 89.0 | 8544 | 0.0379 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8957 | 0.9241 | 0.9097 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9871 | | 0.0388 | 90.0 | 8640 | 0.0377 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8957 | 0.9241 | 0.9097 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9871 | | 0.0406 | 91.0 | 8736 | 0.0380 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8957 | 0.9241 | 0.9097 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9871 | | 0.0404 | 92.0 | 8832 | 0.0377 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8957 | 0.9241 | 0.9097 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9871 | | 0.0409 | 93.0 | 8928 | 0.0377 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8951 | 0.9177 | 0.9062 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9284 | 0.9447 | 0.9365 | 0.9865 | | 0.0382 | 94.0 | 9024 | 0.0380 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8957 | 
0.9241 | 0.9097 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9286 | 0.9472 | 0.9378 | 0.9871 | | 0.0409 | 95.0 | 9120 | 0.0379 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9309 | 0.9472 | 0.9390 | 0.9868 | | 0.0406 | 96.0 | 9216 | 0.0379 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9309 | 0.9472 | 0.9390 | 0.9868 | | 0.0413 | 97.0 | 9312 | 0.0378 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.8951 | 0.9177 | 0.9062 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9284 | 0.9447 | 0.9365 | 0.9865 | | 0.0384 | 98.0 | 9408 | 0.0379 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9309 | 0.9472 | 0.9390 | 0.9868 | | 0.0394 | 99.0 | 9504 | 0.0379 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9309 | 0.9472 | 0.9390 | 0.9868 | | 0.0386 | 100.0 | 9600 | 0.0379 | 0.9153 | 0.9310 | 0.9231 | 116 | 0.9012 | 0.9241 | 0.9125 | 158 | 0.984 | 0.9919 | 0.9880 | 124 | 0.9309 | 0.9472 | 0.9390 | 0.9868 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.3.0+cu121 - Datasets 2.19.1 - Tokenizers 0.15.2
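For reference, the hyperparameters above translate into roughly the following PEFT/Trainer setup; the label scheme and LoRA configuration details are assumptions, since the card does not state them.

```python
# Hedged sketch of the reported setup: LoRA (r=8) on indolem/indobert-base-uncased for NER.
from peft import LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForTokenClassification, TrainingArguments

labels = ["O", "B-LOC", "I-LOC", "B-ORG", "I-ORG", "B-PER", "I-PER"]  # assumed BIO label set
model = AutoModelForTokenClassification.from_pretrained(
    "indolem/indobert-base-uncased", num_labels=len(labels)
)
model = get_peft_model(model, LoraConfig(task_type=TaskType.TOKEN_CLS, r=8))

args = TrainingArguments(
    output_dir="nerui-lora-r8-1",
    learning_rate=5e-5,              # as reported
    per_device_train_batch_size=16,  # as reported
    per_device_eval_batch_size=64,   # as reported
    num_train_epochs=100,
    lr_scheduler_type="linear",
    seed=42,
)
```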
0xfaskety/Qwen-Qwen1.5-7B-1717464763
0xfaskety
2024-06-04T01:39:28Z
7
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2024-06-04T01:32:50Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
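The card above is an unfilled template, so purely as a placeholder-level starting point, a generic loading sketch based on the repo's qwen2/text-generation tags might look like this; everything here is an assumption, not documented usage.

```python
# Hedged sketch: the repo tags suggest a Qwen2-architecture causal LM; nothing below
# comes from the (empty) model card itself.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "0xfaskety/Qwen-Qwen1.5-7B-1717464763"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

inputs = tokenizer("Hello!", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```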
DarkJanissary/neural-chat-7b-v3-2-Q6_K-GGUF
DarkJanissary
2024-06-04T01:39:03Z
3
0
null
[ "gguf", "LLMs", "mistral", "math", "Intel", "llama-cpp", "gguf-my-repo", "en", "dataset:meta-math/MetaMathQA", "base_model:Intel/neural-chat-7b-v3-2", "base_model:quantized:Intel/neural-chat-7b-v3-2", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2024-06-04T01:38:48Z
--- language: - en license: apache-2.0 tags: - LLMs - mistral - math - Intel - llama-cpp - gguf-my-repo base_model: Intel/neural-chat-7b-v3-2 datasets: - meta-math/MetaMathQA model-index: - name: neural-chat-7b-v3-2 results: - task: type: Large Language Model name: Large Language Model dataset: name: meta-math/MetaMathQA type: meta-math/MetaMathQA metrics: - type: ARC (25-shot) value: 67.49 name: ARC (25-shot) verified: true - type: HellaSwag (10-shot) value: 83.92 name: HellaSwag (10-shot) verified: true - type: MMLU (5-shot) value: 63.55 name: MMLU (5-shot) verified: true - type: TruthfulQA (0-shot) value: 59.68 name: TruthfulQA (0-shot) verified: true - type: Winogrande (5-shot) value: 79.95 name: Winogrande (5-shot) verified: true - type: GSM8K (5-shot) value: 55.12 name: GSM8K (5-shot) verified: true --- # DarkJanissary/neural-chat-7b-v3-2-Q6_K-GGUF This model was converted to GGUF format from [`Intel/neural-chat-7b-v3-2`](https://huggingface.co/Intel/neural-chat-7b-v3-2) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/Intel/neural-chat-7b-v3-2) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux). ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama --hf-repo DarkJanissary/neural-chat-7b-v3-2-Q6_K-GGUF --hf-file neural-chat-7b-v3-2-q6_k.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo DarkJanissary/neural-chat-7b-v3-2-Q6_K-GGUF --hf-file neural-chat-7b-v3-2-q6_k.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (for example, LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./main --hf-repo DarkJanissary/neural-chat-7b-v3-2-Q6_K-GGUF --hf-file neural-chat-7b-v3-2-q6_k.gguf -p "The meaning to life and the universe is" ``` or ``` ./server --hf-repo DarkJanissary/neural-chat-7b-v3-2-Q6_K-GGUF --hf-file neural-chat-7b-v3-2-q6_k.gguf -c 2048 ```
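Beyond the CLI and server routes above, the GGUF file can also be loaded from Python; the sketch below uses the llama-cpp-python bindings, which the card does not mention, so treat it as an assumed alternative rather than an official path.

```python
# Hedged sketch via llama-cpp-python (`pip install llama-cpp-python`).
from llama_cpp import Llama

# from_pretrained downloads the GGUF file from the Hub and constructs the model.
llm = Llama.from_pretrained(
    repo_id="DarkJanissary/neural-chat-7b-v3-2-Q6_K-GGUF",
    filename="neural-chat-7b-v3-2-q6_k.gguf",
    n_ctx=2048,
)
out = llm("The meaning to life and the universe is", max_tokens=64)
print(out["choices"][0]["text"])
```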
apwic/nerui-lora-r16-0
apwic
2024-06-04T01:35:19Z
0
0
null
[ "tensorboard", "generated_from_trainer", "id", "base_model:indolem/indobert-base-uncased", "base_model:finetune:indolem/indobert-base-uncased", "license:mit", "region:us" ]
null
2024-05-28T12:30:18Z
--- language: - id license: mit base_model: indolem/indobert-base-uncased tags: - generated_from_trainer model-index: - name: nerui-lora-r16-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nerui-lora-r16-0 This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0440 - Location Precision: 0.8318 - Location Recall: 0.9468 - Location F1: 0.8856 - Location Number: 94 - Organization Precision: 0.8827 - Organization Recall: 0.8563 - Organization F1: 0.8693 - Organization Number: 167 - Person Precision: 1.0 - Person Recall: 0.9854 - Person F1: 0.9926 - Person Number: 137 - Overall Precision: 0.9084 - Overall Recall: 0.9221 - Overall F1: 0.9152 - Overall Accuracy: 0.9845 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Location Precision | Location Recall | Location F1 | Location Number | Organization Precision | Organization Recall | Organization F1 | Organization Number | Person Precision | Person Recall | Person F1 | Person Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------------------:|:---------------:|:-----------:|:---------------:|:----------------------:|:-------------------:|:---------------:|:-------------------:|:----------------:|:-------------:|:---------:|:-------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 1.0607 | 1.0 | 96 | 0.6834 | 0.0 | 0.0 | 0.0 | 94 | 0.0 | 0.0 | 0.0 | 167 | 0.0 | 0.0 | 0.0 | 137 | 0.0 | 0.0 | 0.0 | 0.8343 | | 0.6397 | 2.0 | 192 | 0.5323 | 0.0 | 0.0 | 0.0 | 94 | 0.6667 | 0.0240 | 0.0462 | 167 | 0.2 | 0.0146 | 0.0272 | 137 | 0.375 | 0.0151 | 0.0290 | 0.8367 | | 0.4975 | 3.0 | 288 | 0.3811 | 0.16 | 0.0426 | 0.0672 | 94 | 0.2903 | 0.1617 | 0.2077 | 167 | 0.2606 | 0.2701 | 0.2652 | 137 | 0.2615 | 0.1709 | 0.2067 | 0.8663 | | 0.3542 | 4.0 | 384 | 0.2727 | 0.3231 | 0.2234 | 0.2642 | 94 | 0.4834 | 0.6108 | 0.5397 | 167 | 0.4343 | 0.6277 | 0.5134 | 137 | 0.4409 | 0.5251 | 0.4794 | 0.9141 | | 0.2514 | 5.0 | 480 | 0.1973 | 0.5393 | 0.5106 | 0.5246 | 94 | 0.6049 | 0.7425 | 0.6667 | 167 | 0.7532 | 0.8686 | 0.8068 | 137 | 0.6438 | 0.7312 | 0.6847 | 0.9434 | | 0.2019 | 6.0 | 576 | 0.1491 | 0.6915 | 0.6915 | 0.6915 | 94 | 0.7228 | 0.7964 | 0.7578 | 167 | 0.9161 | 0.9562 | 0.9357 | 137 | 0.7815 | 0.8266 | 0.8034 | 0.9602 | | 0.1643 | 7.0 | 672 | 0.1244 | 0.7170 | 0.8085 | 0.76 | 94 | 0.7308 | 0.7964 | 0.7622 | 167 | 0.9301 | 0.9708 | 0.9500 | 137 | 0.7935 | 0.8593 | 0.8251 | 0.9644 | | 0.1449 | 8.0 | 768 | 0.1025 | 0.7475 | 0.7872 | 0.7668 | 94 | 0.7697 | 0.8204 | 0.7942 | 167 | 0.9496 | 0.9635 | 0.9565 | 137 | 0.8245 | 0.8618 | 0.8428 | 0.9693 | | 0.1318 | 9.0 | 864 | 0.0919 | 0.8163 | 0.8511 | 0.8333 | 94 | 0.7838 | 0.8683 | 0.8239 | 167 | 0.95 | 0.9708 | 0.9603 | 137 | 0.8463 | 
0.8995 | 0.8721 | 0.9721 | | 0.1184 | 10.0 | 960 | 0.0846 | 0.8 | 0.8936 | 0.8442 | 94 | 0.8246 | 0.8443 | 0.8343 | 167 | 0.9504 | 0.9781 | 0.9640 | 137 | 0.8609 | 0.9020 | 0.8810 | 0.9751 | | 0.11 | 11.0 | 1056 | 0.0744 | 0.8454 | 0.8723 | 0.8586 | 94 | 0.8324 | 0.8623 | 0.8471 | 167 | 0.9571 | 0.9781 | 0.9675 | 137 | 0.8780 | 0.9045 | 0.8911 | 0.9773 | | 0.103 | 12.0 | 1152 | 0.0714 | 0.8431 | 0.9149 | 0.8776 | 94 | 0.8471 | 0.8623 | 0.8546 | 167 | 0.9571 | 0.9781 | 0.9675 | 137 | 0.8835 | 0.9146 | 0.8988 | 0.9776 | | 0.0954 | 13.0 | 1248 | 0.0672 | 0.8586 | 0.9043 | 0.8808 | 94 | 0.8471 | 0.8623 | 0.8546 | 167 | 0.9853 | 0.9781 | 0.9817 | 137 | 0.8963 | 0.9121 | 0.9041 | 0.9790 | | 0.0896 | 14.0 | 1344 | 0.0617 | 0.8673 | 0.9043 | 0.8854 | 94 | 0.8466 | 0.8922 | 0.8688 | 167 | 0.9710 | 0.9781 | 0.9745 | 137 | 0.8932 | 0.9246 | 0.9086 | 0.9804 | | 0.0894 | 15.0 | 1440 | 0.0573 | 0.8687 | 0.9149 | 0.8912 | 94 | 0.8596 | 0.8802 | 0.8698 | 167 | 0.9640 | 0.9781 | 0.9710 | 137 | 0.8973 | 0.9221 | 0.9095 | 0.9801 | | 0.0853 | 16.0 | 1536 | 0.0628 | 0.8462 | 0.9362 | 0.8889 | 94 | 0.8457 | 0.8862 | 0.8655 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.8897 | 0.9322 | 0.9104 | 0.9798 | | 0.0813 | 17.0 | 1632 | 0.0562 | 0.8763 | 0.9043 | 0.8901 | 94 | 0.8629 | 0.9042 | 0.8830 | 167 | 0.9640 | 0.9781 | 0.9710 | 137 | 0.9002 | 0.9296 | 0.9147 | 0.9815 | | 0.0804 | 18.0 | 1728 | 0.0545 | 0.85 | 0.9043 | 0.8763 | 94 | 0.8529 | 0.8683 | 0.8605 | 167 | 0.9571 | 0.9781 | 0.9675 | 137 | 0.8878 | 0.9146 | 0.9010 | 0.9798 | | 0.0761 | 19.0 | 1824 | 0.0517 | 0.84 | 0.8936 | 0.8660 | 94 | 0.8675 | 0.8623 | 0.8649 | 167 | 0.9853 | 0.9781 | 0.9817 | 137 | 0.9005 | 0.9095 | 0.905 | 0.9812 | | 0.0761 | 20.0 | 1920 | 0.0532 | 0.84 | 0.8936 | 0.8660 | 94 | 0.8706 | 0.8862 | 0.8783 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.8993 | 0.9196 | 0.9093 | 0.9815 | | 0.071 | 21.0 | 2016 | 0.0553 | 0.8462 | 0.9362 | 0.8889 | 94 | 0.8659 | 0.8503 | 0.8580 | 167 | 0.9781 | 0.9781 | 0.9781 | 137 | 0.8988 | 0.9146 | 0.9066 | 0.9812 | | 0.07 | 22.0 | 2112 | 0.0499 | 0.85 | 0.9043 | 0.8763 | 94 | 0.8728 | 0.9042 | 0.8882 | 167 | 0.9926 | 0.9854 | 0.9890 | 137 | 0.9071 | 0.9322 | 0.9195 | 0.9834 | | 0.0673 | 23.0 | 2208 | 0.0517 | 0.8286 | 0.9255 | 0.8744 | 94 | 0.8712 | 0.8503 | 0.8606 | 167 | 0.9783 | 0.9854 | 0.9818 | 137 | 0.8966 | 0.9146 | 0.9055 | 0.9820 | | 0.0657 | 24.0 | 2304 | 0.0489 | 0.8515 | 0.9149 | 0.8821 | 94 | 0.8772 | 0.8982 | 0.8876 | 167 | 0.9853 | 0.9781 | 0.9817 | 137 | 0.9069 | 0.9296 | 0.9181 | 0.9831 | | 0.0643 | 25.0 | 2400 | 0.0501 | 0.8148 | 0.9362 | 0.8713 | 94 | 0.8805 | 0.8383 | 0.8589 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9030 | 0.9121 | 0.9075 | 0.9823 | | 0.0607 | 26.0 | 2496 | 0.0486 | 0.8317 | 0.8936 | 0.8615 | 94 | 0.8841 | 0.8683 | 0.8761 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.91 | 0.9146 | 0.9123 | 0.9837 | | 0.0629 | 27.0 | 2592 | 0.0493 | 0.8571 | 0.8936 | 0.875 | 94 | 0.8802 | 0.8802 | 0.8802 | 167 | 0.9779 | 0.9708 | 0.9744 | 137 | 0.9077 | 0.9146 | 0.9111 | 0.9826 | | 0.0571 | 28.0 | 2688 | 0.0495 | 0.85 | 0.9043 | 0.8763 | 94 | 0.8727 | 0.8623 | 0.8675 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.9075 | 0.9121 | 0.9098 | 0.9823 | | 0.0564 | 29.0 | 2784 | 0.0469 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.8909 | 0.8802 | 0.8855 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.9156 | 0.9271 | 0.9213 | 0.9851 | | 0.0578 | 30.0 | 2880 | 0.0486 | 0.8476 | 0.9468 | 0.8945 | 94 | 0.875 | 0.8802 | 0.8776 | 167 | 0.9853 | 0.9781 | 0.9817 | 137 | 0.9046 | 0.9296 | 0.9170 | 0.9837 | | 0.0571 | 31.0 | 2976 | 
0.0466 | 0.87 | 0.9255 | 0.8969 | 94 | 0.8727 | 0.8623 | 0.8675 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.9125 | 0.9171 | 0.9148 | 0.9848 | | 0.0517 | 32.0 | 3072 | 0.0480 | 0.8091 | 0.9468 | 0.8725 | 94 | 0.8704 | 0.8443 | 0.8571 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.8968 | 0.9171 | 0.9068 | 0.9829 | | 0.0509 | 33.0 | 3168 | 0.0467 | 0.8224 | 0.9362 | 0.8756 | 94 | 0.8720 | 0.8563 | 0.8640 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9015 | 0.9196 | 0.9104 | 0.9837 | | 0.051 | 34.0 | 3264 | 0.0469 | 0.8286 | 0.9255 | 0.8744 | 94 | 0.8780 | 0.8623 | 0.8701 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.9035 | 0.9171 | 0.9102 | 0.9834 | | 0.0509 | 35.0 | 3360 | 0.0447 | 0.85 | 0.9043 | 0.8763 | 94 | 0.8848 | 0.8743 | 0.8795 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.915 | 0.9196 | 0.9173 | 0.9845 | | 0.0498 | 36.0 | 3456 | 0.0467 | 0.8614 | 0.9255 | 0.8923 | 94 | 0.8713 | 0.8922 | 0.8817 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.9091 | 0.9296 | 0.9193 | 0.9843 | | 0.0486 | 37.0 | 3552 | 0.0439 | 0.86 | 0.9149 | 0.8866 | 94 | 0.8862 | 0.8862 | 0.8862 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.9154 | 0.9246 | 0.92 | 0.9845 | | 0.0486 | 38.0 | 3648 | 0.0430 | 0.8529 | 0.9255 | 0.8878 | 94 | 0.8896 | 0.8683 | 0.8788 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.915 | 0.9196 | 0.9173 | 0.9845 | | 0.0508 | 39.0 | 3744 | 0.0458 | 0.8224 | 0.9362 | 0.8756 | 94 | 0.8758 | 0.8443 | 0.8598 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.9007 | 0.9121 | 0.9064 | 0.9837 | | 0.0487 | 40.0 | 3840 | 0.0416 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.8869 | 0.8922 | 0.8896 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9163 | 0.9347 | 0.9254 | 0.9859 | | 0.0453 | 41.0 | 3936 | 0.0431 | 0.8302 | 0.9362 | 0.88 | 94 | 0.8889 | 0.8623 | 0.8754 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9107 | 0.9221 | 0.9164 | 0.9848 | | 0.0459 | 42.0 | 4032 | 0.0421 | 0.8673 | 0.9043 | 0.8854 | 94 | 0.8909 | 0.8802 | 0.8855 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9221 | 0.9221 | 0.9221 | 0.9854 | | 0.0461 | 43.0 | 4128 | 0.0444 | 0.8286 | 0.9255 | 0.8744 | 94 | 0.8773 | 0.8563 | 0.8667 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9057 | 0.9171 | 0.9114 | 0.9840 | | 0.0436 | 44.0 | 4224 | 0.0418 | 0.8515 | 0.9149 | 0.8821 | 94 | 0.8712 | 0.8503 | 0.8606 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9098 | 0.9121 | 0.9109 | 0.9837 | | 0.0444 | 45.0 | 4320 | 0.0397 | 0.8614 | 0.9255 | 0.8923 | 94 | 0.8970 | 0.8862 | 0.8916 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9227 | 0.9296 | 0.9262 | 0.9867 | | 0.042 | 46.0 | 4416 | 0.0421 | 0.8286 | 0.9255 | 0.8744 | 94 | 0.8820 | 0.8503 | 0.8659 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9077 | 0.9146 | 0.9111 | 0.9848 | | 0.0425 | 47.0 | 4512 | 0.0443 | 0.8241 | 0.9468 | 0.8812 | 94 | 0.8841 | 0.8683 | 0.8761 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9066 | 0.9271 | 0.9168 | 0.9845 | | 0.0416 | 48.0 | 4608 | 0.0418 | 0.8462 | 0.9362 | 0.8889 | 94 | 0.9012 | 0.8743 | 0.8875 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9202 | 0.9271 | 0.9237 | 0.9862 | | 0.0401 | 49.0 | 4704 | 0.0418 | 0.8544 | 0.9362 | 0.8934 | 94 | 0.8841 | 0.8683 | 0.8761 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9154 | 0.9246 | 0.92 | 0.9854 | | 0.0395 | 50.0 | 4800 | 0.0428 | 0.8365 | 0.9255 | 0.8788 | 94 | 0.8773 | 0.8563 | 0.8667 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9080 | 0.9171 | 0.9125 | 0.9848 | | 0.0404 | 51.0 | 4896 | 0.0426 | 0.8462 | 0.9362 | 0.8889 | 94 | 0.8712 | 0.8503 | 0.8606 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9080 | 0.9171 | 0.9125 | 0.9848 | | 0.0388 | 52.0 | 4992 | 0.0405 | 0.8889 | 0.9362 | 0.9119 | 94 | 0.8824 | 0.8982 | 0.8902 | 167 | 1.0 | 0.9854 
| 0.9926 | 137 | 0.9233 | 0.9372 | 0.9302 | 0.9876 | | 0.0406 | 53.0 | 5088 | 0.0409 | 0.87 | 0.9255 | 0.8969 | 94 | 0.875 | 0.8802 | 0.8776 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9156 | 0.9271 | 0.9213 | 0.9856 | | 0.0403 | 54.0 | 5184 | 0.0410 | 0.8713 | 0.9362 | 0.9026 | 94 | 0.8855 | 0.8802 | 0.8829 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9204 | 0.9296 | 0.925 | 0.9856 | | 0.0393 | 55.0 | 5280 | 0.0407 | 0.8529 | 0.9255 | 0.8878 | 94 | 0.8580 | 0.8683 | 0.8631 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9039 | 0.9221 | 0.9129 | 0.9854 | | 0.0397 | 56.0 | 5376 | 0.0408 | 0.8302 | 0.9362 | 0.88 | 94 | 0.8598 | 0.8443 | 0.8520 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.8988 | 0.9146 | 0.9066 | 0.9834 | | 0.0376 | 57.0 | 5472 | 0.0423 | 0.8257 | 0.9574 | 0.8867 | 94 | 0.8812 | 0.8443 | 0.8624 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9059 | 0.9196 | 0.9127 | 0.9845 | | 0.0385 | 58.0 | 5568 | 0.0406 | 0.8687 | 0.9149 | 0.8912 | 94 | 0.8743 | 0.8743 | 0.8743 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9152 | 0.9221 | 0.9186 | 0.9856 | | 0.0371 | 59.0 | 5664 | 0.0407 | 0.8776 | 0.9149 | 0.8958 | 94 | 0.8855 | 0.8802 | 0.8829 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9223 | 0.9246 | 0.9235 | 0.9865 | | 0.0361 | 60.0 | 5760 | 0.0428 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.9062 | 0.8683 | 0.8869 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9179 | 0.9271 | 0.9225 | 0.9851 | | 0.036 | 61.0 | 5856 | 0.0413 | 0.8713 | 0.9362 | 0.9026 | 94 | 0.8935 | 0.9042 | 0.8988 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9235 | 0.9397 | 0.9315 | 0.9862 | | 0.0383 | 62.0 | 5952 | 0.0421 | 0.8302 | 0.9362 | 0.88 | 94 | 0.8909 | 0.8802 | 0.8855 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9113 | 0.9296 | 0.9204 | 0.9848 | | 0.0339 | 63.0 | 6048 | 0.0419 | 0.8381 | 0.9362 | 0.8844 | 94 | 0.9012 | 0.8743 | 0.8875 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9179 | 0.9271 | 0.9225 | 0.9854 | | 0.0363 | 64.0 | 6144 | 0.0428 | 0.8241 | 0.9468 | 0.8812 | 94 | 0.875 | 0.8383 | 0.8563 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9032 | 0.9146 | 0.9089 | 0.9845 | | 0.0355 | 65.0 | 6240 | 0.0422 | 0.8224 | 0.9362 | 0.8756 | 94 | 0.8650 | 0.8443 | 0.8545 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.8988 | 0.9146 | 0.9066 | 0.9845 | | 0.0339 | 66.0 | 6336 | 0.0448 | 0.8241 | 0.9468 | 0.8812 | 94 | 0.8831 | 0.8144 | 0.8474 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9068 | 0.9045 | 0.9057 | 0.9829 | | 0.0352 | 67.0 | 6432 | 0.0429 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8820 | 0.8503 | 0.8659 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9082 | 0.9196 | 0.9139 | 0.9843 | | 0.0337 | 68.0 | 6528 | 0.0458 | 0.8241 | 0.9468 | 0.8812 | 94 | 0.8710 | 0.8084 | 0.8385 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9020 | 0.9020 | 0.9020 | 0.9826 | | 0.0353 | 69.0 | 6624 | 0.0425 | 0.8381 | 0.9362 | 0.8844 | 94 | 0.8841 | 0.8683 | 0.8761 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9109 | 0.9246 | 0.9177 | 0.9851 | | 0.0338 | 70.0 | 6720 | 0.0428 | 0.8365 | 0.9255 | 0.8788 | 94 | 0.8589 | 0.8383 | 0.8485 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9005 | 0.9095 | 0.905 | 0.9834 | | 0.0348 | 71.0 | 6816 | 0.0432 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.9012 | 0.8743 | 0.8875 | 167 | 0.9926 | 0.9781 | 0.9853 | 137 | 0.9134 | 0.9271 | 0.9202 | 0.9851 | | 0.0351 | 72.0 | 6912 | 0.0449 | 0.8091 | 0.9468 | 0.8725 | 94 | 0.8868 | 0.8443 | 0.8650 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9035 | 0.9171 | 0.9102 | 0.9837 | | 0.0327 | 73.0 | 7008 | 0.0439 | 0.8091 | 0.9468 | 0.8725 | 94 | 0.8625 | 0.8263 | 0.8440 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.8938 | 0.9095 | 0.9016 | 0.9826 | | 0.0314 | 74.0 | 7104 | 0.0431 | 0.8462 | 
0.9362 | 0.8889 | 94 | 0.8758 | 0.8443 | 0.8598 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.91 | 0.9146 | 0.9123 | 0.9837 | | 0.0332 | 75.0 | 7200 | 0.0430 | 0.8302 | 0.9362 | 0.88 | 94 | 0.8485 | 0.8383 | 0.8434 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.8941 | 0.9121 | 0.9030 | 0.9834 | | 0.0311 | 76.0 | 7296 | 0.0438 | 0.8365 | 0.9255 | 0.8788 | 94 | 0.8598 | 0.8443 | 0.8520 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9007 | 0.9121 | 0.9064 | 0.9840 | | 0.0322 | 77.0 | 7392 | 0.0455 | 0.8165 | 0.9468 | 0.8768 | 94 | 0.8671 | 0.8204 | 0.8431 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.8980 | 0.9070 | 0.9025 | 0.9823 | | 0.0313 | 78.0 | 7488 | 0.0442 | 0.8302 | 0.9362 | 0.88 | 94 | 0.8712 | 0.8503 | 0.8606 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9035 | 0.9171 | 0.9102 | 0.9840 | | 0.0313 | 79.0 | 7584 | 0.0435 | 0.8302 | 0.9362 | 0.88 | 94 | 0.8659 | 0.8503 | 0.8580 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9012 | 0.9171 | 0.9091 | 0.9845 | | 0.0321 | 80.0 | 7680 | 0.0450 | 0.8165 | 0.9468 | 0.8768 | 94 | 0.8812 | 0.8443 | 0.8624 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9035 | 0.9171 | 0.9102 | 0.9831 | | 0.0303 | 81.0 | 7776 | 0.0441 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8765 | 0.8503 | 0.8632 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9059 | 0.9196 | 0.9127 | 0.9843 | | 0.0322 | 82.0 | 7872 | 0.0442 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8797 | 0.8323 | 0.8554 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9075 | 0.9121 | 0.9098 | 0.9834 | | 0.0313 | 83.0 | 7968 | 0.0447 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8812 | 0.8443 | 0.8624 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9080 | 0.9171 | 0.9125 | 0.9834 | | 0.0292 | 84.0 | 8064 | 0.0448 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8765 | 0.8503 | 0.8632 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9059 | 0.9196 | 0.9127 | 0.9840 | | 0.03 | 85.0 | 8160 | 0.0465 | 0.8396 | 0.9468 | 0.89 | 94 | 0.8734 | 0.8263 | 0.8492 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9073 | 0.9095 | 0.9084 | 0.9831 | | 0.0311 | 86.0 | 8256 | 0.0455 | 0.8302 | 0.9362 | 0.88 | 94 | 0.8773 | 0.8563 | 0.8667 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9059 | 0.9196 | 0.9127 | 0.9837 | | 0.0302 | 87.0 | 8352 | 0.0458 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8696 | 0.8383 | 0.8537 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9032 | 0.9146 | 0.9089 | 0.9834 | | 0.0311 | 88.0 | 8448 | 0.0445 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8758 | 0.8443 | 0.8598 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9057 | 0.9171 | 0.9114 | 0.9834 | | 0.0306 | 89.0 | 8544 | 0.0432 | 0.8365 | 0.9255 | 0.8788 | 94 | 0.8727 | 0.8623 | 0.8675 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9059 | 0.9196 | 0.9127 | 0.9843 | | 0.0292 | 90.0 | 8640 | 0.0444 | 0.8396 | 0.9468 | 0.89 | 94 | 0.8827 | 0.8563 | 0.8693 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9107 | 0.9221 | 0.9164 | 0.9837 | | 0.0302 | 91.0 | 8736 | 0.0451 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.875 | 0.8383 | 0.8563 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9055 | 0.9146 | 0.91 | 0.9829 | | 0.0288 | 92.0 | 8832 | 0.0445 | 0.8396 | 0.9468 | 0.89 | 94 | 0.8773 | 0.8563 | 0.8667 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9084 | 0.9221 | 0.9152 | 0.9845 | | 0.0313 | 93.0 | 8928 | 0.0444 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8827 | 0.8563 | 0.8693 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9084 | 0.9221 | 0.9152 | 0.9845 | | 0.0293 | 94.0 | 9024 | 0.0441 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8944 | 0.8623 | 0.8780 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9132 | 0.9246 | 0.9189 | 0.9848 | | 0.03 | 95.0 | 9120 | 0.0450 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8812 | 0.8443 | 0.8624 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9080 | 0.9171 | 0.9125 | 
0.9837 | | 0.0313 | 96.0 | 9216 | 0.0443 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8827 | 0.8563 | 0.8693 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9084 | 0.9221 | 0.9152 | 0.9845 | | 0.0299 | 97.0 | 9312 | 0.0445 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.875 | 0.8383 | 0.8563 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9055 | 0.9146 | 0.91 | 0.9837 | | 0.0316 | 98.0 | 9408 | 0.0442 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8827 | 0.8563 | 0.8693 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9084 | 0.9221 | 0.9152 | 0.9845 | | 0.0301 | 99.0 | 9504 | 0.0439 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8827 | 0.8563 | 0.8693 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9084 | 0.9221 | 0.9152 | 0.9845 | | 0.0308 | 100.0 | 9600 | 0.0440 | 0.8318 | 0.9468 | 0.8856 | 94 | 0.8827 | 0.8563 | 0.8693 | 167 | 1.0 | 0.9854 | 0.9926 | 137 | 0.9084 | 0.9221 | 0.9152 | 0.9845 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.3.0+cu121 - Datasets 2.19.1 - Tokenizers 0.15.2
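As a usage counterpart to the training table above, a hedged inference sketch that loads this repository as a PEFT adapter on top of the base model follows; the adapter layout and label count are assumptions.

```python
# Hedged sketch: assumes this repo stores a LoRA adapter for token classification.
from peft import PeftModel
from transformers import AutoModelForTokenClassification, AutoTokenizer

base_id = "indolem/indobert-base-uncased"
base = AutoModelForTokenClassification.from_pretrained(base_id, num_labels=7)  # assumed label count
model = PeftModel.from_pretrained(base, "apwic/nerui-lora-r16-0")
tokenizer = AutoTokenizer.from_pretrained(base_id)

enc = tokenizer("Joko Widodo mengunjungi Jakarta.", return_tensors="pt")
pred = model(**enc).logits.argmax(-1)  # per-token label ids
print(pred)
```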
Charixfox/Llama-3-70b-Uncensored-Lumi-Tess-gradient-AWQ-4bit
Charixfox
2024-06-04T01:35:15Z
21
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:other", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "awq", "region:us" ]
text-generation
2024-06-04T00:25:26Z
--- license: other license_name: llama3 license_link: https://llama.meta.com/llama3/license/ ---
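The card carries only license metadata, so purely as an assumed starting point based on the repo's awq/4-bit text-generation tags, loading via 🤗 Transformers with AutoAWQ installed might look like this.

```python
# Hedged sketch: assumes a transformers-compatible AWQ checkpoint (`pip install autoawq`).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Charixfox/Llama-3-70b-Uncensored-Lumi-Tess-gradient-AWQ-4bit"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0], skip_special_tokens=True))
```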