Dataset schema:

| Column | Dtype | Range |
|:--|:--|:--|
| modelId | string | lengths 5 to 139 |
| author | string | lengths 2 to 42 |
| last_modified | timestamp[us, tz=UTC] | 2020-02-15 11:33:14 to 2025-08-30 18:26:50 |
| downloads | int64 | 0 to 223M |
| likes | int64 | 0 to 11.7k |
| library_name | string | 530 classes |
| tags | list | lengths 1 to 4.05k |
| pipeline_tag | string | 55 classes |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 to 2025-08-30 18:26:48 |
| card | string | lengths 11 to 1.01M |
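A minimal sketch of querying this metadata dump, assuming it has been exported as a JSON-lines file with the columns above; the filename `models_metadata.jsonl` is hypothetical:

```python
import pandas as pd

# Load the model-metadata dump; the path is a placeholder for wherever
# this export actually lives.
df = pd.read_json("models_metadata.jsonl", lines=True)

# Example query: the most-downloaded model per pipeline_tag
# (rows with a null pipeline_tag are dropped).
top = (
    df.sort_values("downloads", ascending=False)
      .groupby("pipeline_tag", dropna=True)
      .head(1)
)
print(top[["modelId", "pipeline_tag", "downloads", "likes"]].to_string(index=False))
```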
Word2vec/polyglot_words_embeddings_ilo
Word2vec
2023-05-28T19:51:31Z
0
0
null
[ "word2vec", "ilo", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:03:41Z
--- tags: - word2vec language: ilo license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
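The usage snippet in the card above (repeated verbatim in the sibling polyglot cards below) loses its line breaks in this export; here is the same example as a runnable sketch. One guard is changed: the original appends 0 in place of the query word's own similarity, which could be returned spuriously if every other similarity were negative, so the sketch appends -inf instead. The English repo id and the query word "Irish" are kept exactly as the card gives them, even though this card is for another language.

```python
import pickle

from numpy import dot
from numpy.linalg import norm
from huggingface_hub import hf_hub_download

# Download the pickled (word list, embedding matrix) pair from the Hub.
path = hf_hub_download(
    repo_id="Word2vec/polyglot_words_embeddings_en",
    filename="words_embeddings_en.pkl",
)
with open(path, "rb") as f:
    words, embeddings = pickle.load(f, encoding="latin1")

# Nearest neighbour of a query word under cosine similarity.
word = "Irish"
idx = words.index(word)
a = embeddings[idx]
most_similar = []
for i, b in enumerate(embeddings):
    if i == idx:
        # Exclude the query word itself; -inf is safer than the card's 0.
        most_similar.append(float("-inf"))
    else:
        most_similar.append(dot(a, b) / (norm(a) * norm(b)))

print(words[most_similar.index(max(most_similar))])
```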
Word2vec/polyglot_words_embeddings_fra
Word2vec
2023-05-28T19:49:42Z
0
0
null
[ "word2vec", "fr", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:02:20Z
--- tags: - word2vec language: fr license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_hsb
Word2vec
2023-05-28T19:48:40Z
0
0
null
[ "word2vec", "hsb", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:03:15Z
--- tags: - word2vec language: hsb license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_hif
Word2vec
2023-05-28T19:48:23Z
0
0
null
[ "word2vec", "hif", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:03:06Z
--- tags: - word2vec language: hif license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_bos
Word2vec
2023-05-28T19:44:39Z
0
0
null
[ "word2vec", "bs", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:00:40Z
--- tags: - word2vec language: bs license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_cat
Word2vec
2023-05-28T19:44:29Z
0
0
null
[ "word2vec", "ca", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:00:46Z
--- tags: - word2vec language: ca license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_che
Word2vec
2023-05-28T19:44:21Z
0
0
null
[ "word2vec", "ce", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:00:51Z
--- tags: - word2vec language: ce license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_ces
Word2vec
2023-05-28T19:43:55Z
0
0
null
[ "word2vec", "cs", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:00:58Z
--- tags: - word2vec language: cs license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_chv
Word2vec
2023-05-28T19:43:45Z
0
0
null
[ "word2vec", "cv", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:01:03Z
--- tags: - word2vec language: cv license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_cym
Word2vec
2023-05-28T19:43:35Z
0
0
null
[ "word2vec", "cy", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:01:06Z
--- tags: - word2vec language: cy license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_dan
Word2vec
2023-05-28T19:43:21Z
0
0
null
[ "word2vec", "da", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:01:10Z
--- tags: - word2vec language: da license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_deu
Word2vec
2023-05-28T19:43:10Z
0
0
null
[ "word2vec", "de", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:01:15Z
--- tags: - word2vec language: de license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_div
Word2vec
2023-05-28T19:42:48Z
0
0
null
[ "word2vec", "dv", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:01:23Z
--- tags: - word2vec language: dv license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_eng
Word2vec
2023-05-28T19:41:40Z
0
0
null
[ "word2vec", "en", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:01:36Z
--- tags: - word2vec language: en license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_spa
Word2vec
2023-05-28T19:37:07Z
0
2
null
[ "word2vec", "es", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:01:47Z
--- tags: - word2vec language: es license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_est
Word2vec
2023-05-28T19:36:57Z
0
0
null
[ "word2vec", "et", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:01:52Z
--- tags: - word2vec language: et license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_ast
Word2vec
2023-05-28T19:21:25Z
0
0
null
[ "word2vec", "ast", "license:gpl-3.0", "region:us" ]
null
2023-05-19T21:59:59Z
--- tags: - word2vec language: ast license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_aze
Word2vec
2023-05-28T19:21:16Z
0
0
null
[ "word2vec", "az", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:00:03Z
--- tags: - word2vec language: az license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
asenella/mmnist_MMVAEPlusconfig2_seed_2_ratio_02_c
asenella
2023-05-28T19:20:59Z
0
0
null
[ "multivae", "en", "license:apache-2.0", "region:us" ]
null
2023-05-25T10:42:03Z
--- language: en tags: - multivae license: apache-2.0 --- ### Downloading this model from the Hub This model was trained with multivae. It can be downloaded or reloaded using the `load_from_hf_hub` method: ```python >>> from multivae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path="asenella/mmnist_MMVAEPlusconfig2_seed_2_ratio_02_c") ```
Word2vec/polyglot_words_embeddings_bul
Word2vec
2023-05-28T19:20:32Z
0
4
null
[ "word2vec", "bg", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:00:21Z
--- tags: - word2vec language: bg license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_bpy
Word2vec
2023-05-28T19:19:50Z
0
0
null
[ "word2vec", "bpy", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:00:33Z
--- tags: - word2vec language: bpy license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_amh
Word2vec
2023-05-28T19:19:41Z
0
0
null
[ "word2vec", "am", "license:gpl-3.0", "region:us" ]
null
2023-05-19T21:59:41Z
--- tags: - word2vec language: am license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_nld
Word2vec
2023-05-28T19:14:08Z
0
0
null
[ "word2vec", "nl", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:05:40Z
--- tags: - word2vec language: nl license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_nno
Word2vec
2023-05-28T19:13:59Z
0
0
null
[ "word2vec", "nn", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:05:45Z
--- tags: - word2vec language: nn license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_ori
Word2vec
2023-05-28T19:13:23Z
0
0
null
[ "word2vec", "or", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:06:03Z
--- tags: - word2vec language: or license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_oss
Word2vec
2023-05-28T19:13:14Z
0
0
null
[ "word2vec", "os", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:06:07Z
--- tags: - word2vec language: os license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_pan
Word2vec
2023-05-28T19:13:06Z
0
0
null
[ "word2vec", "pa", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:06:09Z
--- tags: - word2vec language: pa license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
YakovElm/IntelDAOS5Classic_512
YakovElm
2023-05-28T19:12:59Z
61
0
transformers
[ "transformers", "tf", "bert", "text-classification", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-05-28T19:12:26Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: IntelDAOS5Classic_512 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # IntelDAOS5Classic_512 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.3745 - Train Accuracy: 0.8740 - Validation Loss: 0.4273 - Validation Accuracy: 0.8438 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': 1.0, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 3e-05, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.3984 | 0.8710 | 0.4399 | 0.8438 | 0 | | 0.3811 | 0.8740 | 0.4332 | 0.8438 | 1 | | 0.3745 | 0.8740 | 0.4273 | 0.8438 | 2 | ### Framework versions - Transformers 4.29.2 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
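The card above reports a TensorFlow fine-tune of bert-base-uncased for text classification; a minimal inference sketch follows, assuming the standard `transformers` pipeline API. The input string is illustrative, and the card does not document what the predicted labels mean.

```python
from transformers import pipeline

# Load the fine-tuned classifier from the Hub; framework="tf" matches the
# TensorFlow weights this card reports.
clf = pipeline(
    "text-classification",
    model="YakovElm/IntelDAOS5Classic_512",
    framework="tf",
)

# Illustrative input; the card leaves the training data undocumented.
print(clf("Example issue text to classify"))
```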
Word2vec/polyglot_words_embeddings_pam
Word2vec
2023-05-28T19:12:57Z
0
0
null
[ "word2vec", "pam", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:06:13Z
--- tags: - word2vec language: pam license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_pol
Word2vec
2023-05-28T19:12:48Z
0
1
null
[ "word2vec", "pl", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:06:16Z
--- tags: - word2vec language: pl license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_pms
Word2vec
2023-05-28T19:12:38Z
0
0
null
[ "word2vec", "pms", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:06:27Z
--- tags: - word2vec language: pms license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_que
Word2vec
2023-05-28T19:12:09Z
0
0
null
[ "word2vec", "qu", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:06:41Z
--- tags: - word2vec language: qu license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_roh
Word2vec
2023-05-28T19:12:00Z
0
0
null
[ "word2vec", "rm", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:06:44Z
--- tags: - word2vec language: rm license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_nep
Word2vec
2023-05-28T19:11:40Z
0
0
null
[ "word2vec", "ne", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:05:36Z
--- tags: - word2vec language: ne license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_sco
Word2vec
2023-05-28T19:06:02Z
0
0
null
[ "word2vec", "sco", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:07:04Z
--- tags: - word2vec language: sco license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_sme
Word2vec
2023-05-28T19:05:52Z
0
0
null
[ "word2vec", "se", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:07:07Z
--- tags: - word2vec language: se license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_sin
Word2vec
2023-05-28T19:05:32Z
0
0
null
[ "word2vec", "si", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:07:18Z
--- tags: - word2vec language: si license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_scn
Word2vec
2023-05-28T19:04:38Z
0
0
null
[ "word2vec", "scn", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:07:01Z
--- tags: - word2vec language: scn license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_sqi
Word2vec
2023-05-28T19:04:09Z
0
0
null
[ "word2vec", "sq", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:07:33Z
--- tags: - word2vec language: sq license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_srp
Word2vec
2023-05-28T19:03:53Z
0
0
null
[ "word2vec", "sr", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:07:38Z
--- tags: - word2vec language: sr license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_sun
Word2vec
2023-05-28T19:03:35Z
0
0
null
[ "word2vec", "su", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:07:43Z
--- tags: - word2vec language: su license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_swe
Word2vec
2023-05-28T19:03:23Z
0
0
null
[ "word2vec", "sv", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:07:47Z
--- tags: - word2vec language: sv license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_szl
Word2vec
2023-05-28T19:03:02Z
0
0
null
[ "word2vec", "szl", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:07:55Z
--- tags: - word2vec language: szl license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_tgk
Word2vec
2023-05-28T18:55:01Z
0
0
null
[ "word2vec", "tg", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:08:09Z
--- tags: - word2vec language: tg license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_tuk
Word2vec
2023-05-28T18:54:33Z
0
0
null
[ "word2vec", "tk", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:08:16Z
--- tags: - word2vec language: tk license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_uig
Word2vec
2023-05-28T18:53:51Z
0
0
null
[ "word2vec", "ug", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:08:33Z
--- tags: - word2vec language: ug license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_uzb
Word2vec
2023-05-28T18:53:21Z
0
0
null
[ "word2vec", "uz", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:08:46Z
--- tags: - word2vec language: uz license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_vol
Word2vec
2023-05-28T18:52:13Z
0
0
null
[ "word2vec", "vo", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:09:02Z
--- tags: - word2vec language: vo license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_yor
Word2vec
2023-05-28T18:48:16Z
0
0
null
[ "word2vec", "yo", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:09:15Z
--- tags: - word2vec language: yo license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
scroobiustrip/topic-model-v3
scroobiustrip
2023-05-28T18:31:52Z
7
0
transformers
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "text-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-05-28T16:39:10Z
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: topic-model-v3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # topic-model-v3 This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3458 - F1: 0.9015 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 0.6305 | 1.0 | 11964 | 0.4299 | 0.8788 | | 0.3877 | 2.0 | 23928 | 0.3623 | 0.8953 | | 0.3173 | 3.0 | 35892 | 0.3458 | 0.9015 | ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
glombardo/misogynistic-statements-restructuring-model
glombardo
2023-05-28T18:22:03Z
5
0
transformers
[ "transformers", "pytorch", "t5", "text2text-generation", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
2023-05-17T19:07:31Z
--- license: apache-2.0 tags: - text2text-generation - generated_from_trainer model-index: - name: misogynistic-statements-restructuring-model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # misogynistic-statements-restructuring-model This model is a fine-tuned version of [google/mt5-base](https://huggingface.co/google/mt5-base) on the glombardo/misogynistic-statements-and-their-potential-restructuring dataset. It achieves the following results on the evaluation set: - Loss: 23.0975 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
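No usage example is given; below is a minimal sketch, assuming the model takes a raw statement as input through the standard `transformers` text2text-generation pipeline (the input format is an assumption, and the high evaluation loss above suggests outputs may be rough):

```python
from transformers import pipeline

# Fine-tuned mT5 rephraser; the expected input format is an assumption
rephrase = pipeline("text2text-generation", model="glombardo/misogynistic-statements-restructuring-model")
print(rephrase("Women are too emotional to lead.")[0]["generated_text"])
```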
adelmomo/q-FrozenLake-v1-no-slippery
adelmomo
2023-05-28T18:00:42Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2023-05-28T18:00:08Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-no-slippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="adelmomo/q-FrozenLake-v1-no-slippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
chieunq/vietnamese-sentence-paraphase-v1
chieunq
2023-05-28T17:52:05Z
109
0
transformers
[ "transformers", "pytorch", "mt5", "text2text-generation", "vi-T5", "paraphase", "sentence-paraphase", "vi", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2023-05-28T17:36:47Z
--- language: - vi tags: - vi-T5 - paraphase - sentence-paraphase --- This is a tool for paraphrasing Vietnamese sentences. ### How to run ``` CKPT = 'chieunq/vietnamese-sentence-paraphase-v1' from transformers import MT5Tokenizer, MT5ForConditionalGeneration tokenizer = MT5Tokenizer.from_pretrained(CKPT) model = MT5ForConditionalGeneration.from_pretrained(CKPT) def paraphase(text): inputs = tokenizer(text, padding='longest', max_length=64, return_tensors='pt') input_ids = inputs.input_ids attention_mask = inputs.attention_mask output = model.generate(input_ids, attention_mask=attention_mask, max_length=64) return tokenizer.decode(output[0], skip_special_tokens=True) texts = ["Làm sao để sống tốt đời đẹp đạo", "Bài học tốt nhất trong cuộc sống là gì?", "Các bước chuẩn bị phỏng vấn tại Google", "Vì sao phụ nữ thường sống thọ hơn đàn ông?", "Nắng nóng bắt đầu xảy ra ở nhiều nơi tại Bắc Bộ và sẽ tiếp tục tăng nhiệt trong vài ngày tới." ] for text in texts: print(f'Input: {text}') print(f'Output: {paraphase(text)}') print('-'*100) ``` ### Output ``` Input: Làm sao để sống tốt đời đẹp đạo Output: Làm thế nào để tôi sống tốt đẹp ---------------------------------------------------------------------------------------------------- Input: Bài học tốt nhất trong cuộc sống là gì? Output: Bài học cuộc sống tốt nhất là gì? ---------------------------------------------------------------------------------------------------- Input: Các bước chuẩn bị phỏng vấn tại Google Output: Các bước chuẩn bị cho cuộc phỏng vấn tại Google là gì? ---------------------------------------------------------------------------------------------------- Input: Vì sao phụ nữ thường sống thọ hơn đàn ông? Output: Tại sao phụ nữ sống thọ hơn đàn ông? ---------------------------------------------------------------------------------------------------- Input: Nắng nóng bắt đầu xảy ra ở nhiều nơi tại Bắc Bộ và sẽ tiếp tục tăng nhiệt trong vài ngày tới. Output: Nắng nóng bắt đầu xảy ra ở Bắc Bộ và sẽ tiếp tục tăng nhiệt trong vài ngày tới. ---------------------------------------------------------------------------------------------------- ```
edata/taxi-v3
edata
2023-05-28T17:44:57Z
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2023-05-28T17:44:50Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.54 +/- 2.69 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="edata/taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
gugomea/noticias_casificador
gugomea
2023-05-28T17:44:19Z
0
0
fastai
[ "fastai", "region:us" ]
null
2023-05-28T17:44:14Z
--- tags: - fastai --- # Amazing! 🥳 Congratulations on hosting your fastai model on the Hugging Face Hub! # Some next steps 1. Fill out this model card with more information (see the template below and the [documentation here](https://huggingface.co/docs/hub/model-repos))! 2. Create a demo in Gradio or Streamlit using 🤗 Spaces ([documentation here](https://huggingface.co/docs/hub/spaces)). 3. Join the fastai community on the [Fastai Discord](https://discord.com/invite/YKrxeNn)! Greetings fellow fastlearner 🤝! Don't forget to delete this content from your model card. --- # Model card ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed
damian0815/minigpt4-ff7r
damian0815
2023-05-28T17:40:34Z
0
0
null
[ "license:cc-by-nc-4.0", "region:us" ]
null
2023-05-28T15:53:12Z
--- license: cc-by-nc-4.0 --- MiniGPT-4 checkpoint aligned with @panopstor's FF7R dataset (link in the EveryDream discord). Produces captions that are more useful for training SD datasets than MiniGPT-4's default output. The easiest way to use this is to launch a docker instance for [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui), e.g. `TheBloke/runpod-pytorch-runclick`, and follow the instructions for MiniGPT-4 [here](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/multimodal). For now you'll need to manually edit `minigpt4_pipeline.py` ([this line](https://github.com/Wojtab/minigpt-4-pipeline/blob/16eda85c4bb15e2b1b05b20c55907a8ea2c06764/minigpt4_pipeline.py#L52)) to point to [the .pth file in this repo](minigpt4-align-ff7r.pth) instead of the default. ## Dataset adapted from @panopstor's FF7R dataset - [zip here](cc_sbu_align_ff7r.zip) ## Sample output: ![](examples/1.png) ![](examples/2.png) ![](examples/3.png)
Dr-BERT/CAS-Biomedical-POS-Tagging
Dr-BERT
2023-05-28T17:38:50Z
104
5
transformers
[ "transformers", "pytorch", "camembert", "token-classification", "medical", "fr", "dataset:bigbio/cas", "arxiv:2304.00958", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-04-05T06:19:37Z
--- license: apache-2.0 datasets: - bigbio/cas language: - fr metrics: - f1 library_name: transformers tags: - medical widget: - text: Patiente atteinte d’une pathologie chronique - text: Vous êtes amené à prendre en charge un homme de 54 ans qui souffre d’une spondylarthrite ankylosante sévère. --- <p align="center"> <img src="https://github.com/qanastek/DrBERT/blob/main/assets/logo.png?raw=true" alt="drawing" width="250"/> </p> - Corpora: [bigbio/cas](https://huggingface.co/datasets/bigbio/cas) - Embeddings & Sequence Labelling: [DrBERT-7GB](https://arxiv.org/abs/2304.00958) - Number of Epochs: 200 # DrBERT: A Robust Pre-trained Model in French for Biomedical and Clinical domains In recent years, pre-trained language models (PLMs) achieve the best performance on a wide range of natural language processing (NLP) tasks. While the first models were trained on general domain data, specialized ones have emerged to more effectively treat specific domains. In this paper, we propose an original study of PLMs in the medical domain on the French language. We compare, for the first time, the performance of PLMs trained on both public data from the web and private data from healthcare establishments. We also evaluate different learning strategies on a set of biomedical tasks. Finally, we release the first specialized PLMs for the biomedical field in French, called DrBERT, as well as the largest corpus of medical data under free license on which these models are trained. # CAS: French Corpus with Clinical Cases | | Train | Dev | Test | |:---------:|:-----:|:-----:|:-----:| | Documents | 5,306 | 1,137 | 1,137 | The ESSAIS (Dalloux et al., 2021) and CAS (Grabar et al., 2018) corpora respectively contain 13,848 and 7,580 clinical cases in French. Some clinical cases are associated with discussions. A subset of the whole set of cases is enriched with morpho-syntactic (part-of-speech (POS) tagging, lemmatization) and semantic (UMLS concepts, negation, uncertainty) annotations. In our case, we focus only on the POS tagging task. 
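The card gives no inference snippet; below is a minimal sketch, assuming the checkpoint loads through the standard `transformers` token-classification pipeline (the example sentence is one of the widget inputs above):

```python
from transformers import pipeline

# Load the fine-tuned DrBERT POS tagger from the Hub
tagger = pipeline("token-classification", model="Dr-BERT/CAS-Biomedical-POS-Tagging")

# Print one predicted POS tag per (sub)token of a French clinical sentence
for token in tagger("Patiente atteinte d’une pathologie chronique"):
    print(token["word"], token["entity"])
```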
# Model Metrics ```plain precision recall f1-score support ABR 0.8683 0.8480 0.8580 171 ADJ 0.9634 0.9751 0.9692 4018 ADV 0.9935 0.9849 0.9892 926 DET:ART 0.9982 0.9997 0.9989 3308 DET:POS 1.0000 1.0000 1.0000 133 INT 1.0000 0.7000 0.8235 10 KON 0.9883 0.9976 0.9929 845 NAM 0.9144 0.9353 0.9247 834 NOM 0.9827 0.9803 0.9815 7980 NUM 0.9825 0.9845 0.9835 1422 PRO:DEM 0.9924 1.0000 0.9962 131 PRO:IND 0.9630 1.0000 0.9811 78 PRO:PER 0.9948 0.9931 0.9939 579 PRO:REL 1.0000 0.9908 0.9954 109 PRP 0.9989 0.9982 0.9985 3785 PRP:det 1.0000 0.9985 0.9993 681 PUN 0.9996 0.9958 0.9977 2376 PUN:cit 0.9756 0.9524 0.9639 84 SENT 1.0000 0.9974 0.9987 1174 SYM 0.9495 1.0000 0.9741 94 VER:cond 1.0000 1.0000 1.0000 11 VER:futu 1.0000 0.9444 0.9714 18 VER:impf 1.0000 0.9963 0.9981 804 VER:infi 1.0000 0.9585 0.9788 193 VER:pper 0.9742 0.9564 0.9652 1261 VER:ppre 0.9617 0.9901 0.9757 203 VER:pres 0.9833 0.9904 0.9868 830 VER:simp 0.9123 0.7761 0.8387 67 VER:subi 1.0000 0.7000 0.8235 10 VER:subp 1.0000 0.8333 0.9091 18 accuracy 0.9842 32153 macro avg 0.9799 0.9492 0.9623 32153 weighted avg 0.9843 0.9842 0.9842 32153 ``` # Citation BibTeX ```bibtex @inproceedings{labrak2023drbert, title = {{DrBERT: A Robust Pre-trained Model in French for Biomedical and Clinical domains}}, author = {Labrak, Yanis and Bazoge, Adrien and Dufour, Richard and Rouvier, Mickael and Morin, Emmanuel and Daille, Béatrice and Gourraud, Pierre-Antoine}, booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL'23), Long Paper}, month = jul, year = 2023, address = {Toronto, Canada}, publisher = {Association for Computational Linguistics} } ```
Dr-BERT/DrBERT-7GB-Large
Dr-BERT
2023-05-28T17:38:14Z
243
2
transformers
[ "transformers", "pytorch", "camembert", "fill-mask", "medical", "chemistry", "biomedical", "life science", "fr", "dataset:Dr-BERT/NACHOS", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2023-04-09T13:59:08Z
--- license: apache-2.0 datasets: - Dr-BERT/NACHOS language: - fr library_name: transformers tags: - medical - chemistry - biomedical - life science widget: - text: "Le patient est atteint d'une <mask>." --- <p align="center"> <img src="https://github.com/qanastek/DrBERT/blob/main/assets/logo.png?raw=true" alt="drawing" width="250"/> </p> # DrBERT: A Robust Pre-trained Model in French for Biomedical and Clinical domains In recent years, pre-trained language models (PLMs) achieve the best performance on a wide range of natural language processing (NLP) tasks. While the first models were trained on general domain data, specialized ones have emerged to more effectively treat specific domains. In this paper, we propose an original study of PLMs in the medical domain on the French language. We compare, for the first time, the performance of PLMs trained on both public data from the web and private data from healthcare establishments. We also evaluate different learning strategies on a set of biomedical tasks. Finally, we release the first specialized PLMs for the biomedical field in French, called DrBERT, as well as the largest corpus of medical data under free license on which these models are trained. # 1. DrBERT models **DrBERT** is a French RoBERTa trained on an open-source corpus of crawled French medical textual data called NACHOS. Models with different amounts of data from different public and private sources are trained using the CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/jean-zay/) French supercomputer. Only the weights of the models trained exclusively on open-source data are publicly released, to prevent any personal information leak and to comply with the European GDPR laws: | Model name | Corpus | Number of layers | Attention Heads | Embedding Dimension | Sequence Length | Model URL | | :------: | :---: | :---: | :---: | :---: | :---: | :---: | | `DrBERT-7-GB-cased-Large` | NACHOS 7 GB | 24 | 16 | 1024 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-7GB-Large) | | `DrBERT-7-GB-cased` | NACHOS 7 GB | 12 | 12 | 768 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-7GB) | | `DrBERT-4-GB-cased` | NACHOS 4 GB | 12 | 12 | 768 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-4GB) | | `DrBERT-4-GB-cased-CP-CamemBERT` | NACHOS 4 GB | 12 | 12 | 768 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-4GB-CP-CamemBERT) | | `DrBERT-4-GB-cased-CP-PubMedBERT` | NACHOS 4 GB | 12 | 12 | 768 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-4GB-CP-PubMedBERT) | # 2. Using DrBERT You can use DrBERT with [Hugging Face's Transformers library](https://github.com/huggingface/transformers) as follows. Loading the model and tokenizer: ```python from transformers import AutoModel, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("Dr-BERT/DrBERT-7GB-Large") model = AutoModel.from_pretrained("Dr-BERT/DrBERT-7GB-Large") ``` Perform the mask-filling task: ```python from transformers import pipeline fill_mask = pipeline("fill-mask", model="Dr-BERT/DrBERT-7GB-Large", tokenizer="Dr-BERT/DrBERT-7GB-Large") results = fill_mask("La patiente est atteinte d'une <mask>") ``` # 3. 
Pre-training DrBERT tokenizer and model from scratch using the HuggingFace Transformers Library ## 3.1 Install dependencies ```bash accelerate @ git+https://github.com/huggingface/accelerate@66edfe103a0de9607f9b9fdcf6a8e2132486d99b datasets==2.6.1 sentencepiece==0.1.97 protobuf==3.20.1 evaluate==0.2.2 tensorboard==2.11.0 torch >= 1.3 ``` ## 3.2 Download the NACHOS dataset text file Download the full NACHOS dataset from [Zenodo]() and place it in the `from_scratch` or `continued_pretraining` directory. ## 3.3 Build your own tokenizer from scratch based on NACHOS Note: this step is required only for pre-training from scratch. If you want to do continued pre-training, you just have to download the model and the tokenizer corresponding to the model you want to continue training from. In this case, simply go to the HuggingFace Hub, select a model (for example [RoBERTa-base](https://huggingface.co/roberta-base)), then download the entire model/tokenizer repository by clicking on the `Use In Transformers` button and using the Git link `git clone https://huggingface.co/roberta-base`. Build the tokenizer from scratch on the data in `./corpus.txt` by running `./build_tokenizer.sh`. ## 3.4 Preprocessing and tokenization of the dataset First, set the `tokenizer_path` field of the shell script to the path of the tokenizer directory you downloaded via HuggingFace Git or the one you built. Run `./preprocessing_dataset.sh` to generate the tokenized dataset with the given tokenizer. ## 3.5 Model training First, change the number of GPUs (`--ntasks=128`) in the shell script `run_training.sh` to match your computational capabilities. In our case, we used 128 V100 32 GB GPUs across 32 nodes of 4 GPUs (`--ntasks-per-node=4` and `--gres=gpu:4`) for 20 hours (`--time=20:00:00`). If you are using Jean Zay, you also need to change the `-A` flag to match one of your `@gpu` profiles capable of running the job. You also need to move **ALL** of your datasets, tokenizer, scripts and outputs to the `$SCRATCH` disk space to prevent other users from suffering IO issues. ### 3.5.1 Pre-training from scratch Once the SLURM parameters are updated, change the name of the model architecture in the flag `--model_type="camembert"` and update `--config_overrides=` according to the specifications of the architecture you are trying to train. In our case, RoBERTa had a `514` sequence length and a vocabulary of `32005` tokens (32K tokens of the tokenizer and 5 of the model architecture); the identifiers of the beginning-of-sentence (BOS) and end-of-sentence (EOS) tokens are respectively `5` and `6`. Then, go to the `./from_scratch/` directory. Run `sbatch ./run_training.sh` to send the training job to the SLURM queue. ### 3.5.2 Continued pre-training Once the SLURM parameters are updated, change the path of the model/tokenizer you want to start from (`--model_name_or_path=` / `--tokenizer_name=`) to the path of the model downloaded from HuggingFace's Git in section 3.3. Then, go to the `./continued_pretraining/` directory. Run `sbatch ./run_training.sh` to send the training job to the SLURM queue. # 4. Fine-tuning on a downstream task You just need to change the name of the model to `Dr-BERT/DrBERT-7GB` in any of the examples given by HuggingFace's team [here](https://huggingface.co/docs/transformers/tasks/sequence_classification). 
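As a concrete illustration of section 4, a minimal sketch of plugging DrBERT into a standard sequence-classification fine-tuning setup (the `num_labels` value is a placeholder for your own task):

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Swap the backbone of any standard fine-tuning recipe for DrBERT
tokenizer = AutoTokenizer.from_pretrained("Dr-BERT/DrBERT-7GB")
model = AutoModelForSequenceClassification.from_pretrained(
    "Dr-BERT/DrBERT-7GB",
    num_labels=2,  # placeholder: set to the number of classes in your task
)
```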
# Citation BibTeX ```bibtex @inproceedings{labrak2023drbert, title = {{DrBERT: A Robust Pre-trained Model in French for Biomedical and Clinical domains}}, author = {Labrak, Yanis and Bazoge, Adrien and Dufour, Richard and Rouvier, Mickael and Morin, Emmanuel and Daille, Béatrice and Gourraud, Pierre-Antoine}, booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL'23), Long Paper}, month = jul, year = 2023, address = {Toronto, Canada}, publisher = {Association for Computational Linguistics} } ```
Dr-BERT/DrBERT-4GB
Dr-BERT
2023-05-28T17:38:07Z
4
1
transformers
[ "transformers", "pytorch", "tensorboard", "camembert", "fill-mask", "medical", "chemistry", "biomedical", "life science", "fr", "dataset:Dr-BERT/NACHOS", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2022-12-25T19:21:28Z
--- license: apache-2.0 datasets: - Dr-BERT/NACHOS language: - fr library_name: transformers tags: - medical - chemistry - biomedical - life science --- <p align="center"> <img src="https://github.com/qanastek/DrBERT/blob/main/assets/logo.png?raw=true" alt="drawing" width="250"/> </p> # DrBERT: A Robust Pre-trained Model in French for Biomedical and Clinical domains In recent years, pre-trained language models (PLMs) achieve the best performance on a wide range of natural language processing (NLP) tasks. While the first models were trained on general domain data, specialized ones have emerged to more effectively treat specific domains. In this paper, we propose an original study of PLMs in the medical domain on the French language. We compare, for the first time, the performance of PLMs trained on both public data from the web and private data from healthcare establishments. We also evaluate different learning strategies on a set of biomedical tasks. Finally, we release the first specialized PLMs for the biomedical field in French, called DrBERT, as well as the largest corpus of medical data under free license on which these models are trained. # 1. DrBERT models **DrBERT** is a French RoBERTa trained on an open-source corpus of crawled French medical textual data called NACHOS. Models with different amounts of data from different public and private sources are trained using the CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/jean-zay/) French supercomputer. Only the weights of the models trained exclusively on open-source data are publicly released, to prevent any personal information leak and to comply with the European GDPR laws: | Model name | Corpus | Number of layers | Attention Heads | Embedding Dimension | Sequence Length | Model URL | | :------: | :---: | :---: | :---: | :---: | :---: | :---: | | `DrBERT-7-GB-cased-Large` | NACHOS 7 GB | 24 | 16 | 1024 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-7GB-Large) | | `DrBERT-7-GB-cased` | NACHOS 7 GB | 12 | 12 | 768 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-7GB) | | `DrBERT-4-GB-cased` | NACHOS 4 GB | 12 | 12 | 768 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-4GB) | | `DrBERT-4-GB-cased-CP-CamemBERT` | NACHOS 4 GB | 12 | 12 | 768 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-4GB-CP-CamemBERT) | | `DrBERT-4-GB-cased-CP-PubMedBERT` | NACHOS 4 GB | 12 | 12 | 768 | 512 | [HuggingFace](https://huggingface.co/Dr-BERT/DrBERT-4GB-CP-PubMedBERT) | # 2. Using DrBERT You can use DrBERT with [Hugging Face's Transformers library](https://github.com/huggingface/transformers) as follows. Loading the model and tokenizer: ```python from transformers import AutoModel, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("Dr-BERT/DrBERT-7GB") model = AutoModel.from_pretrained("Dr-BERT/DrBERT-7GB") ``` Perform the mask-filling task: ```python from transformers import pipeline fill_mask = pipeline("fill-mask", model="Dr-BERT/DrBERT-7GB", tokenizer="Dr-BERT/DrBERT-7GB") results = fill_mask("La patiente est atteinte d'une <mask>") ``` # 3. 
Pre-training DrBERT tokenizer and model from scratch using the HuggingFace Transformers Library ## 3.1 Install dependencies ```bash accelerate @ git+https://github.com/huggingface/accelerate@66edfe103a0de9607f9b9fdcf6a8e2132486d99b datasets==2.6.1 sentencepiece==0.1.97 protobuf==3.20.1 evaluate==0.2.2 tensorboard==2.11.0 torch >= 1.3 ``` ## 3.2 Download the NACHOS dataset text file Download the full NACHOS dataset from [Zenodo]() and place it in the `from_scratch` or `continued_pretraining` directory. ## 3.3 Build your own tokenizer from scratch based on NACHOS Note: this step is required only for pre-training from scratch. If you want to do continued pre-training, you just have to download the model and the tokenizer corresponding to the model you want to continue training from. In this case, simply go to the HuggingFace Hub, select a model (for example [RoBERTa-base](https://huggingface.co/roberta-base)), then download the entire model/tokenizer repository by clicking on the `Use In Transformers` button and using the Git link `git clone https://huggingface.co/roberta-base`. Build the tokenizer from scratch on the data in `./corpus.txt` by running `./build_tokenizer.sh`. ## 3.4 Preprocessing and tokenization of the dataset First, set the `tokenizer_path` field of the shell script to the path of the tokenizer directory you downloaded via HuggingFace Git or the one you built. Run `./preprocessing_dataset.sh` to generate the tokenized dataset with the given tokenizer. ## 3.5 Model training First, change the number of GPUs (`--ntasks=128`) in the shell script `run_training.sh` to match your computational capabilities. In our case, we used 128 V100 32 GB GPUs across 32 nodes of 4 GPUs (`--ntasks-per-node=4` and `--gres=gpu:4`) for 20 hours (`--time=20:00:00`). If you are using Jean Zay, you also need to change the `-A` flag to match one of your `@gpu` profiles capable of running the job. You also need to move **ALL** of your datasets, tokenizer, scripts and outputs to the `$SCRATCH` disk space to prevent other users from suffering IO issues. ### 3.5.1 Pre-training from scratch Once the SLURM parameters are updated, change the name of the model architecture in the flag `--model_type="camembert"` and update `--config_overrides=` according to the specifications of the architecture you are trying to train. In our case, RoBERTa had a `514` sequence length and a vocabulary of `32005` tokens (32K tokens of the tokenizer and 5 of the model architecture); the identifiers of the beginning-of-sentence (BOS) and end-of-sentence (EOS) tokens are respectively `5` and `6`. Then, go to the `./from_scratch/` directory. Run `sbatch ./run_training.sh` to send the training job to the SLURM queue. ### 3.5.2 Continued pre-training Once the SLURM parameters are updated, change the path of the model/tokenizer you want to start from (`--model_name_or_path=` / `--tokenizer_name=`) to the path of the model downloaded from HuggingFace's Git in section 3.3. Then, go to the `./continued_pretraining/` directory. Run `sbatch ./run_training.sh` to send the training job to the SLURM queue. # 4. Fine-tuning on a downstream task You just need to change the name of the model to `Dr-BERT/DrBERT-7GB` in any of the examples given by HuggingFace's team [here](https://huggingface.co/docs/transformers/tasks/sequence_classification). 
# Citation BibTeX ```bibtex @inproceedings{labrak2023drbert, title = {{DrBERT: A Robust Pre-trained Model in French for Biomedical and Clinical domains}}, author = {Labrak, Yanis and Bazoge, Adrien and Dufour, Richard and Rouvier, Mickael and Morin, Emmanuel and Daille, Béatrice and Gourraud, Pierre-Antoine}, booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL'23), Long Paper}, month = jul, year = 2023, address = {Toronto, Canada}, publisher = {Association for Computational Linguistics} } ```
edata/q-FrozenLake-v1-4x4-noSlippery
edata
2023-05-28T17:37:17Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2023-05-28T17:37:12Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="edata/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
platzi/platzi-vit-model-joel-orellana
platzi
2023-05-28T17:34:34Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "vit", "image-classification", "generated_from_trainer", "dataset:beans", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-05-28T17:18:39Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - beans metrics: - accuracy model-index: - name: platzi-vit-model-joel-orellana results: - task: name: Image Classification type: image-classification dataset: name: beans type: beans config: default split: validation args: default metrics: - name: Accuracy type: accuracy value: 0.9924812030075187 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-vit-model-joel-orellana This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0283 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1313 | 3.85 | 500 | 0.0283 | 0.9925 | ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
ChristianMDahl/segFormer_ver1_horizontal
ChristianMDahl
2023-05-28T17:31:58Z
31
0
transformers
[ "transformers", "tf", "segformer", "generated_from_keras_callback", "license:other", "endpoints_compatible", "region:us" ]
null
2023-05-28T07:00:34Z
--- license: other tags: - generated_from_keras_callback model-index: - name: ChristianMDahl/segFormer_ver1_horizontal results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ChristianMDahl/segFormer_ver1_horizontal This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.1767 - Validation Loss: 0.1918 - Epoch: 19 ## Model description Model for **horizontal** lines ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 6e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 0.3302 | 0.2673 | 0 | | 0.2515 | 0.2329 | 1 | | 0.2335 | 0.2197 | 2 | | 0.2226 | 0.2125 | 3 | | 0.2153 | 0.2083 | 4 | | 0.2105 | 0.2039 | 5 | | 0.2061 | 0.2023 | 6 | | 0.2025 | 0.2013 | 7 | | 0.1995 | 0.2015 | 8 | | 0.1960 | 0.1976 | 9 | | 0.1938 | 0.1966 | 10 | | 0.1909 | 0.1973 | 11 | | 0.1882 | 0.1936 | 12 | | 0.1865 | 0.1951 | 13 | | 0.1845 | 0.1942 | 14 | | 0.1826 | 0.1953 | 15 | | 0.1810 | 0.1934 | 16 | | 0.1794 | 0.1928 | 17 | | 0.1782 | 0.1919 | 18 | | 0.1767 | 0.1918 | 19 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.10.1 - Datasets 2.12.0 - Tokenizers 0.13.0.dev0
Seogmin/my_awesome_model
Seogmin
2023-05-28T17:21:17Z
105
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:imdb", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-05-28T17:18:13Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy model-index: - name: my_awesome_model results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - name: Accuracy type: accuracy value: 1.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.0021 - Accuracy: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 63 | 0.0032 | 1.0 | | No log | 2.0 | 126 | 0.0021 | 1.0 | ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
dimi1357/poca-SoccerTwos
dimi1357
2023-05-28T17:20:46Z
0
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "unity-ml-agents", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-SoccerTwos", "region:us" ]
reinforcement-learning
2023-05-28T17:20:41Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Write your model_id: dimi1357/poca-SoccerTwos 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
Word2vec/polyglot_words_embeddings_afr
Word2vec
2023-05-28T17:13:58Z
0
0
null
[ "word2vec", "af", "license:gpl-3.0", "region:us" ]
null
2023-05-19T21:59:34Z
--- tags: - word2vec language: af license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
Word2vec/polyglot_words_embeddings_zhc
Word2vec
2023-05-28T17:03:30Z
0
0
null
[ "word2vec", "zhc", "license:gpl-3.0", "region:us" ]
null
2023-05-19T22:09:23Z
--- tags: - word2vec language: zhc license: gpl-3.0 --- ## Description Word embedding model trained by Al-Rfou et al. ## How to use? ``` import pickle from numpy import dot from numpy.linalg import norm from huggingface_hub import hf_hub_download words, embeddings = pickle.load(open(hf_hub_download(repo_id="Word2vec/polyglot_words_embeddings_en", filename="words_embeddings_en.pkl"), 'rb'),encoding="latin1") word = "Irish" a = embeddings[words.index(word)] most_similar = [] for i in range(len(embeddings)): if i != words.index(word): b = embeddings[i] cos_sim = dot(a, b)/(norm(a)*norm(b)) most_similar.append(cos_sim) else: most_similar.append(0) words[most_similar.index(max(most_similar))] ``` ## Citation ``` @InProceedings{polyglot:2013:ACL-CoNLL, author = {Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven}, title = {Polyglot: Distributed Word Representations for Multilingual NLP}, booktitle = {Proceedings of the Seventeenth Conference on Computational Natural Language Learning}, month = {August}, year = {2013}, address = {Sofia, Bulgaria}, publisher = {Association for Computational Linguistics}, pages = {183--192}, url = {http://www.aclweb.org/anthology/W13-3520} } ```
thackerhelik/Reinforce-Cartpole-v1
thackerhelik
2023-05-28T16:56:26Z
0
0
null
[ "CartPole-v1", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2023-05-28T16:56:16Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Cartpole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
arturboyun/finetuning-sentiment-model-3000-samples
arturboyun
2023-05-28T16:51:35Z
107
0
transformers
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:imdb", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-05-28T16:02:30Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - name: Accuracy type: accuracy value: 0.8833333333333333 - name: F1 type: f1 value: 0.8837209302325582 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.2907 - Accuracy: 0.8833 - F1: 0.8837 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
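The intended-uses section above is empty; below is a minimal sketch, assuming the checkpoint works with the standard `transformers` text-classification pipeline (the example input and printed labels are illustrative):

```python
from transformers import pipeline

# Binary sentiment classifier fine-tuned on 3,000 IMDB samples
sentiment = pipeline("text-classification", model="arturboyun/finetuning-sentiment-model-3000-samples")
print(sentiment("This movie was surprisingly good."))  # e.g. [{'label': ..., 'score': ...}]
```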
smartik/mbart-large-50-finetuned-ua-gec-2.1
smartik
2023-05-28T16:50:37Z
3
0
transformers
[ "transformers", "pytorch", "tensorboard", "mbart", "text2text-generation", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2023-05-28T15:12:13Z
--- license: mit tags: - generated_from_trainer metrics: - rouge model-index: - name: mbart-large-50-finetuned-ua-gec-2.1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-large-50-finetuned-ua-gec-2.1 This model is a fine-tuned version of [facebook/mbart-large-50](https://huggingface.co/facebook/mbart-large-50) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5376 - Rouge1: 18.2963 - Rouge2: 10.2365 - Rougel: 18.2593 - Rougelsum: 18.2759 - Gen Len: 28.6107 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 0.0765 | 1.0 | 1010 | 0.4070 | 18.2963 | 10.2365 | 18.2593 | 18.2759 | 28.522 | | 0.046 | 2.0 | 2020 | 0.4710 | 18.2963 | 10.2365 | 18.2593 | 18.2759 | 28.578 | | 0.0291 | 3.0 | 3030 | 0.4885 | 18.2833 | 10.2052 | 18.2454 | 18.263 | 28.5793 | | 0.0188 | 4.0 | 4040 | 0.5145 | 18.2963 | 10.2365 | 18.2593 | 18.2759 | 28.6127 | | 0.0117 | 5.0 | 5050 | 0.5376 | 18.2963 | 10.2365 | 18.2593 | 18.2759 | 28.6107 | ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
dimi1357/rl_course_vizdoom_health_gathering_supreme
dimi1357
2023-05-28T16:47:22Z
0
0
sample-factory
[ "sample-factory", "tensorboard", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-05-28T16:36:32Z
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 11.18 +/- 5.56 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r dimi1357/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m .usr.local.lib.python3.10.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m .usr.local.lib.python3.10.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
Abhilashvj/CIRCL_website_classifier_test
Abhilashvj
2023-05-28T16:44:04Z
52
1
transformers
[ "transformers", "pytorch", "resnet", "image-classification", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-05-28T16:16:41Z
--- license: apache-2.0 pipeline_tag: image-classification metrics: - accuracy - f1 --- # Model Card for Model ID <!-- This model can be used to classify website screenshots. --> This model card aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model. More information is needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!-- fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
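The quick-start section of the card above is empty; since the card declares an image-classification pipeline for website screenshots, here is a minimal hedged sketch. The repo id and image path are placeholders, not real artifacts:

```python
from transformers import pipeline

# "your-username/screenshot-classifier" is a hypothetical repo id; substitute the
# real one once the model is published.
classifier = pipeline("image-classification", model="your-username/screenshot-classifier")
print(classifier("screenshot.png"))  # path to a local website screenshot
```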
Word2vec/fauconnier_frWac_non_lem_no_postag_no_phrase_500_skip_cut200
Word2vec
2023-05-28T16:42:43Z
0
0
null
[ "word2vec", "fr", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T20:30:13Z
--- tags: - word2vec language: fr license: cc-by-3.0 --- ### Description A French word2vec model trained on [FrWac](https://wacky.sslmit.unibo.it/doku.php?id=corpora) by Fauconnier with the following hyperparameters: lem: no, pos: no, phrase: no, train: skip, dim: 500, cutoff: 200 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWac_non_lem_no_postag_no_phrase_500_skip_cut200", filename="frWac_non_lem_no_postag_no_phrase_500_skip_cut200.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
Word2vec/fauconnier_frWac_no_postag_no_phrase_700_skip_cut50
Word2vec
2023-05-28T16:39:59Z
0
0
null
[ "word2vec", "fr", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T20:25:03Z
--- tags: - word2vec language: fr license: cc-by-3.0 --- ### Description A French word2vec model trained on [FrWac](https://wacky.sslmit.unibo.it/doku.php?id=corpora) by Fauconnier with the following hyperparameters: lem: yes, pos: no, phrase: no, train: skip, dim: 700, cutoff: 50 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWac_no_postag_no_phrase_700_skip_cut50", filename="frWac_no_postag_no_phrase_700_skip_cut50.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
Word2vec/fauconnier_frWac_no_postag_no_phrase_500_cbow_cut100
Word2vec
2023-05-28T16:39:32Z
0
0
null
[ "word2vec", "fr", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T20:23:23Z
--- tags: - word2vec language: fr license: cc-by-3.0 --- ### Description A French word2vec model trained on [FrWac](https://wacky.sslmit.unibo.it/doku.php?id=corpora) by Fauconnier with the following hyperparameters: lem: yes, pos: no, phrase: no, train: cbow, dim: 500, cutoff: 100 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWac_no_postag_no_phrase_500_cbow_cut100", filename="frWac_no_postag_no_phrase_500_cbow_cut100.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
Word2vec/fauconnier_frWac_no_postag_no_phrase_500_skip_cut100
Word2vec
2023-05-28T16:38:57Z
0
0
null
[ "word2vec", "fr", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T20:24:07Z
--- tags: - word2vec language: fr license: cc-by-3.0 --- ### Description A French word2vec model trained on [FrWac](https://wacky.sslmit.unibo.it/doku.php?id=corpora) by Fauconnier with the following hyperparameters: lem: yes, pos: no, phrase: no, train: skip, dim: 500, cutoff: 100 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWac_no_postag_no_phrase_500_skip_cut100", filename="frWac_no_postag_no_phrase_500_skip_cut100.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
asenella/mmnist_MMVAEPlusconfig2_seed_3_ratio_05_c
asenella
2023-05-28T16:38:07Z
0
0
null
[ "multivae", "en", "license:apache-2.0", "region:us" ]
null
2023-05-25T09:40:14Z
--- language: en tags: - multivae license: apache-2.0 --- ### Downloading this model from the Hub This model was trained with multivae. It can be downloaded or reloaded using the `load_from_hf_hub` method: ```python >>> from multivae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path="asenella/mmnist_MMVAEPlusconfig2_seed_3_ratio_05_c") ```
Word2vec/fauconnier_frWac_non_lem_no_postag_no_phrase_200_cbow_cut0
Word2vec
2023-05-28T16:38:06Z
0
0
null
[ "word2vec", "fr", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T20:19:42Z
--- tags: - word2vec language: fr license: cc-by-3.0 --- ### Description A French word2vec model trained on [FrWac](https://wacky.sslmit.unibo.it/doku.php?id=corpora) by Fauconnier with the following hyperparameters: lem: no, pos: no, phrase: no, train: cbow, dim: 200, cutoff: 0 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWac_non_lem_no_postag_no_phrase_200_cbow_cut0", filename="frWac_non_lem_no_postag_no_phrase_200_cbow_cut0.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
Word2vec/fauconnier_frWac_non_lem_no_postag_no_phrase_200_skip_cut100
Word2vec
2023-05-28T16:33:16Z
0
0
null
[ "word2vec", "fr", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T19:14:54Z
--- tags: - word2vec language: fr license: cc-by-3.0 --- ### Description A French word2vec model trained on [FrWac](https://wacky.sslmit.unibo.it/doku.php?id=corpora) by Fauconnier with the following hyperparameters: lem: no, pos: no, phrase: no, train: skip, dim: 200, cutoff: 100 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWac_non_lem_no_postag_no_phrase_200_skip_cut100", filename="frWac_non_lem_no_postag_no_phrase_200_skip_cut100.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
Word2vec/fauconnier_frWac_non_lem_no_postag_no_phrase_200_cbow_cut100
Word2vec
2023-05-28T16:30:53Z
0
0
null
[ "word2vec", "fr", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T19:14:01Z
--- tags: - word2vec language: fr license: cc-by-3.0 --- ### Description A French word2vec model trained on [FrWac](https://wacky.sslmit.unibo.it/doku.php?id=corpora) by Fauconnier with the following hyperparameters: lem: no, pos: no, phrase: no, train: cbow, dim: 200, cutoff: 100 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWac_non_lem_no_postag_no_phrase_200_cbow_cut100", filename="frWac_non_lem_no_postag_no_phrase_200_cbow_cut100.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
ImPavloh/Streamers-AI-Voices
ImPavloh
2023-05-28T16:29:58Z
0
0
null
[ "music", "audio-to-audio", "license:other", "region:us" ]
audio-to-audio
2023-05-28T16:16:20Z
--- license: other pipeline_tag: audio-to-audio tags: - music --- # Streamer models used in [VoiceIt](https://huggingface.co/spaces/ImPavloh/voiceit) These streamer voice models are used in [VoiceIt](https://voiceit.pavloh.com), a voice-to-voice conversion platform developed by [Pavloh](https://twitter.com/ImPavloh). # Technology used This project was built with SoftVC VITS Singing Voice Conversion (version 4.0), a state-of-the-art technology for singing voice conversion. All models in this repository were created by me; there are better models, made by other people, in [this repository](https://huggingface.co/QuickWick/Music-AI-Voices). If you need more information or have any questions, feel free to contact me: [Pavloh](https://twitter.com/ImPavloh). ## ⚠️ Important | The generated voices must not be subject to copyright.
Word2vec/fauconnier_frWiki_no_lem_no_postag_no_phrase_1000_cbow_cut100
Word2vec
2023-05-28T16:24:11Z
0
0
null
[ "word2vec", "fr", "dataset:wikipedia", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T17:41:24Z
--- tags: - word2vec language: fr license: cc-by-3.0 datasets: - wikipedia --- ### Description A French word2vec model trained on [frwiki](https://dumps.wikimedia.org/frwiki/) by Fauconnier with the following hyperparameters: lem: no, pos: no, phrase: no, train: cbow, dim: 1000, cutoff: 100 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWiki_no_lem_no_postag_no_phrase_1000_cbow_cut100", filename="frWiki_no_lem_no_postag_no_phrase_1000_cbow_cut100.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
asenella/mmnist_MMVAEPlusconfig2_seed_2_ratio_05_c
asenella
2023-05-28T16:23:55Z
0
0
null
[ "multivae", "en", "license:apache-2.0", "region:us" ]
null
2023-05-25T09:34:35Z
--- language: en tags: - multivae license: apache-2.0 --- ### Downloading this model from the Hub This model was trained with multivae. It can be downloaded or reloaded using the `load_from_hf_hub` method: ```python >>> from multivae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path="asenella/mmnist_MMVAEPlusconfig2_seed_2_ratio_05_c") ```
Word2vec/fauconnier_frWiki_no_phrase_no_postag_1000_skip_cut100
Word2vec
2023-05-28T16:23:29Z
0
0
null
[ "word2vec", "fr", "dataset:wikipedia", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T17:29:24Z
--- tags: - word2vec language: fr license: cc-by-3.0 datasets: - wikipedia --- ### Description A French word2vec model trained on [frwiki](https://dumps.wikimedia.org/frwiki/) by Fauconnier with the following hyperparameters: lem: yes, pos: no, phrase: no, train: skip, dim: 1000, cutoff: 100 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWiki_no_phrase_no_postag_1000_skip_cut100", filename="frWiki_no_phrase_no_postag_1000_skip_cut100.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
Word2vec/fauconnier_frWiki_no_lem_no_postag_no_phrase_1000_skip_cut100
Word2vec
2023-05-28T16:22:42Z
0
0
null
[ "word2vec", "fr", "dataset:wikipedia", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T17:34:29Z
--- tags: - word2vec language: fr license: cc-by-3.0 datasets: - wikipedia --- ### Description A French word2vec model trained on [frwiki](https://dumps.wikimedia.org/frwiki/) by Fauconnier with the following hyperparameters: lem: no, pos: no, phrase: no, train: skip, dim: 1000, cutoff: 100 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. The filename is assumed to mirror the repository name; check the repo's file list if it differs. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWiki_no_lem_no_postag_no_phrase_1000_skip_cut100", filename="frWiki_no_lem_no_postag_no_phrase_1000_skip_cut100.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
Word2vec/fauconnier_frWiki_no_phrase_no_postag_1000_skip_cut200
Word2vec
2023-05-28T16:19:32Z
0
0
null
[ "word2vec", "fr", "dataset:wikipedia", "license:cc-by-3.0", "region:us" ]
null
2023-05-16T17:28:26Z
--- tags: - word2vec language: fr license: cc-by-3.0 datasets: - wikipedia --- ### Description A French word2vec model trained on [frwiki](https://dumps.wikimedia.org/frwiki/) by Fauconnier with the following hyperparameters: lem: yes, pos: no, phrase: no, train: skip, dim: 1000, cutoff: 200 ### How to use? The snippet below downloads the weights with `hf_hub_download` and loads them with gensim. Since the model was trained without POS tags, query plain word forms. ``` from gensim.models import KeyedVectors from huggingface_hub import hf_hub_download path = hf_hub_download(repo_id="Word2vec/fauconnier_frWiki_no_phrase_no_postag_1000_skip_cut200", filename="frWiki_no_phrase_no_postag_1000_skip_cut200.bin") model = KeyedVectors.load_word2vec_format(path, binary=True, unicode_errors="ignore") model.most_similar("intéressant") ``` ### Citation ``` @misc{fauconnier_2015, author = {Fauconnier, Jean-Philippe}, title = {French Word Embeddings}, url = {http://fauconnier.github.io}, year = {2015}} ```
Rock910/Reinforce-CartPole-v1
Rock910
2023-05-28T16:06:57Z
0
0
null
[ "CartPole-v1", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
reinforcement-learning
2023-05-27T21:03:46Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
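These custom-implementation agents are stored as plain PyTorch checkpoints; here is a loading sketch under the assumption that the Unit 4 notebook pushed the full policy module as `model.pt` (both the filename and the serialization format are assumptions, so check the repo's file list):

```python
import torch
from huggingface_hub import hf_hub_download

# "model.pt" and full-module serialization are assumptions based on the Unit 4 notebook.
checkpoint = hf_hub_download(repo_id="Rock910/Reinforce-CartPole-v1", filename="model.pt")
policy = torch.load(checkpoint)
policy.eval()
```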
Yhyu13/baize-v2-13b-gptq-4bit
Yhyu13
2023-05-28T15:54:40Z
5
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-05-27T15:51:08Z
--- license: apache-2.0 --- GPTQ 4-bit (no act-order) version, built for compatibility so that it works in textgen-webui. Generated using scripts from https://gitee.com/yhyu13/llama_-tools Original weights: https://huggingface.co/project-baize/baize-v2-13b Baize is a LoRA training framework that allows fine-tuning LLaMA models on commodity GPUs. Check out my 7B baize GPTQ 4-bit here: https://huggingface.co/Yhyu13/baize-v2-7b-gptq-4bit
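Outside textgen-webui, the weights can also be loaded programmatically. A minimal sketch with AutoGPTQ; whether the repo ships safetensors is an assumption, so adjust `use_safetensors` to match its file list:

```python
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

repo = "Yhyu13/baize-v2-13b-gptq-4bit"
tokenizer = AutoTokenizer.from_pretrained(repo, use_fast=True)
# use_safetensors=True is an assumption; flip it if the repo ships .bin weights instead.
model = AutoGPTQForCausalLM.from_quantized(repo, device="cuda:0", use_safetensors=True)

inputs = tokenizer("Hello, how are you?", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0]))
```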
YakovElm/Hyperledger15Classic_512
YakovElm
2023-05-28T15:51:00Z
61
0
transformers
[ "transformers", "tf", "bert", "text-classification", "generated_from_keras_callback", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-05-28T15:50:25Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: Hyperledger15Classic_512 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Hyperledger15Classic_512 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.2806 - Train Accuracy: 0.9035 - Validation Loss: 0.3198 - Validation Accuracy: 0.8807 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': 1.0, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 3e-05, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.3217 | 0.8952 | 0.3253 | 0.8807 | 0 | | 0.2967 | 0.9035 | 0.3233 | 0.8807 | 1 | | 0.2806 | 0.9035 | 0.3198 | 0.8807 | 2 | ### Framework versions - Transformers 4.29.2 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
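The optimizer entry in the card above is a serialized Keras config; as a sketch, the relevant fields map directly onto `tf.keras.optimizers.Adam`:

```python
import tensorflow as tf

# Rebuild the training optimizer from the config listed in the card.
optimizer = tf.keras.optimizers.Adam(
    learning_rate=3e-05,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-08,
    clipnorm=1.0,
)
```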
dimi1357/LunarLander-v2
dimi1357
2023-05-28T15:36:18Z
0
0
null
[ "tensorboard", "LunarLander-v2", "ppo", "deep-reinforcement-learning", "reinforcement-learning", "custom-implementation", "deep-rl-course", "model-index", "region:us" ]
reinforcement-learning
2023-05-28T15:30:58Z
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -232.16 +/- 167.18 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters
casque/eimifukada
casque
2023-05-28T15:28:16Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-05-28T15:26:45Z
--- license: creativeml-openrail-m ---
maharishiva/ppo_v2-LunarLander-v2
maharishiva
2023-05-28T15:26:59Z
4
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2023-05-28T15:26:42Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 249.01 +/- 22.09 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
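Until the TODO in the card above is filled in, here is a hedged sketch of the usual stable-baselines3 loading flow; the zip filename is an assumption based on the common push convention, so check the repo's file list:

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# The filename is assumed; it may differ in the actual repo.
checkpoint = load_from_hub(repo_id="maharishiva/ppo_v2-LunarLander-v2", filename="ppo_v2-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```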
danorel/q-FrozenLake-v1-4x4-noSlippery
danorel
2023-05-28T15:16:27Z
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
2023-05-28T15:16:25Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="danorel/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
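`load_from_hub` in the snippet above is not imported anywhere; in the course it is a small notebook-defined helper. A sketch of that helper, assuming the pickle stores a dict with an `env_id` key as the snippet implies:

```python
import pickle
import gym
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled model dict from the Hub and return it."""
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)

model = load_from_hub(repo_id="danorel/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")
env = gym.make(model["env_id"], is_slippery=False)
```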
Disnutius/Elsex
Disnutius
2023-05-28T15:15:06Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-05-28T15:13:48Z
--- license: creativeml-openrail-m ---
Kurapika993/Toxic_classifier_bert
Kurapika993
2023-05-28T15:11:36Z
4
0
transformers
[ "transformers", "bert", "fill-mask", "text-classification", "en", "dataset:jigsaw_toxicity_pred", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-05-17T18:49:19Z
--- license: apache-2.0 datasets: - jigsaw_toxicity_pred metrics: - accuracy pipeline_tag: text-classification language: - en library_name: transformers ---
MocktaiLEngineer/mt5-small-finetuned-QMSum-01
MocktaiLEngineer
2023-05-28T15:00:06Z
8
0
transformers
[ "transformers", "pytorch", "tensorboard", "mt5", "text2text-generation", "summarization", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
summarization
2023-05-28T12:47:21Z
--- license: apache-2.0 tags: - summarization - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-finetuned-QMSum-01 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-QMSum-01 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.8022 - Rouge1: 18.0578 - Rouge2: 4.3867 - Rougel: 14.448 - Rougelsum: 16.1248 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 5.8153 | 1.0 | 548 | 3.0610 | 11.8231 | 3.1478 | 9.9085 | 10.7642 | | 3.5393 | 2.0 | 1096 | 2.9173 | 16.1634 | 4.0585 | 13.0541 | 14.6676 | | 3.2879 | 3.0 | 1644 | 2.8507 | 16.6082 | 4.1563 | 13.4818 | 14.9447 | | 3.163 | 4.0 | 2192 | 2.8268 | 16.9681 | 4.1602 | 13.7462 | 15.0741 | | 3.0699 | 5.0 | 2740 | 2.8256 | 17.8647 | 4.5317 | 14.4077 | 15.8516 | | 3.0156 | 6.0 | 3288 | 2.8175 | 17.7178 | 4.3329 | 14.3377 | 15.8622 | | 2.9692 | 7.0 | 3836 | 2.7987 | 18.3523 | 4.6726 | 14.6873 | 16.413 | | 2.9531 | 8.0 | 4384 | 2.8022 | 18.0578 | 4.3867 | 14.448 | 16.1248 | ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
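A minimal inference sketch with the transformers pipeline; the transcript string is a placeholder for a QMSum-style meeting transcript:

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="MocktaiLEngineer/mt5-small-finetuned-QMSum-01")
meeting_transcript = "..."  # placeholder: paste a meeting transcript here
print(summarizer(meeting_transcript, max_length=64)[0]["summary_text"])
```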
KHEW/MALA
KHEW
2023-05-28T14:53:01Z
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
2023-05-28T14:50:31Z
--- license: creativeml-openrail-m ---
thanut/skin
thanut
2023-05-28T14:47:09Z
0
0
adapter-transformers
[ "adapter-transformers", "license:afl-3.0", "region:us" ]
null
2023-05-28T14:39:36Z
--- license: afl-3.0 metrics: - accuracy library_name: adapter-transformers ---
amjadfqs/swin-base-patch4-window7-224-in22k-finetuned-brain-tumor-final_10
amjadfqs
2023-05-28T14:37:25Z
4
0
transformers
[ "transformers", "pytorch", "tensorboard", "swin", "image-classification", "generated_from_trainer", "dataset:imagefolder", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-05-28T05:43:48Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy - precision model-index: - name: swin-base-patch4-window7-224-in22k-finetuned-brain-tumor-final_10 results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9375490966221524 - name: Precision type: precision value: 0.9451238954076366 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-base-patch4-window7-224-in22k-finetuned-brain-tumor-final_10 This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224-in22k](https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.2175 - Accuracy: 0.9375 - F1 Score: 0.9383 - Precision: 0.9451 - Sensitivity: 0.9381 - Specificity: 0.9843 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 100 - eval_batch_size: 100 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 400 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Score | Precision | Sensitivity | Specificity | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|:---------:|:-----------:|:-----------:| | 1.3428 | 0.99 | 19 | 0.7059 | 0.7467 | 0.7535 | 0.7951 | 0.7464 | 0.9332 | | 0.3308 | 1.97 | 38 | 0.2314 | 0.9183 | 0.9194 | 0.9239 | 0.9191 | 0.9792 | | 0.1601 | 2.96 | 57 | 0.2024 | 0.9305 | 0.9314 | 0.9349 | 0.9306 | 0.9824 | | 0.0976 | 4.0 | 77 | 0.3376 | 0.8904 | 0.8943 | 0.9126 | 0.8930 | 0.9724 | | 0.0585 | 4.99 | 96 | 0.3893 | 0.8830 | 0.8853 | 0.9115 | 0.8854 | 0.9706 | | 0.0432 | 5.97 | 115 | 0.2559 | 0.9214 | 0.9239 | 0.9330 | 0.9237 | 0.9802 | | 0.0313 | 6.96 | 134 | 0.2175 | 0.9375 | 0.9383 | 0.9451 | 0.9381 | 0.9843 | | 0.0176 | 8.0 | 154 | 0.2309 | 0.9313 | 0.9326 | 0.9386 | 0.9320 | 0.9827 | | 0.0152 | 8.99 | 173 | 0.2358 | 0.9328 | 0.9339 | 0.9416 | 0.9336 | 0.9831 | | 0.0089 | 9.87 | 190 | 0.2116 | 0.9360 | 0.9374 | 0.9437 | 0.9372 | 0.9839 | ### Framework versions - Transformers 4.29.2 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
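A minimal inference sketch with the transformers pipeline; the image path is a placeholder for a local scan:

```python
from transformers import pipeline

# Labels come from the fine-tuned brain-tumor dataset.
classifier = pipeline("image-classification", model="amjadfqs/swin-base-patch4-window7-224-in22k-finetuned-brain-tumor-final_10")
print(classifier("scan.png"))  # placeholder path to a local MRI image
```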
BayesBayes/codeparrot-ds
BayesBayes
2023-05-28T14:33:56Z
4
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "generated_from_trainer", "license:mit", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2023-05-26T22:22:53Z
--- license: mit tags: - generated_from_trainer model-index: - name: codeparrot-ds results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # codeparrot-ds This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 1000 - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.29.1 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.3
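A minimal generation sketch with the transformers pipeline; the prompt is a placeholder:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="BayesBayes/codeparrot-ds")
print(generator("def fibonacci(n):", max_new_tokens=48)[0]["generated_text"])
```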
omegaodin/replit-replit-code-v1-3b
omegaodin
2023-05-28T14:31:56Z
0
0
adapter-transformers
[ "adapter-transformers", "code", "es", "dataset:bigcode/the-stack-dedup", "license:apache-2.0", "region:us" ]
null
2023-05-28T14:29:41Z
--- license: apache-2.0 datasets: - bigcode/the-stack-dedup language: - es metrics: - accuracy library_name: adapter-transformers tags: - code ---