mauroibz committed
Commit 1a1c08b · verified · 1 Parent(s): 1f69378

Upload tasks_groups.json with huggingface_hub
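For context, an upload like this is typically made with the `huggingface_hub` client rather than through the web UI. Below is a minimal sketch of the call that would produce this commit message; the `repo_id` and `repo_type` are assumptions, since the target repository is not shown on this page.

```python
# Minimal sketch: upload tasks_groups.json via huggingface_hub.
# repo_id and repo_type are hypothetical placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()  # authenticates with the token saved by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="tasks_groups.json",   # local file to send
    path_in_repo="tasks_groups.json",      # destination path inside the repo
    repo_id="mauroibz/latam-leaderboard",  # hypothetical repository id
    repo_type="dataset",                   # assumption: a dataset repo
    commit_message="Upload tasks_groups.json with huggingface_hub",
)
```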

Files changed (1)
tasks_groups.json  +35 -0
tasks_groups.json ADDED
@@ -0,0 +1,35 @@
+{
+  "task_groups": {
+    "latam_pr": {
+      "name": "Portuguese LATAM",
+      "description": "Portuguese language tasks for Latin America",
+      "long_description": "Suite of selected tasks from the Portuguese LATAM group designed to evaluate the performance of models in the Portuguese language. Based on the work of the Open Portuguese LLM Leaderboard, these tasks were carefully selected to measure the capabilities of language models in understanding Portuguese. The tasks cover a wide range of linguistic abilities, from basic comprehension to complex reasoning in Portuguese. The evaluation suite includes tasks like ASSIN2 for textual entailment and semantic similarity, BLUEX for university entrance exams, and ENEM for standardized testing comprehension. This comprehensive set of benchmarks helps assess how well language models can process and generate Portuguese text across different contexts and difficulty levels.",
+      "repository": "Our fork of https://github.com/eduagarcia/lm-evaluation-harness-pt",
+      "subtasks": [
+        "assin2_rte",
+        "assin2_sts",
+        "bluex",
+        "enem_challenge",
+        "faquad_nli",
+        "oab_exams"
+      ]
+    }
+    ,
+    "latam_es": {
+      "name": "Spanish LATAM",
+      "description": "Spanish language tasks for Latin America",
+      "long_description": "Suite of selected tasks from the Spanish Bench available in the lm-evaluation-harness from the team at IberoBench (https://aclanthology.org/2025.coling-main.699/) and SomosNLP's Spanish Leaderboard (https://github.com/somosnlp/lm-evaluation-harness) designed to evaluate the performance of models in the Spanish language. The tasks cover a wide range of linguistic abilities, from basic comprehension to complex reasoning in Spanish. The evaluation suite includes tasks like COPA for choice of plausible alternatives, ESCOLA for the Spanish Corpus of Linguistic Acceptability, MGSM for Multilingual Grade School Math, OpenBookQA for open-domain question answering, PAWS for paraphrase adversaries from word scrambling, TELEIA for Spanish language assessment, WNLI for Winograd Natural Language Inference, and XNLI for Cross-lingual Natural Language Inference. This comprehensive set of benchmarks helps assess how well language models can process and generate Spanish text across different contexts and difficulty levels.",
+      "repository": "Our fork of https://github.com/EleutherAI/lm-evaluation-harness",
+      "subtasks": [
+        "copa_es",
+        "escola",
+        "mgsm_direct_es_spanish_bench",
+        "openbookqa_es",
+        "paws_es_spanish_bench",
+        "teleia",
+        "wnli_es",
+        "xnli_es_spanish_bench"
+      ]
+    }
+  }
+}
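As a quick sanity check (not part of the commit), the file above is plain JSON and can be consumed with the standard library alone; a minimal sketch, with illustrative variable names:

```python
# Minimal sketch: read tasks_groups.json and list each group's subtasks.
import json

with open("tasks_groups.json", encoding="utf-8") as f:
    config = json.load(f)

for group_id, group in config["task_groups"].items():
    print(f"{group_id}: {group['name']} ({len(group['subtasks'])} subtasks)")
    for subtask in group["subtasks"]:
        print(f"  - {subtask}")
```

Run against the file above, this prints latam_pr with its 6 Portuguese subtasks followed by latam_es with its 8 Spanish subtasks.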