# Reference for length-bias categories: whether the preferred completion in
# each subset tends to be longer ("True"), shorter ("False"), or comparable
# in length ("Neutral")
length_categories = {
    "alpacaeval-easy": "True",
    "alpacaeval-hard": "True",
    "alpacaeval-length": "Neutral",
    "donotanswer": "False",
    "hep-cpp": "Neutral",
    "hep-go": "Neutral",
    "hep-java": "Neutral",
    "hep-js": "Neutral",
    "hep-python": "Neutral",
    "hep-rust": "Neutral",
    "llmbar-adver-GPTInst": "False",
    "llmbar-adver-GPTOut": "Neutral",
    "llmbar-adver-manual": "False",
    "llmbar-adver-neighbor": "False",
    "llmbar-natural": "Neutral",
    "math-prm": "Neutral",
    "mt-bench-easy": "False",
    "mt-bench-hard": "False",
    "mt-bench-med": "Neutral",
    "refusals-dangerous": "False",
    "refusals-offensive": "False",
    "xstest-should-refuse": "False",
    "xstest-should-respond": "True",
}
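
# Illustrative sketch (not part of the original file): one way to use the
# table above is to bucket per-subset accuracies by length-bias category and
# compare the bucket means; a length-exploiting model should look strongest
# in the "True" bucket. `per_subset_accuracy` is a hypothetical input dict
# keyed by the subset names above.
def accuracy_by_length_bias(per_subset_accuracy):
    buckets = {"True": [], "Neutral": [], "False": []}
    for subset, bias in length_categories.items():
        buckets[bias].append(per_subset_accuracy[subset])
    return {bias: sum(vals) / len(vals) for bias, vals in buckets.items()}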

example_counts = {
    "alpacaeval-easy": 100,
    "alpacaeval-length": 95,
    "alpacaeval-hard": 95,
    "mt-bench-easy": 28,
    "mt-bench-med": 40,
    "mt-bench-hard": 37,
    "math-prm": 984,  # actual length 447, upweighting to be equal to code
    "refusals-dangerous": 100,
    "refusals-offensive": 100,
    "llmbar-natural": 100,
    "llmbar-adver-neighbor": 134,
    "llmbar-adver-GPTInst": 92,
    "llmbar-adver-GPTOut": 47,
    "llmbar-adver-manual": 46,
    "xstest-should-refuse": 154,
    "xstest-should-respond": 250,  # Note, refuse and respond were accidentally swapped until 9 Sept 2024
    "donotanswer": 136,
    "hep-cpp": 164,
    "hep-go": 164,
    "hep-java": 164,
    "hep-js": 164,
    "hep-python": 164,
    "hep-rust": 164,
}
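
# Sanity check (added here for illustration; an assumption, not in the
# original file): the counts table and the length-bias table should cover
# exactly the same set of subsets.
assert set(example_counts) == set(length_categories)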

# Note: the subset order here should match the column order in the results dataframe.
subset_mapping = {
    "Chat": ["alpacaeval-easy", "alpacaeval-hard", "alpacaeval-length", "mt-bench-easy", "mt-bench-med"],
    "Chat Hard": [
        "llmbar-adver-GPTInst",
        "llmbar-adver-GPTOut",
        "llmbar-adver-manual",
        "llmbar-adver-neighbor",
        "llmbar-natural",
        "mt-bench-hard",
    ],
    "Safety": [
        "donotanswer",
        "refusals-dangerous",
        "refusals-offensive",
        "xstest-should-refuse",
        "xstest-should-respond",
    ],
    "Reasoning": ["hep-cpp", "hep-go", "hep-java", "hep-js", "hep-python", "hep-rust", "math-prm"],
}
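
# Minimal sketch (an assumption inferred from the math-prm upweighting note
# above, not the original scoring code): section scores appear to be
# example-count-weighted means of per-subset accuracies, which is why
# math-prm is upweighted to carry the same weight as the code subsets.
# `per_subset_accuracy` is a hypothetical input dict keyed by subset name.
def section_scores(per_subset_accuracy):
    scores = {}
    for section, subsets in subset_mapping.items():
        total = sum(example_counts[s] for s in subsets)
        weighted = sum(per_subset_accuracy[s] * example_counts[s] for s in subsets)
        scores[section] = weighted / total
    return scores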