Meta-Llama-3-8B-Instruct-Q6_K
---
ARC puzzle
input 0
[[9,9,5,9],[5,5,9,9],[9,5,9,9]]
output 0
[[9,5,9,9],[5,5,9,9],[9,9,5,9],[9,9,5,9],[5,5,9,9],[9,5,9,9]]
input 1
[[4,1,1,4],[1,1,1,1],[4,4,4,1]]
output 1
[[4,4,4,1],[1,1,1,1],[4,1,1,4],[4,1,1,4],[1,1,1,1],[4,4,4,1]]
input 2
[[9,4,9,4],[9,9,4,4],[4,4,4,4]]
output 2
[[4,4,4,4],[9,9,4,4],[9,4,9,4],[9,4,9,4],[9,9,4,4],[4,4,4,4]]
input 3
[[3,3,5,5],[3,5,5,3],[5,5,3,3]]
output 3
[[5,5,3,3],[3,5,5,3],[3,3,5,5],[3,3,5,5],[3,5,5,3],[5,5,3,3]]
input 4
[[4,4,9,9],[4,4,4,4],[4,4,9,9]]
output 4
---
expected response text:
[[4,4,9,9],[4,4,4,4],[4,4,9,9],[4,4,9,9],[4,4,4,4],[4,4,9,9]]
actual response text:
[[4,4,9,9],[4,4,4,4],[4,4,9,9],[4,4,9,9],[4,4,4,4],[4,4,9,9]]
status: correct
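
For reference, every training pair above follows the same rule: the output is the input grid with its row order reversed, stacked on top of the original grid, so a 3x4 input becomes a 6x4 output. A minimal sketch of that rule in Python (the function name is illustrative, not taken from the eval code):

def solve(grid):
    # Reverse the row order and prepend the reflection to the original grid.
    return grid[::-1] + grid

# The held-out test input reproduces the expected response text above.
test_input = [[4,4,9,9],[4,4,4,4],[4,4,9,9]]
assert solve(test_input) == [[4,4,9,9],[4,4,4,4],[4,4,9,9],[4,4,9,9],[4,4,4,4],[4,4,9,9]]
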
response dict:
{'id': 'cmpl-eacd2c10-1254-4419-a8cd-2f24f97db3e8',
 'object': 'text_completion',
 'created': 1715891919,
 'model': 'Meta-Llama-3-8B-Instruct-GGUF/Meta-Llama-3-8B-Instruct-Q6_K.gguf',
 'choices': [{'text': '[[4,4,9,9],[4,4,4,4],[4,4,9,9],[4,4,9,9],[4,4,4,4],[4,4,9,9]]',
              'index': 0,
              'logprobs': None,
              'finish_reason': 'stop'}],
 'usage': {'prompt_tokens': 374, 'completion_tokens': 49, 'total_tokens': 423}}
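
A status like the one above can be derived by comparing the completion text in the response dict against the expected grid string. A minimal sketch of that check, assuming 'response' holds the dict logged above (variable names are illustrative, not from the eval code):

expected_text = '[[4,4,9,9],[4,4,4,4],[4,4,9,9],[4,4,9,9],[4,4,4,4],[4,4,9,9]]'
actual_text = response['choices'][0]['text'].strip()
status = 'correct' if actual_text == expected_text else 'incorrect'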