| eval-run:mmlu.qwen-2-5-72b.2024-09 | benchmark:mmlu | Benchmark |
| eval-run:human-eval.qwen-2-5-72b.2024-09 | benchmark:human-eval | Benchmark |
| eval-run:human-eval.qwen-2-5-coder-32b.2024-11 | benchmark:human-eval | Benchmark |
| eval-run:livecodebench.qwen-2-5-coder-32b.2024-11 | benchmark:livecodebench | Benchmark |
| eval-run:mbpp.qwen-2-5-coder-32b.2024-11 | benchmark:mbpp | Benchmark |
| eval-run:swe-bench-verified.claude-haiku-4-5.2025-10 | benchmark:swe-bench-verified | Benchmark |
| eval-run:gpqa.claude-haiku-4-5.2025-10 | benchmark:gpqa | Benchmark |
| eval-run:human-eval.claude-sonnet-4-6.2025-11 | benchmark:human-eval | Benchmark |
| eval-run:mmlu.claude-sonnet-4-6.2025-11 | benchmark:mmlu | Benchmark |
| eval-run:bfcl.claude-sonnet-4-5.2025-09 | benchmark:berkeley-function-calling | Benchmark |
| eval-run:gpqa-diamond.claude-opus-4-5.2025-09 | benchmark:gpqa | Benchmark |
| eval-run:os-world.claude-sonnet-4-5.2025-09 | benchmark:os-world | Benchmark |
| eval-run:truthful-qa.claude-opus-4-5.2025-09 | benchmark:truthful-qa | Benchmark |
| eval-run:human-eval-plus.claude-sonnet-4-5.2025-09 | benchmark:bigcode-evalplus | Benchmark |
| eval-run:harmbench.claude-opus-4-5.2025-09 | benchmark:harmbench | Benchmark |
| eval-run:arc-challenge.claude-sonnet-4-5.2025-09 | benchmark:arc-challenge | Benchmark |
| eval-run:mmlu.deepseek-v3.2024-12 | benchmark:mmlu | Benchmark |
| eval-run:human-eval.deepseek-v3.2024-12 | benchmark:human-eval | Benchmark |
| eval-run:swe-bench.deepseek-v3.2024-12 | benchmark:swe-bench-verified | Benchmark |
| eval-run:mmlu.deepseek-r1.2025-01 | benchmark:mmlu | Benchmark |
| eval-run:math.deepseek-r1.2025-01 | benchmark:math | Benchmark |
| eval-run:gpqa.deepseek-r1.2025-01 | benchmark:gpqa | Benchmark |
| eval-run:gpqa.gemini-2-5-pro.2025-06 | benchmark:gpqa | Benchmark |
| eval-run:livecodebench.gemini-2-5-pro.2025-06 | benchmark:livecodebench | Benchmark |
| eval-run:swe-bench-verified.gemini-2-5-flash.2025-06 | benchmark:swe-bench-verified | Benchmark |
| eval-run:gpqa-diamond.gemini-2-5-pro.2025-06 | benchmark:gpqa | Benchmark |
| eval-run:android-world.gemini-2-5-pro.2025-06 | benchmark:android-world | Benchmark |
| eval-run:mgsm.gemini-2-5-pro.2025-06 | benchmark:mgsm | Benchmark |
| eval-run:gpqa-diamond.gemini-3-1-pro.2026-02-19 | benchmark:gpqa | Benchmark |
| eval-run:gpqa-diamond.gemini-3-pro.2025-11-18 | benchmark:gpqa | Benchmark |
| eval-run:swe-bench-verified.llama-4-405b.2024-07 | benchmark:swe-bench-verified | Benchmark |
| eval-run:human-eval.llama-4-405b.2024-07 | benchmark:human-eval | Benchmark |
| eval-run:mmlu.llama-4-405b.2024-07 | benchmark:mmlu | Benchmark |
| eval-run:swe-bench.llama-3-1-405b.2024-07 | benchmark:swe-bench-verified | Benchmark |
| eval-run:mmlu.llama-3-1-405b.2024-07 | benchmark:mmlu | Benchmark |
| eval-run:human-eval.llama-3-1-405b.2024-07 | benchmark:human-eval | Benchmark |
| eval-run:mmlu.llama-3-3-70b.2024-12 | benchmark:mmlu | Benchmark |
| eval-run:human-eval.llama-3-3-70b.2024-12 | benchmark:human-eval | Benchmark |
| eval-run:mmlu.mistral-large-2.2024-07 | benchmark:mmlu | Benchmark |
| eval-run:human-eval.mistral-large-2.2024-07 | benchmark:human-eval | Benchmark |
| eval-run:human-eval.codestral-25-01.2025-01 | benchmark:human-eval | Benchmark |
| eval-run:multipl-e.codestral-25-01.2025-01 | benchmark:multipl-e | Benchmark |
| eval-run:gpqa.gpt-5.2025-08 | benchmark:gpqa | Benchmark |
| eval-run:human-eval.gpt-5.2025-08 | benchmark:human-eval | Benchmark |
| eval-run:mmlu.o1.2024-12 | benchmark:mmlu | Benchmark |
| eval-run:math.o3.2025-04 | benchmark:math | Benchmark |
| eval-run:bfcl.gpt-5.2025-08 | benchmark:berkeley-function-calling | Benchmark |
| eval-run:gpqa-diamond.gpt-5.2025-08 | benchmark:gpqa | Benchmark |
| eval-run:human-eval-plus.gpt-5.2025-08 | benchmark:bigcode-evalplus | Benchmark |
| eval-run:gpqa-diamond.gpt-5-4.2026-03-17 | benchmark:gpqa | Benchmark |
| eval-run:gpqa-diamond.gpt-5-4-mini.2026-03-17 | benchmark:gpqa | Benchmark |
| eval-run:mmlu.phi-3-medium.2024-05 | benchmark:mmlu | Benchmark |
| eval-run:mmlu.gemma-2-27b.2024-06 | benchmark:mmlu | Benchmark |
| eval-run:gsm8k.gemma-2-27b.2024-06 | benchmark:gsm8k | Benchmark |
| eval-run:mmlu.command-r-plus.2024-08 | benchmark:mmlu | Benchmark |
| eval-run:swe-bench-verified.claude-opus-4-5.2025-09 | benchmark:swe-bench-verified | Benchmark |
| eval-run:swe-bench-verified.claude-opus-4-7.2026-01 | benchmark:swe-bench-verified | Benchmark |
| eval-run:gpqa.claude-sonnet-4-5.2025-09 | benchmark:gpqa | Benchmark |
| eval-run:livecodebench.gpt-5.2025-08 | benchmark:livecodebench | Benchmark |
| eval-run:swe-bench-verified.o3.2025-04 | benchmark:swe-bench-verified | Benchmark |
| eval-run:swe-bench-verified.gemini-2-5-pro.2025-06 | benchmark:swe-bench-verified | Benchmark |
| eval-run:gsm8k.claude-sonnet-4-5.2025-09 | benchmark:gsm8k | Benchmark |
| eval-run:hellaswag.claude-opus-4-5.2025-09 | benchmark:hellaswag | Benchmark |
| eval-run:math.gpt-5.2025-08 | benchmark:math | Benchmark |
| eval-run:evalplus.gpt-5.2025-08 | benchmark:bigcode-evalplus | Benchmark |
| eval-run:terminal-bench.claude-sonnet-4-5.2025-09 | benchmark:terminal-bench | Benchmark |
| eval-run:gaia.claude-code.2025 | benchmark:gaia | Benchmark |
| eval-run:swe-bench.claude-code@1.x.2025-04-29 | benchmark:swe-bench-verified | Benchmark |
| eval-run:swe-bench-verified.claude-sonnet-4-5.2025-09 | benchmark:swe-bench-verified | Benchmark |
| eval-run:swe-bench-verified.gpt-5.2025-08 | benchmark:swe-bench-verified | Benchmark |