| eval-result:mmlu.qwen-2-5-72b.001 | benchmark:mmlu | Benchmark |
| eval-result:human-eval.qwen-2-5-72b.001 | benchmark:human-eval | Benchmark |
| eval-result:human-eval.qwen-2-5-coder-32b.001 | benchmark:human-eval | Benchmark |
| eval-result:livecodebench.qwen-2-5-coder-32b.001 | benchmark:livecodebench | Benchmark |
| eval-result:mbpp.qwen-2-5-coder-32b.001 | benchmark:mbpp | Benchmark |
| eval-result:swe-bench-verified.claude-haiku-4-5.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:gpqa.claude-haiku-4-5.001 | benchmark:gpqa | Benchmark |
| eval-result:human-eval.claude-sonnet-4-6.001 | benchmark:human-eval | Benchmark |
| eval-result:mmlu.claude-sonnet-4-6.001 | benchmark:mmlu | Benchmark |
| eval-result:bfcl.claude-sonnet-4-5.001 | benchmark:berkeley-function-calling | Benchmark |
| eval-result:gpqa-diamond.claude-opus-4-5.001 | benchmark:gpqa | Benchmark |
| eval-result:os-world.claude-sonnet-4-5.001 | benchmark:os-world | Benchmark |
| eval-result:truthful-qa.claude-opus-4-5.001 | benchmark:truthful-qa | Benchmark |
| eval-result:human-eval-plus.claude-sonnet-4-5.001 | benchmark:bigcode-evalplus | Benchmark |
| eval-result:harmbench.claude-opus-4-5.001 | benchmark:harmbench | Benchmark |
| eval-result:arc-challenge.claude-sonnet-4-5.001 | benchmark:arc-challenge | Benchmark |
| eval-result:mmlu.deepseek-v3.001 | benchmark:mmlu | Benchmark |
| eval-result:human-eval.deepseek-v3.001 | benchmark:human-eval | Benchmark |
| eval-result:swe-bench.deepseek-v3.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:mmlu.deepseek-r1.001 | benchmark:mmlu | Benchmark |
| eval-result:math.deepseek-r1.001 | benchmark:math | Benchmark |
| eval-result:gpqa.deepseek-r1.001 | benchmark:gpqa | Benchmark |
| eval-result:gpqa.gemini-2-5-pro.001 | benchmark:gpqa | Benchmark |
| eval-result:livecodebench.gemini-2-5-pro.001 | benchmark:livecodebench | Benchmark |
| eval-result:swe-bench-verified.gemini-2-5-flash.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:gpqa-diamond.gemini-2-5-pro.001 | benchmark:gpqa | Benchmark |
| eval-result:android-world.gemini-2-5-pro.001 | benchmark:android-world | Benchmark |
| eval-result:mgsm.gemini-2-5-pro.001 | benchmark:mgsm | Benchmark |
| eval-result:gpqa-diamond.gemini-3-1-pro.2026-02-19.accuracy | benchmark:gpqa | Benchmark |
| eval-result:gpqa-diamond.gemini-3-pro.2025-11-18.accuracy | benchmark:gpqa | Benchmark |
| eval-result:swe-bench-verified.llama-4-405b.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:human-eval.llama-4-405b.001 | benchmark:human-eval | Benchmark |
| eval-result:mmlu.llama-4-405b.001 | benchmark:mmlu | Benchmark |
| eval-result:swe-bench.llama-3-1-405b.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:mmlu.llama-3-1-405b.001 | benchmark:mmlu | Benchmark |
| eval-result:human-eval.llama-3-1-405b.001 | benchmark:human-eval | Benchmark |
| eval-result:mmlu.llama-3-3-70b.001 | benchmark:mmlu | Benchmark |
| eval-result:human-eval.llama-3-3-70b.001 | benchmark:human-eval | Benchmark |
| eval-result:mmlu.mistral-large-2.001 | benchmark:mmlu | Benchmark |
| eval-result:human-eval.mistral-large-2.001 | benchmark:human-eval | Benchmark |
| eval-result:human-eval.codestral-25-01.001 | benchmark:human-eval | Benchmark |
| eval-result:multipl-e.codestral-25-01.001 | benchmark:multipl-e | Benchmark |
| eval-result:gpqa.gpt-5.001 | benchmark:gpqa | Benchmark |
| eval-result:human-eval.gpt-5.001 | benchmark:human-eval | Benchmark |
| eval-result:mmlu.o1.001 | benchmark:mmlu | Benchmark |
| eval-result:math.o3.001 | benchmark:math | Benchmark |
| eval-result:bfcl.gpt-5.001 | benchmark:berkeley-function-calling | Benchmark |
| eval-result:gpqa-diamond.gpt-5.001 | benchmark:gpqa | Benchmark |
| eval-result:human-eval-plus.gpt-5.001 | benchmark:bigcode-evalplus | Benchmark |
| eval-result:gpqa-diamond.gpt-5-4.2026-03-17.accuracy | benchmark:gpqa | Benchmark |
| eval-result:gpqa-diamond.gpt-5-4-mini.2026-03-17.accuracy | benchmark:gpqa | Benchmark |
| eval-result:mmlu.phi-3-medium.001 | benchmark:mmlu | Benchmark |
| eval-result:mmlu.gemma-2-27b.001 | benchmark:mmlu | Benchmark |
| eval-result:gsm8k.gemma-2-27b.001 | benchmark:gsm8k | Benchmark |
| eval-result:mmlu.command-r-plus.001 | benchmark:mmlu | Benchmark |
| eval-result:swe-bench-verified.claude-opus-4-5.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:swe-bench-verified.claude-opus-4-7.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:gpqa.claude-sonnet-4-5.001 | benchmark:gpqa | Benchmark |
| eval-result:livecodebench.gpt-5.001 | benchmark:livecodebench | Benchmark |
| eval-result:swe-bench-verified.o3.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:swe-bench-verified.gemini-2-5-pro.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:gsm8k.claude-sonnet-4-5.001 | benchmark:gsm8k | Benchmark |
| eval-result:hellaswag.claude-opus-4-5.001 | benchmark:hellaswag | Benchmark |
| eval-result:math.gpt-5.001 | benchmark:math | Benchmark |
| eval-result:evalplus.gpt-5.001 | benchmark:bigcode-evalplus | Benchmark |
| eval-result:terminal-bench.claude-sonnet-4-5.001 | benchmark:terminal-bench | Benchmark |
| eval-result:gaia.claude-code.001 | benchmark:gaia | Benchmark |
| eval-result:swe-bench.claude-code.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:swe-bench-verified.claude-sonnet-4-5.high-compute.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:swe-bench-verified.claude-sonnet-4-5.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:swe-bench-verified.gpt-5.headline.001 | benchmark:swe-bench-verified | Benchmark |
| eval-result:swe-bench-verified.gpt-5.001 | benchmark:swe-bench-verified | Benchmark |