II.
Benchmark JSON
Structured · live · benchmark:mmlu
MMLU JSON
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "benchmark:mmlu",
"_kind": "Benchmark",
"_file": "benchmarks/benchmarks/mmlu.yaml",
"_cluster": "benchmarks",
"attributes": {
"displayName": "MMLU",
"homepageUrl": "https://github.com/hendrycks/test",
"kind": "knowledge",
"targetsKind": "ModelVersion",
"description": "Massive Multitask Language Understanding — 57-subject knowledge benchmark.\n"
},
"outgoingEdges": [
{
"from": "benchmark:mmlu",
"to": "skill-area:general-knowledge-reasoning",
"kind": "covers",
"attributes": {
"attributes": {
"coverage": "full",
"weight": 0.1
}
}
}
],
"incomingEdges": [
{
"from": "eval-result:mmlu.qwen-2-5-72b.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:gpqa.claude-haiku-4-5.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.claude-sonnet-4-6.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.deepseek-v3.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.deepseek-r1.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:gpqa.gemini-2-5-pro.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.llama-4-405b.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.llama-3-1-405b.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.llama-3-3-70b.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.mistral-large-2.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:gpqa.gpt-5.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.o1.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.phi-3-medium.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.gemma-2-27b.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:mmlu.command-r-plus.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:gpqa.claude-sonnet-4-5.001",
"to": "benchmark:mmlu",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-run:mmlu.qwen-2-5-72b.2024-09",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.claude-sonnet-4-6.2025-11",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.deepseek-v3.2024-12",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.deepseek-r1.2025-01",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.llama-4-405b.2024-07",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.llama-3-1-405b.2024-07",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.llama-3-3-70b.2024-12",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.mistral-large-2.2024-07",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.o1.2024-12",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.phi-3-medium.2024-05",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.gemma-2-27b.2024-06",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "eval-run:mmlu.command-r-plus.2024-08",
"to": "benchmark:mmlu",
"kind": "for_benchmark"
},
{
"from": "scope-boundary:mmlu.scope",
"to": "benchmark:mmlu",
"kind": "bounds_subject"
}
]
}