II. Benchmark JSON
Structured · benchmark:human-eval
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "benchmark:human-eval",
"_kind": "Benchmark",
"_file": "benchmarks/benchmarks/human-eval.yaml",
"_cluster": "benchmarks",
"attributes": {
"displayName": "HumanEval",
"homepageUrl": "https://github.com/openai/human-eval",
"kind": "function-completion",
"targetsKind": "ModelVersion",
"description": "Hand-written programming problems for evaluating code generation.\n"
},
"outgoingEdges": [
{
"from": "benchmark:human-eval",
"to": "skill-area:python-implementation",
"kind": "covers",
"attributes": {
"attributes": {
"coverage": "full",
"weight": 0.5
}
}
}
],
"incomingEdges": [
{
"from": "eval-result:human-eval.qwen-2-5-72b.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:human-eval.qwen-2-5-coder-32b.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:human-eval.claude-sonnet-4-6.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:human-eval.deepseek-v3.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:human-eval.llama-4-405b.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:human-eval.llama-3-1-405b.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:human-eval.llama-3-3-70b.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:human-eval.mistral-large-2.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:human-eval.codestral-25-01.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-result:human-eval.gpt-5.001",
"to": "benchmark:human-eval",
"kind": "scored_against",
"attributes": {}
},
{
"from": "eval-run:human-eval.qwen-2-5-72b.2024-09",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "eval-run:human-eval.qwen-2-5-coder-32b.2024-11",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "eval-run:human-eval.claude-sonnet-4-6.2025-11",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "eval-run:human-eval.deepseek-v3.2024-12",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "eval-run:human-eval.llama-4-405b.2024-07",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "eval-run:human-eval.llama-3-1-405b.2024-07",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "eval-run:human-eval.llama-3-3-70b.2024-12",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "eval-run:human-eval.mistral-large-2.2024-07",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "eval-run:human-eval.codestral-25-01.2025-01",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "eval-run:human-eval.gpt-5.2025-08",
"to": "benchmark:human-eval",
"kind": "for_benchmark"
},
{
"from": "scope-boundary:human-eval.scope",
"to": "benchmark:human-eval",
"kind": "bounds_subject"
}
]
}
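
If you consume these records programmatically rather than through the atlas UI, the sketch below models the payload shape and filters the edge lists by kind. It is a minimal TypeScript sketch assuming plain JSON parsing; the AtlasRecord and AtlasEdge types and the incomingIdsOfKind helper are hypothetical illustrations, not part of any atlas API.

// Hypothetical types mirroring the normalized record above.
// Field names match the JSON payload; everything else is illustrative.
interface AtlasEdge {
  from: string;
  to: string;
  kind: string;
  // Optional: some edges (e.g. for_benchmark) carry no attributes object.
  attributes?: Record<string, unknown>;
}

interface AtlasRecord {
  id: string;
  _kind: string;     // e.g. "Benchmark"
  _file: string;     // source YAML path, e.g. "benchmarks/benchmarks/human-eval.yaml"
  _cluster: string;
  attributes: Record<string, unknown>;
  outgoingEdges: AtlasEdge[];
  incomingEdges: AtlasEdge[];
}

// Collect the IDs on the far side of incoming edges of a given kind.
function incomingIdsOfKind(record: AtlasRecord, kind: string): string[] {
  return record.incomingEdges
    .filter((edge) => edge.kind === kind)
    .map((edge) => edge.from);
}

// Usage sketch: list every eval-result scored against this benchmark.
// const record: AtlasRecord = JSON.parse(payload);
// incomingIdsOfKind(record, "scored_against");
// -> ["eval-result:human-eval.qwen-2-5-72b.001", ...]

Filtering on the kind field this way lets a client separate the scored_against eval-results from the for_benchmark eval-runs and the bounds_subject scope boundary without any schema beyond the record itself.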