{
  "id": "scope-boundary:human-eval.scope",
  "_kind": "ScopeBoundary",
  "_file": "sourceref-scope/scope-boundaries/human-eval.yaml",
  "_cluster": "sourceref-scope",
  "attributes": {
    "subjectId": "benchmark:human-eval",
    "inScope": "Self-contained Python function-completion tasks scored by\npass@k against unit tests. 164 hand-crafted problems, each given\nas a function signature plus docstring.\n",
    "outOfScope": "Multi-file projects, languages other than Python (use MultiPL-E /\nHumanEval-X for cross-language), repository-scale tasks (use SWE-bench),\nmulti-turn / agentic tasks, and natural-language reasoning benchmarks.\n",
    "outOfScopeReasonIds": [
      "out-of-scope-reason:future-phase",
      "out-of-scope-reason:implementation-detail"
    ]
  },
  "outgoingEdges": [
    {
      "from": "scope-boundary:human-eval.scope",
      "to": "benchmark:human-eval",
      "kind": "bounds_subject"
    }
  ],
  "incomingEdges": []
}
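
Note: the inScope text above references pass@k scoring. For context, below is a minimal Python sketch of the standard unbiased pass@k estimator described in the HumanEval paper (Chen et al., 2021); the function name and the per-problem count variables are illustrative and not part of this record.

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k estimate for a single problem.

    n: total samples generated for the problem
    c: samples that pass all unit tests
    k: evaluation budget (k <= n)
    """
    if n - c < k:
        # Every size-k subset of the n samples must contain a passing one.
        return 1.0
    # 1 - C(n-c, k) / C(n, k), computed as a numerically stable product.
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

# Benchmark-level pass@k is the mean over the 164 problems, e.g.
# (per_problem_counts is a hypothetical list of (n, c) pairs):
# scores = [pass_at_k(n, c, k=1) for (n, c) in per_problem_counts]
# pass_at_1 = sum(scores) / len(scores)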