{
"id": "benchmark:harmbench",
"_kind": "Benchmark",
"_file": "benchmarks/benchmarks/benchmarks-safety.yaml",
"_cluster": "benchmarks",
"attributes": {
"displayName": "HarmBench",
"homepageUrl": "https://www.harmbench.org/",
"kind": "model-only",
"targetsKind": "ModelVersion",
"description": "HarmBench (Mazeika et al., CAIS 2024) is a standardized evaluation\nframework for automated red-teaming of LLMs across harmful behaviors\n(cyber, chemical/biological, misinformation, harassment, illegal),\npairing attack methods with target models on a fixed test bank.\n"
},
"outgoingEdges": [
{
"from": "benchmark:harmbench",
"to": "skill-area:safety-redteaming",
"kind": "covers",
"attributes": {
"attributes": {
"coverage": "full",
"weight": 0.3
}
}
},
{
"from": "benchmark:harmbench",
"to": "content-policy:default-acceptable-use",
"kind": "evaluates_policy",
"attributes": {}
},
{
"from": "benchmark:harmbench",
"to": "content-policy:eu-ai-act-aligned",
"kind": "evaluates_policy",
"attributes": {}
}
],
"incomingEdges": [
{
"from": "eval-run:harmbench.claude-opus-4-5.2025-09",
"to": "benchmark:harmbench",
"kind": "for_benchmark"
}
]
}