II.
SkillArea JSON
Structured · live · skill-area:safety-redteaming
Safety Red-Teaming JSON
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "skill-area:safety-redteaming",
"_kind": "SkillArea",
"_file": "domain/skill-areas/skill-areas-benchmark-coverage.yaml",
"_cluster": "domain",
"attributes": {
"displayName": "Safety Red-Teaming",
"description": "Adversarial probing of model and agent safety — eliciting policy\nviolations, jailbreak resistance, and harmful-content refusals under\nstructured attack taxonomies. Covers the HarmBench class of safety\nevaluations.\n",
"domains": [
"domain:security"
],
"expertiseLevels": [
"intermediate",
"expert",
"authoritative"
]
},
"outgoingEdges": [
{
"from": "skill-area:safety-redteaming",
"to": "domain:security",
"kind": "applies_to",
"attributes": {
"confidence": "primary"
}
}
],
"incomingEdges": [
{
"from": "benchmark:bias-bench",
"to": "skill-area:safety-redteaming",
"kind": "covers",
"attributes": {}
},
{
"from": "benchmark:harmbench",
"to": "skill-area:safety-redteaming",
"kind": "covers",
"attributes": {
"attributes": {
"coverage": "full",
"weight": 0.3
}
}
},
{
"from": "benchmark:jailbreakbench",
"to": "skill-area:safety-redteaming",
"kind": "covers",
"attributes": {}
},
{
"from": "benchmark:advbench",
"to": "skill-area:safety-redteaming",
"kind": "covers",
"attributes": {}
},
{
"from": "skill-area:AI-safety-alignment",
"to": "skill-area:safety-redteaming",
"kind": "prerequisite_for_learning",
"attributes": {
"strength": "recommended"
}
},
{
"from": "skill-area:ai-agent-development",
"to": "skill-area:safety-redteaming",
"kind": "prerequisite_for_learning",
"attributes": {
"strength": "recommended"
}
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "skill-area:safety-redteaming",
"kind": "requires_skill_area"
},
{
"from": "lib-agent:ai-agents-conversational--bias-fairness-analyst",
"to": "skill-area:safety-redteaming",
"kind": "lib_requires_skill_area",
"attributes": {
"weight": 0.7
}
},
{
"from": "lib-agent:ai-agents-conversational--prompt-injection-defender",
"to": "skill-area:safety-redteaming",
"kind": "lib_requires_skill_area",
"attributes": {
"weight": 0.7
}
},
{
"from": "lib-agent:ai-agents-conversational--safety-auditor",
"to": "skill-area:safety-redteaming",
"kind": "lib_requires_skill_area",
"attributes": {
"weight": 1
}
},
{
"from": "lib-skill:ai-agents-conversational--constitutional-ai-prompts",
"to": "skill-area:safety-redteaming",
"kind": "lib_requires_skill_area",
"attributes": {
"weight": 0.7
}
},
{
"from": "lib-skill:ai-agents-conversational--content-moderation-api",
"to": "skill-area:safety-redteaming",
"kind": "lib_requires_skill_area",
"attributes": {
"weight": 0.7
}
},
{
"from": "lib-skill:ai-agents-conversational--nemo-guardrails",
"to": "skill-area:safety-redteaming",
"kind": "lib_requires_skill_area",
"attributes": {
"weight": 0.7
}
},
{
"from": "lib-skill:ai-agents-conversational--prompt-injection-detector",
"to": "skill-area:safety-redteaming",
"kind": "lib_requires_skill_area",
"attributes": {
"weight": 0.7
}
},
{
"from": "lib-skill:security-research--aiml-security",
"to": "skill-area:safety-redteaming",
"kind": "lib_requires_skill_area",
"attributes": {
"weight": 0.7
}
}
]
}