II.
StackProfile JSON
Structured · live · stack-profile:ai-safety-guardrails
AI Safety / Guardrails Stack (Python, OPA, FastAPI, Redis, Prometheus)
Inspect the normalized record payload exactly as the atlas UI reads it; a sketch of the proxy flow it describes follows the record.
{
"id": "stack-profile:ai-safety-guardrails",
"_kind": "StackProfile",
"_file": "domain/stack-profiles/deep-stacks-4.yaml",
"_cluster": "domain",
"attributes": {
"displayName": "AI Safety / Guardrails Stack (Python, OPA, FastAPI, Redis, Prometheus)",
"description": "An AI safety and guardrails platform that sits between LLM applications\nand model endpoints, enforcing content policies, detecting prompt\ninjection attempts, and applying output filtering. Open Policy Agent\n(OPA) evaluates declarative safety rules against request and response\npayloads. FastAPI serves the guardrail proxy with Redis caching\npreviously evaluated inputs for latency reduction. Prometheus tracks\nblock rates, false positive rates, and policy evaluation latency.\nPydantic validates safety rule schemas. Targeted at enterprises\ndeploying customer-facing AI features that require content safety\ncompliance. The tradeoff is the tension between safety and usability —\naggressive filtering reduces harmful outputs but increases false\npositives that degrade the user experience, requiring continuous\npolicy calibration.\n",
"composes": [
"language:python",
"tool:opa",
"framework:fastapi",
"library:redis-py",
"tool:prometheus",
"library:pydantic"
]
},
"outgoingEdges": [
{
"from": "stack-profile:ai-safety-guardrails",
"to": "language:python",
"kind": "composed_of"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "tool:opa",
"kind": "composed_of"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "framework:fastapi",
"kind": "composed_of"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "library:redis-py",
"kind": "composed_of"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "tool:prometheus",
"kind": "composed_of"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "library:pydantic",
"kind": "composed_of"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "library:httpx",
"kind": "composed_of"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "tool:docker",
"kind": "composed_of"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "role:ml-engineer",
"kind": "used_by_role"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "role:security-engineer",
"kind": "used_by_role"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "role:backend-engineer",
"kind": "used_by_role"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "workflow:ai-safety-guardrail-maintenance",
"kind": "follows_workflow"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "workflow:prompt-engineering-iteration",
"kind": "follows_workflow"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "domain:ml-ai",
"kind": "applies_to"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "domain:cybersecurity",
"kind": "applies_to"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "skill-area:safety-redteaming",
"kind": "requires_skill_area"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "skill-area:policy-enforcement",
"kind": "requires_skill_area"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "skill-area:prompt-engineering",
"kind": "requires_skill_area"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "skill-area:backend-api-design",
"kind": "requires_skill_area"
},
{
"from": "stack-profile:ai-safety-guardrails",
"to": "skill-area:observability-instrumentation",
"kind": "requires_skill_area"
}
],
"incomingEdges": []
}
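
The description field above outlines a single request path: FastAPI fronts the model endpoint, OPA evaluates declarative safety rules against the payload, Redis caches verdicts for previously seen inputs, Prometheus records block counts and policy-evaluation latency, and Pydantic validates the request schema. The sketch below is a minimal, hypothetical illustration of that flow, not an implementation taken from the profile: the OPA policy path (`guardrails/allow`), the upstream model URL, the endpoint name, and the one-hour cache TTL are all assumed values.

```python
# Hypothetical guardrail-proxy sketch for the stack described above.
# All URLs, policy paths, and TTLs are assumptions, not profile data.
import hashlib

import httpx
import redis
from fastapi import FastAPI, HTTPException
from prometheus_client import Counter, Histogram, make_asgi_app
from pydantic import BaseModel

app = FastAPI()
app.mount("/metrics", make_asgi_app())  # Prometheus scrape endpoint

cache = redis.Redis(host="localhost", port=6379, decode_responses=True)

OPA_URL = "http://localhost:8181/v1/data/guardrails/allow"  # assumed OPA policy path
MODEL_URL = "http://localhost:9000/v1/completions"          # assumed upstream model endpoint

BLOCKED = Counter("guardrail_blocked_total", "Requests blocked by policy")
EVAL_LATENCY = Histogram("guardrail_policy_eval_seconds", "OPA evaluation latency")


class CompletionRequest(BaseModel):
    """Pydantic (v2) schema validating the proxied request body."""
    prompt: str
    max_tokens: int = 256


def _cache_key(prompt: str) -> str:
    # Cache verdicts by prompt hash so repeated inputs skip OPA entirely.
    return "guardrail:" + hashlib.sha256(prompt.encode()).hexdigest()


async def evaluate_policy(payload: dict) -> bool:
    """Ask OPA whether the payload is allowed under the safety rules."""
    with EVAL_LATENCY.time():
        async with httpx.AsyncClient() as client:
            resp = await client.post(OPA_URL, json={"input": payload})
            resp.raise_for_status()
            return bool(resp.json().get("result", False))


@app.post("/v1/completions")
async def proxy_completion(req: CompletionRequest):
    key = _cache_key(req.prompt)
    cached = cache.get(key)
    if cached is not None:
        allowed = cached == "allow"
    else:
        allowed = await evaluate_policy(req.model_dump())
        cache.setex(key, 3600, "allow" if allowed else "block")  # assumed 1h TTL

    if not allowed:
        BLOCKED.inc()
        raise HTTPException(status_code=403, detail="Blocked by content policy")

    # Forward the approved request to the model endpoint unchanged.
    async with httpx.AsyncClient() as client:
        upstream = await client.post(MODEL_URL, json=req.model_dump(), timeout=30.0)
    return upstream.json()
```

In this sketch, keying the cache on a prompt hash is the latency lever the description mentions, but it also means a policy update only reaches previously cached prompts after their TTL expires, one concrete way the caching choice feeds the continuous-calibration tradeoff called out in the record.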