Record
Agentic AI Atlas · content-moderation-safety
lib-process:ai-agents-conversational--content-moderation-safety
a5c.ai
II.
LibraryProcess JSON

lib-process:ai-agents-conversational--content-moderation-safety

Structured · live

content-moderation-safety json

Inspect the normalized record payload exactly as the atlas UI reads it.

File · generated-library/processes.yaml
Cluster · generated-library
Record JSON
{
  "id": "lib-process:ai-agents-conversational--content-moderation-safety",
  "_kind": "LibraryProcess",
  "_file": "generated-library/processes.yaml",
  "_cluster": "generated-library",
  "attributes": {
    "displayName": "content-moderation-safety",
    "description": "Content Moderation and Safety Filters - Process for implementing content filtering for both inputs and outputs\nincluding toxicity detection, PII redaction, hallucination detection, and abuse prevention.",
    "libraryPath": "library/specializations/ai-agents-conversational/content-moderation-safety.js",
    "specialization": "ai-agents-conversational",
    "references": [
      "- OpenAI Moderation: https://platform.openai.com/docs/guides/moderation\n- Perspective API: https://developers.perspectiveapi.com/\n- Azure Content Safety: https://azure.microsoft.com/en-us/products/ai-services/ai-content-safety"
    ],
    "example": "const result = await orchestrate('specializations/ai-agents-conversational/content-moderation-safety', {\n  systemName: 'chat-moderation',\n  contentTypes: ['text', 'images'],\n  moderationLevel: 'strict'\n});",
    "usesAgents": [
      "safety-auditor",
      "toxicity-developer",
      "pii-developer",
      "hallucination-developer",
      "abuse-prevention-developer",
      "alert-developer",
      "pipeline-developer"
    ]
  },
  "outgoingEdges": [
    {
      "from": "lib-process:ai-agents-conversational--content-moderation-safety",
      "to": "domain:software-engineering",
      "kind": "lib_applies_to_domain",
      "attributes": {
        "weight": 1
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--content-moderation-safety",
      "to": "workflow:agent-evaluation-cycle",
      "kind": "lib_implements_workflow",
      "attributes": {
        "weight": 1
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--content-moderation-safety",
      "to": "specialization:ai-agents-conversational",
      "kind": "lib_belongs_to_specialization",
      "attributes": {
        "weight": 0.9
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--content-moderation-safety",
      "to": "lib-agent:ai-agents-conversational--safety-auditor",
      "kind": "uses_agent",
      "attributes": {
        "weight": 0.8
      }
    }
  ],
  "incomingEdges": []
}