Record
Agentic AI Atlas · bias-detection-fairness
lib-process:ai-agents-conversational--bias-detection-fairness
LibraryProcess JSON

lib-process:ai-agents-conversational--bias-detection-fairness

Structured · live

bias-detection-fairness json

Inspect the normalized record payload exactly as the atlas UI reads it.

File · generated-library/processes.yaml
Cluster · generated-library
Record JSON
{
  "id": "lib-process:ai-agents-conversational--bias-detection-fairness",
  "_kind": "LibraryProcess",
  "_file": "generated-library/processes.yaml",
  "_cluster": "generated-library",
  "attributes": {
    "displayName": "bias-detection-fairness",
    "description": "Bias Detection and Fairness Audit - Process for auditing AI agents and chatbots for bias,\nimplementing fairness testing, diversity in training data, and establishing monitoring and correction mechanisms.",
    "libraryPath": "library/specializations/ai-agents-conversational/bias-detection-fairness.js",
    "specialization": "ai-agents-conversational",
    "references": [
      "AI Fairness 360: https://aif360.mybluemix.net/",
      "Fairlearn: https://fairlearn.org/",
      "NIST AI RMF: https://www.nist.gov/itl/ai-risk-management-framework"
    ],
    "example": "const result = await orchestrate('specializations/ai-agents-conversational/bias-detection-fairness', {\n  systemName: 'hiring-assistant',\n  biasCategories: ['gender', 'race', 'age', 'disability'],\n  fairnessMetrics: ['demographic-parity', 'equal-opportunity']\n});",
    "usesAgents": [
      "bias-fairness-analyst",
      "dataset-creator",
      "counterfactual-tester",
      "red-teamer",
      "metrics-calculator",
      "mitigation-developer",
      "monitoring-developer"
    ]
  },
  "outgoingEdges": [
    {
      "from": "lib-process:ai-agents-conversational--bias-detection-fairness",
      "to": "domain:software-engineering",
      "kind": "lib_applies_to_domain",
      "attributes": {
        "weight": 1
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--bias-detection-fairness",
      "to": "workflow:agent-evaluation-cycle",
      "kind": "lib_implements_workflow",
      "attributes": {
        "weight": 1
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--bias-detection-fairness",
      "to": "specialization:ai-agents-conversational",
      "kind": "lib_belongs_to_specialization",
      "attributes": {
        "weight": 0.9
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--bias-detection-fairness",
      "to": "lib-agent:ai-agents-conversational--bias-fairness-analyst",
      "kind": "uses_agent",
      "attributes": {
        "weight": 0.8
      }
    }
  ],
  "incomingEdges": []
}