Record
Agentic AI Atlas · Model Fairness Audit
workflow:model-fairness-audit · a5c.ai
Workflow JSON

workflow:model-fairness-audit

Structured · live

Model Fairness Audit · JSON

Inspect the normalized record payload exactly as the atlas UI reads it.

File · workflows/workflows/workflows-data-science-deep.yaml · Cluster · workflows
Record JSON
{
  "id": "workflow:model-fairness-audit",
  "_kind": "Workflow",
  "_file": "workflows/workflows/workflows-data-science-deep.yaml",
  "_cluster": "workflows",
  "attributes": {
    "displayName": "Model Fairness Audit",
    "workflowKind": "governance",
    "triggerType": "scheduled",
    "typicalCadence": "per-model",
    "complexity": "cross-team",
    "description": "Audits deployed ML models for demographic bias and fairness violations —\ncomputing disparate impact ratios, equalized odds, and demographic parity\nmetrics across protected attributes using Fairlearn or AIF360; comparing\nsubgroup performance differentials; documenting mitigation strategies\napplied (reweighing, threshold adjustment, post-processing); and certifying\ncompliance with organizational fairness policies before production\npromotion. Excludes model training and feature engineering.\n"
  },
  "outgoingEdges": [
    {
      "from": "workflow:model-fairness-audit",
      "to": "role:data-scientist",
      "kind": "involves_role",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "role:ml-engineer",
      "kind": "involves_role",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "role:security-reviewer",
      "kind": "involves_role",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "skill-area:ml-fine-tuning",
      "kind": "requires_skill_area",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "skill-area:eval-driven-development",
      "kind": "requires_skill_area",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "domain:data-science",
      "kind": "applies_to_domain",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "domain:ml-ops",
      "kind": "applies_to_domain",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "responsibility:ai-safety-guardrails",
      "kind": "triggers_responsibility",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "responsibility:data-quality-monitoring",
      "kind": "triggers_responsibility",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "org-unit:ml-team",
      "kind": "performed_by_org_unit",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "org-unit:ai-enablement",
      "kind": "performed_by_org_unit",
      "attributes": {}
    },
    {
      "from": "workflow:model-fairness-audit",
      "to": "org-unit:security-team",
      "kind": "performed_by_org_unit",
      "attributes": {}
    }
  ],
  "incomingEdges": []
}