II.
Workflow JSON
Structured · live · workflow:legal-ai-bias-audit
Legal AI Bias Audit JSON
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "workflow:legal-ai-bias-audit",
"_kind": "Workflow",
"_file": "workflows/workflows/workflows-thin-domains-fill.yaml",
"_cluster": "workflows",
"attributes": {
"displayName": "Legal AI Bias Audit",
"workflowKind": "governance",
"triggerType": "scheduled",
"typicalCadence": "semi-annually",
"complexity": "cross-team",
"description": "Audits AI systems used in legal workflows for bias and fairness --\nevaluating contract review model performance disparities across\ndocument types, jurisdictions, and languages, testing e-discovery\nrelevance models for demographic and topic bias in responsive\ndocument identification, assessing predictive coding consistency\nbetween reviewers and model predictions, reviewing litigation outcome\nprediction models for protected-class correlation leakage, auditing\nlegal research tool ranking algorithms for citation bias, evaluating\nexplainability and transparency of AI-assisted legal reasoning, and\nverifying compliance with emerging AI governance regulations (EU AI\nAct, state-level requirements). Produces bias audit report, fairness\nmetric dashboard, and mitigation recommendations. Excludes model\nretraining.\n"
},
"outgoingEdges": [
{
"from": "workflow:legal-ai-bias-audit",
"to": "role:ml-engineer",
"kind": "involves_role",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "role:security-reviewer",
"kind": "involves_role",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "role:data-scientist",
"kind": "involves_role",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "skill-area:ml-fine-tuning",
"kind": "requires_skill_area",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "skill-area:data-quality",
"kind": "requires_skill_area",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "domain:legaltech",
"kind": "applies_to_domain",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "domain:legal",
"kind": "applies_to_domain",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "domain:data-science",
"kind": "applies_to_domain",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "responsibility:security-review",
"kind": "triggers_responsibility",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "responsibility:data-quality-monitoring",
"kind": "triggers_responsibility",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "org-unit:ml-platform-team",
"kind": "performed_by_org_unit",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "org-unit:legal-team",
"kind": "performed_by_org_unit",
"attributes": {}
},
{
"from": "workflow:legal-ai-bias-audit",
"to": "org-unit:compliance-team",
"kind": "performed_by_org_unit",
"attributes": {}
}
],
"incomingEdges": []
}