II.
Workflow JSON
Structured · live · workflow:data-pipeline-monitoring
Data Pipeline Monitoring JSON
Inspect the normalized record payload exactly as the atlas UI reads it.
{
  "id": "workflow:data-pipeline-monitoring",
  "_kind": "Workflow",
  "_file": "workflows/workflows/workflows-data.yaml",
  "_cluster": "workflows",
  "attributes": {
    "displayName": "Data Pipeline Monitoring",
    "workflowKind": "data",
    "triggerType": "event-driven",
    "typicalCadence": "continuous",
    "complexity": "single-team",
    "description": "Monitors data pipeline health, detects schema drift or data-quality\nanomalies, and alerts owners for investigation and repair.\n"
  },
  "outgoingEdges": [
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "role:data-scientist",
      "kind": "involves_role",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "role:ml-engineer",
      "kind": "involves_role",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "skill-area:python-data-pipelines",
      "kind": "requires_skill_area",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "skill-area:kafka-stream-processing",
      "kind": "requires_skill_area",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "skill-area:observability-pipeline",
      "kind": "requires_skill_area",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "domain:data-science",
      "kind": "applies_to_domain",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "domain:ml-ops",
      "kind": "applies_to_domain",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "responsibility:respond-incidents",
      "kind": "triggers_responsibility",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "org-unit:data-team",
      "kind": "performed_by_org_unit",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "org-unit:ml-team",
      "kind": "performed_by_org_unit",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "org-unit:ml-platform-team",
      "kind": "performed_by_org_unit",
      "attributes": {}
    },
    {
      "from": "workflow:data-pipeline-monitoring",
      "to": "org-unit:data-platform-team",
      "kind": "performed_by_org_unit",
      "attributes": {}
    }
  ],
  "incomingEdges": [
    {
      "from": "stack-profile:data-pipeline-orchestration",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "follows_workflow",
      "attributes": {}
    },
    {
      "from": "tool:databricks",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "high",
        "evidence": "Jobs, notebooks, and warehouse metadata support pipeline monitoring."
      }
    },
    {
      "from": "tool:snowflake",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "high",
        "evidence": "Warehouse freshness and query metadata support data pipeline monitoring."
      }
    },
    {
      "from": "tool:dbt-labs",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "high",
        "evidence": "Ora surfaced dbt Labs for data engineering orchestration and warehouse workflows."
      }
    },
    {
      "from": "tool-server:mcp-fivetran",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "high",
        "evidence": "Fivetran connector metadata is a direct data-pipeline monitoring input."
      }
    },
    {
      "from": "tool-server:mcp-snowflake",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "high",
        "evidence": "Warehouse state and query results project pipeline health and data freshness."
      }
    },
    {
      "from": "tool-server:mcp-segment",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "medium",
        "evidence": "Event stream health and schema metadata feed data pipeline review."
      }
    },
    {
      "from": "tool-server:mcp-dbt-candidate",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "medium",
        "evidence": "dbt tests, lineage, and run results are pipeline-monitoring projection points."
      }
    },
    {
      "from": "tool-server:mcp-tinybird-candidate",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "medium",
        "evidence": "Tinybird is relevant to live data-product monitoring and analytics pipelines."
      }
    },
    {
      "from": "tool-server:mcp-power-bi-candidate",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "medium",
        "evidence": "Dashboard health and dataset refreshes are common analytics-monitoring projection points."
      }
    },
    {
      "from": "tool-server:mcp-databricks-candidate",
      "to": "workflow:data-pipeline-monitoring",
      "kind": "supports_work",
      "attributes": {
        "confidence": "medium",
        "evidence": "Databricks jobs and SQL warehouses are common data-pipeline monitoring surfaces."
      }
    }
  ]
}