II.
StackProfile JSON
Structured · live · stack-profile:data-lakehouse
Data Lakehouse Stack (Databricks, Spark, Delta Lake, dbt, Airflow) · json
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "stack-profile:data-lakehouse",
"_kind": "StackProfile",
"_file": "domain/stack-profiles/deep-stacks-1.yaml",
"_cluster": "domain",
"attributes": {
"displayName": "Data Lakehouse Stack (Databricks, Spark, Delta Lake, dbt, Airflow)",
"description": "A unified analytics platform that combines the flexibility of a data\nlake with the ACID transaction guarantees and performance of a data\nwarehouse. Databricks provides the managed Spark runtime and Delta Lake\ntable format for schema evolution, time travel, and MERGE operations on\ncloud object storage.\n\ndbt handles the transformation layer, turning raw ingested tables into\ntested, documented analytical models. Airflow orchestrates the end-to-end\npipeline from ingestion through transformation to consumption. Python\nserves as the glue language for custom operators, UDFs, and notebooks.\nChoose this stack when you need a single copy of data serving both BI\ndashboards and ML feature engineering, and when your data volumes exceed\nwhat a traditional warehouse handles cost-effectively.\n",
"composes": [
"tool:databricks",
"tool:airflow",
"language:python",
"language:sql",
"framework:spark-java",
"tool:prometheus",
"tool:docker"
]
},
"outgoingEdges": [
{
"from": "stack-profile:data-lakehouse",
"to": "tool:databricks",
"kind": "composed_of"
},
{
"from": "stack-profile:data-lakehouse",
"to": "tool:airflow",
"kind": "composed_of"
},
{
"from": "stack-profile:data-lakehouse",
"to": "language:python",
"kind": "composed_of"
},
{
"from": "stack-profile:data-lakehouse",
"to": "language:sql",
"kind": "composed_of"
},
{
"from": "stack-profile:data-lakehouse",
"to": "framework:spark-java",
"kind": "composed_of"
},
{
"from": "stack-profile:data-lakehouse",
"to": "tool:prometheus",
"kind": "composed_of"
},
{
"from": "stack-profile:data-lakehouse",
"to": "tool:docker",
"kind": "composed_of"
},
{
"from": "stack-profile:data-lakehouse",
"to": "role:data-engineer",
"kind": "used_by_role"
},
{
"from": "stack-profile:data-lakehouse",
"to": "role:analytics-engineer",
"kind": "used_by_role"
},
{
"from": "stack-profile:data-lakehouse",
"to": "role:data-scientist",
"kind": "used_by_role"
},
{
"from": "stack-profile:data-lakehouse",
"to": "workflow:data-pipeline-deployment",
"kind": "follows_workflow"
},
{
"from": "stack-profile:data-lakehouse",
"to": "workflow:dbt-model-review",
"kind": "follows_workflow"
},
{
"from": "stack-profile:data-lakehouse",
"to": "domain:data-engineering",
"kind": "applies_to"
},
{
"from": "stack-profile:data-lakehouse",
"to": "domain:data-science",
"kind": "applies_to"
},
{
"from": "stack-profile:data-lakehouse",
"to": "skill-area:etl-pipelines",
"kind": "requires_skill_area"
},
{
"from": "stack-profile:data-lakehouse",
"to": "skill-area:data-warehouse-modeling",
"kind": "requires_skill_area"
},
{
"from": "stack-profile:data-lakehouse",
"to": "skill-area:spark-jobs",
"kind": "requires_skill_area"
},
{
"from": "stack-profile:data-lakehouse",
"to": "skill-area:dbt-modeling",
"kind": "requires_skill_area"
},
{
"from": "stack-profile:data-lakehouse",
"to": "skill-area:data-quality",
"kind": "requires_skill_area"
}
],
"incomingEdges": []
}