II.
Capability JSON
Structured · live · capability:supports-batch-api
Supports batch API — JSON
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "capability:supports-batch-api",
"_kind": "Capability",
"_file": "capabilities/capabilities/supports-batch-api.yaml",
"_cluster": "capabilities",
"attributes": {
"displayName": "Supports batch API",
"description": "The provider exposes an asynchronous batch inference surface that\naccepts a file of requests, processes them within a vendor-specified\nSLA window (typically 24h), and returns results at a discounted rate.\nOpenAI Batch API and Anthropic Message Batches are the canonical\nexamples.\n",
"appliesToNodeKinds": [
"Provider"
],
"category": "provider-feature"
},
"outgoingEdges": [],
"incomingEdges": [
{
"from": "model:claude-haiku-4-5@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=4.5.0 <4.6.0",
"level": "full",
"evidenceSourceIds": [
"evidence:anthropic-models-page"
]
}
},
{
"from": "model:claude-opus-3@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=3.0.0 <4.0.0",
"level": "full",
"evidenceSourceIds": [
"evidence:anthropic-models-page"
]
}
},
{
"from": "model:claude-opus-4-5@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=4.5.0 <4.6.0",
"level": "full",
"evidenceSourceIds": [
"evidence:anthropic-models-page"
]
}
},
{
"from": "model:claude-opus-4-6@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=4.6.0 <4.7.0",
"level": "full",
"evidenceSourceIds": [
"evidence:anthropic-models-page"
]
}
},
{
"from": "model:claude-opus-4-7@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=4.7.0 <4.8.0",
"level": "full",
"evidenceSourceIds": [
"evidence:anthropic-models-page"
]
}
},
{
"from": "model:claude-sonnet-3-5@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=3.5.0 <3.6.0",
"level": "full",
"evidenceSourceIds": [
"evidence:anthropic-models-page"
]
}
},
{
"from": "model:claude-sonnet-4-5@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=4.5.0 <4.6.0",
"level": "full",
"evidenceSourceIds": [
"evidence:anthropic-models-page"
]
}
},
{
"from": "model:claude-sonnet-4-6@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=4.6.0 <4.7.0",
"level": "full",
"evidenceSourceIds": [
"evidence:anthropic-models-page"
]
}
},
{
"from": "model:gemini-2-0-flash@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=2.0.0 <2.1.0",
"level": "full",
"notes": "Gemini Batch Mode (batchPredict / file-batches) provides 50% cost reduction.\n",
"evidenceSourceIds": [
"evidence:google-models-page"
]
}
},
{
"from": "model:gemini-2-5-flash@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=2.5.0 <2.6.0",
"level": "full",
"notes": "Gemini Batch Mode (batchPredict / file-batches) provides 50% cost reduction.\n",
"evidenceSourceIds": [
"evidence:google-models-page"
]
}
},
{
"from": "model:gemini-2-5-pro@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=2.5.0 <2.6.0",
"level": "full",
"notes": "Gemini Batch Mode (batchPredict / file-batches) provides 50% cost reduction.\n",
"evidenceSourceIds": [
"evidence:google-models-page"
]
}
},
{
"from": "model:gemini-3-1-deep-think@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=3.1.0 <3.2.0",
"level": "full",
"evidenceSourceIds": [
"evidence:google-models-page"
]
}
},
{
"from": "model:gemini-3-1-flash-lite@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=3.1.0 <3.2.0",
"level": "full",
"evidenceSourceIds": [
"evidence:google-models-page"
]
}
},
{
"from": "model:gemini-3-1-pro@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=3.1.0 <3.2.0",
"level": "full",
"evidenceSourceIds": [
"evidence:google-models-page"
]
}
},
{
"from": "model:gemini-3-flash@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=3.0.0 <3.1.0",
"level": "full",
"evidenceSourceIds": [
"evidence:google-models-page"
]
}
},
{
"from": "model:gemini-3-pro@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=3.0.0 <3.1.0",
"level": "full",
"evidenceSourceIds": [
"evidence:google-models-page"
]
}
},
{
"from": "model:gpt-4o-mini@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=4.0.0 <5.0.0",
"level": "full",
"evidenceSourceIds": [
"evidence:openai-models-page"
]
}
},
{
"from": "model:gpt-4o@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=4.0.0 <5.0.0",
"level": "full",
"evidenceSourceIds": [
"evidence:openai-models-page"
]
}
},
{
"from": "model:gpt-5-mini@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=5.0.0 <6.0.0",
"level": "full",
"evidenceSourceIds": [
"evidence:openai-models-page"
]
}
},
{
"from": "model:gpt-5.4-mini@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=5.4.0 <5.5.0",
"level": "full",
"evidenceSourceIds": [
"evidence:openai-api-pricing-2026-05"
]
}
},
{
"from": "model:gpt-5.4@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=5.4.0 <5.5.0",
"level": "full",
"evidenceSourceIds": [
"evidence:openai-api-pricing-2026-05"
]
}
},
{
"from": "model:gpt-5.5@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=5.5.0 <5.6.0",
"level": "full",
"evidenceSourceIds": [
"evidence:openai-api-pricing-2026-05"
]
}
},
{
"from": "model:gpt-5@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=5.0.0 <6.0.0",
"level": "full",
"evidenceSourceIds": [
"evidence:openai-models-page"
]
}
},
{
"from": "model:o1@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=1.0.0 <2.0.0",
"level": "full",
"evidenceSourceIds": [
"evidence:openai-models-page"
]
}
},
{
"from": "model:o3@current",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=3.0.0 <4.0.0",
"level": "full",
"evidenceSourceIds": [
"evidence:openai-models-page"
]
}
},
{
"from": "provider:anthropic",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=2024-01-01"
}
},
{
"from": "provider:azure-openai",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=2024-06-01"
}
},
{
"from": "provider:openai",
"to": "capability:supports-batch-api",
"kind": "supports",
"attributes": {
"versionRange": ">=2024-01-01"
}
}
]
}