{
"id": "model-transport:openai-chat-completions",
"_kind": "ModelTransportProtocol",
"_file": "compute/model-transport-protocols/openai-chat-completions.yaml",
"_cluster": "compute",
"attributes": {
"displayName": "OpenAI Chat Completions",
"vendor": "OpenAI",
"specUrl": "https://platform.openai.com/docs/api-reference/chat",
"streamingFraming": "sse",
"toolUseSchema": "Tool calls appear in the assistant message's `tool_calls` array with\n`id`, `type: \"function\"`, and `function: { name, arguments }`. Tool\nresults are submitted as messages with `role: \"tool\"` and\n`tool_call_id` referencing the original call.\n",
"thinkingChannel": "none",
"cacheControl": "implicit",
"firstSpecVersion": "2023-06-13",
"currentSpecVersion": "2024-10-01",
"status": "standard",
"requestBodyShape": "POST /v1/chat/completions\nJSON body (top-level fields):\n - `model` (required, string)\n - `messages` (required, array of `{role: \"system\"|\"user\"|\"assistant\"|\"tool\", content, name?, tool_call_id?, tool_calls?}`)\n - `tools` (optional, array of `{type: \"function\", function: {name, description, parameters}}`)\n - `tool_choice` (optional, \"auto\"|\"none\"|\"required\"|`{type:\"function\", function:{name}}`)\n - `temperature`, `top_p`, `n`, `stop`, `max_tokens`, `max_completion_tokens` (optional)\n - `stream` (optional, bool)\n - `stream_options` (optional, `{include_usage: bool}`)\n - `response_format` (optional, json_schema / json_object / text)\n - `prompt_cache_key` (optional, string)\n - `user` (optional, string — abuse-monitoring tag)\n",
"responseBodyShape": "Non-streaming response (HTTP 200 application/json):\n {\n \"id\": \"chatcmpl-...\",\n \"object\": \"chat.completion\",\n \"created\": int,\n \"model\": \"...\",\n \"choices\": [\n { \"index\": 0,\n \"message\": { \"role\": \"assistant\", \"content\": string|null,\n \"tool_calls\"?: [...], \"refusal\"?: string|null },\n \"finish_reason\": \"stop\"|\"length\"|\"tool_calls\"|\"content_filter\" }\n ],\n \"usage\": {\n \"prompt_tokens\": int, \"completion_tokens\": int, \"total_tokens\": int,\n \"prompt_tokens_details\": { \"cached_tokens\": int },\n \"completion_tokens_details\": { \"reasoning_tokens\"?: int }\n }\n }\n",
"streamingEventTypes": [
"chat.completion.chunk",
"[DONE]"
],
"toolCallWireFormat": "On the assistant message:\n \"tool_calls\": [\n { \"id\": \"call_...\",\n \"type\": \"function\",\n \"function\": { \"name\": \"<tool_name>\", \"arguments\": \"<JSON-encoded string>\" } }\n ]\n",
"toolResultWireFormat": "A subsequent message with role=tool:\n { \"role\": \"tool\",\n \"tool_call_id\": \"call_...\",\n \"content\": \"<string>\" }\n",
"errorEnvelope": "Non-2xx response, `application/json`:\n { \"error\": { \"type\": string, \"code\": string|null, \"message\": string,\n \"param\": string|null } }\nSame envelope as the Responses API.\n",
"cacheControlWireFormat": "Implicit automatic prompt caching for prompts >= 1024 tokens; cache\nstate reported in `usage.prompt_tokens_details.cached_tokens`.\nOptional explicit scoping via top-level `prompt_cache_key`.\n",
"rateLimitSignaling": "Same as OpenAI Responses: `x-ratelimit-{limit,remaining,reset}-{requests,tokens}`\nheaders on every response; `retry-after` on HTTP 429.\n",
"reasoningWireFormat": "No first-class reasoning channel on Chat Completions. For reasoning\nmodels served via this surface, internal reasoning tokens are\nsurfaced only as a count in `usage.completion_tokens_details.reasoning_tokens`;\nthe reasoning text itself is not returned. Use the Responses API for\nreasoning summaries / encrypted reasoning state.\n",
"authHeaderFormat": "`Authorization: Bearer <OPENAI_API_KEY>`\nOptional: `OpenAI-Organization`, `OpenAI-Project`.\n",
"versioningHeader": "No general API version header; surface version is implied by the\n`/v1/chat/completions` path.\n"
},
"outgoingEdges": [
{
"from": "model-transport:openai-chat-completions",
"to": "layer:3-transport",
"kind": "realizes",
"attributes": {}
},
    {
      "from": "model-transport:openai-chat-completions",
      "to": "provider:openai",
      "kind": "served_by",
      "attributes": {}
    },
    {
      "from": "model-transport:openai-chat-completions",
      "to": "model:gpt-5@current",
      "kind": "spoken_by",
      "attributes": {}
    }
],
"incomingEdges": [
{
"from": "agent-core-impl:a5c.core@current",
"to": "model-transport:openai-chat-completions",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:codex-sdk.core@current",
"to": "model-transport:openai-chat-completions",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "model-transport:openai-chat-completions",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:qwen.core@current",
"to": "model-transport:openai-chat-completions",
"kind": "speaks",
"attributes": {}
},
{
"from": "model-transport:openai-compat",
"to": "model-transport:openai-chat-completions",
"kind": "alias_of",
"attributes": {}
},
    {
      "from": "model:gpt-4o-mini@current",
      "to": "model-transport:openai-chat-completions",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "model:gpt-4o@current",
      "to": "model-transport:openai-chat-completions",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "model:gpt-5-mini@current",
      "to": "model-transport:openai-chat-completions",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "model:gpt-5.4-mini@current",
      "to": "model-transport:openai-chat-completions",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "model:gpt-5.4@current",
      "to": "model-transport:openai-chat-completions",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "model:gpt-5.5@current",
      "to": "model-transport:openai-chat-completions",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "model:gpt-5@current",
      "to": "model-transport:openai-chat-completions",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "model:o1@current",
      "to": "model-transport:openai-chat-completions",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "model:o3@current",
      "to": "model-transport:openai-chat-completions",
      "kind": "speaks",
      "attributes": {}
    }
]
}