Agentic AI Atlas by a5c.ai
Overview · Wiki · Graph · For Agents · Edges · Search · Workspace
/
GitHub · Docs · Discord
Record
Agentic AI Atlas · OpenAI Responses
model-transport:openai-responses · a5c.ai
Search record views/
Record · tabs

Available views

II. Record views · pp. 1 - 1
overview · json · graph
II.
ModelTransportProtocol JSON

model-transport:openai-responses

Structured · live

OpenAI Responses json

Inspect the normalized record payload exactly as the atlas UI reads it.

File · compute/model-transport-protocols/openai-responses.yaml · Cluster · compute
Record JSON
{
  "id": "model-transport:openai-responses",
  "_kind": "ModelTransportProtocol",
  "_file": "compute/model-transport-protocols/openai-responses.yaml",
  "_cluster": "compute",
  "attributes": {
    "displayName": "OpenAI Responses",
    "vendor": "OpenAI",
    "specUrl": "https://platform.openai.com/docs/api-reference/responses",
    "streamingFraming": "sse",
    "toolUseSchema": "Tool calls appear as `function_call` items in the response output\narray with `call_id`, `name`, and `arguments` (JSON string). Tool\nresults are returned as `function_call_output` items referencing the\nsame `call_id`.\n",
    "thinkingChannel": "item",
    "cacheControl": "implicit",
    "firstSpecVersion": "2025-03-11",
    "currentSpecVersion": "2025-03-11",
    "status": "standard",
    "requestBodyShape": "POST /v1/responses\nJSON body (top-level fields):\n  - `model` (required, string)\n  - `input` (required, string or array of input items — messages,\n    function_call_output, file/image refs, ...)\n  - `instructions` (optional, string — system-style preamble)\n  - `tools` (optional, array; built-in tools like `web_search`, `file_search`,\n    `code_interpreter`, plus `{type: \"function\", name, description, parameters}`)\n  - `tool_choice` (optional)\n  - `reasoning` (optional, `{effort: \"low\"|\"medium\"|\"high\", summary?: \"auto\"|\"concise\"|\"detailed\"}`)\n  - `max_output_tokens`, `temperature`, `top_p` (optional)\n  - `stream` (optional, bool)\n  - `previous_response_id` (optional, string — server-side state chaining)\n  - `store` (optional, bool — whether the server should retain this response)\n  - `metadata` (optional, map<string,string>)\n  - `prompt_cache_key` (optional, string — opt-in caching scope key)\n",
    "responseBodyShape": "Non-streaming response (HTTP 200 application/json):\n  {\n    \"id\": \"resp_...\",\n    \"object\": \"response\",\n    \"created_at\": int,\n    \"model\": \"...\",\n    \"status\": \"completed\"|\"in_progress\"|\"failed\"|\"incomplete\",\n    \"output\": [\n    ],\n    \"output_text\": string,\n    \"usage\": {\n      \"input_tokens\": int,\n      \"output_tokens\": int,\n      \"total_tokens\": int,\n      \"input_tokens_details\": { \"cached_tokens\": int },\n      \"output_tokens_details\": { \"reasoning_tokens\": int }\n    },\n    \"error\": null|{...}\n  }\n",
    "streamingEventTypes": [
      "response.created",
      "response.in_progress",
      "response.output_item.added",
      "response.output_item.done",
      "response.content_part.added",
      "response.content_part.done",
      "response.output_text.delta",
      "response.output_text.done",
      "response.function_call_arguments.delta",
      "response.function_call_arguments.done",
      "response.reasoning_summary_text.delta",
      "response.reasoning_summary_text.done",
      "response.completed",
      "response.failed",
      "response.incomplete",
      "error"
    ],
    "toolCallWireFormat": "A `function_call` item in `response.output[]`:\n  { \"type\": \"function_call\",\n    \"id\": \"fc_...\",\n    \"call_id\": \"call_...\",\n    \"name\": \"<tool_name>\",\n    \"arguments\": \"<JSON-encoded string>\" }\n",
    "toolResultWireFormat": "A `function_call_output` item provided as input on the next request:\n  { \"type\": \"function_call_output\",\n    \"call_id\": \"call_...\",\n    \"output\": \"<string>\" }\n",
    "errorEnvelope": "Non-2xx response, `application/json`:\n  { \"error\": { \"type\": string, \"code\": string|null, \"message\": string,\n               \"param\": string|null } }\nCommon types: `invalid_request_error`, `authentication_error`,\n`permission_error`, `rate_limit_exceeded`, `server_error`.\nHTTP status: 400/401/403/404/429/500/503.\n",
    "cacheControlWireFormat": "Implicit automatic prompt caching for prompts >= 1024 tokens; cache\nstate is reported in `usage.input_tokens_details.cached_tokens`.\nOptional explicit scoping via top-level `prompt_cache_key` (free-form\nstring used by the server to bucket cache entries per project/tenant).\n",
    "rateLimitSignaling": "Response headers on every request:\n  - `x-ratelimit-limit-requests`, `x-ratelimit-remaining-requests`,\n    `x-ratelimit-reset-requests`\n  - `x-ratelimit-limit-tokens`, `x-ratelimit-remaining-tokens`,\n    `x-ratelimit-reset-tokens`\nOn HTTP 429: `retry-after` header (seconds).\n",
    "reasoningWireFormat": "A `reasoning` item in `response.output[]`:\n  { \"type\": \"reasoning\",\n    \"id\": \"rs_...\",\n    \"summary\": [ { \"type\": \"summary_text\", \"text\": \"...\" }, ... ],\n    \"encrypted_content\"?: \"<opaque>\",\n    \"status\": \"completed\" }\nRaw chain-of-thought tokens are not exposed; the `summary` items\ncarry user-visible reasoning. `encrypted_content` (when present) is\nechoed back via `previous_response_id` or input to preserve state.\n",
    "authHeaderFormat": "`Authorization: Bearer <OPENAI_API_KEY>`\nOptional: `OpenAI-Organization: <org-id>`, `OpenAI-Project: <proj-id>`.\n",
    "versioningHeader": "No general API version header; the `/v1/...` path is the surface version.\nPer-feature betas are opted into with `OpenAI-Beta: <feature>=v<n>`\nheaders (e.g. assistants/threads betas).\n"
  },
  "outgoingEdges": [
    {
      "from": "model-transport:openai-responses",
      "to": "layer:3-transport",
      "kind": "realizes",
      "attributes": {}
    },
    {
      "from": "model-transport:openai-responses",
      "to": "provider:openai",
      "kind": "served_by"
    },
    {
      "from": "model-transport:openai-responses",
      "to": "model:gpt-5@current",
      "kind": "spoken_by"
    },
    {
      "from": "model-transport:openai-responses",
      "to": "transport-proxy:agent-mux-proxy",
      "kind": "bridged_by"
    }
  ],
  "incomingEdges": [
    {
      "from": "agent-core-impl:a5c.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:codex-app-server.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:codex.core@1.x",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:codex-sdk.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:codex-websocket.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:cursor.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:hermes.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:langgraph.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:omp.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:openai-agents-sdk.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:openclaw.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:opencode.core@1.x",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:paperclip.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:pi.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:pikiclaw.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:symphony.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "agent-core-impl:vibe-kanban.core@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks",
      "attributes": {}
    },
    {
      "from": "model:gpt-4o-mini@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks"
    },
    {
      "from": "model:gpt-4o@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks"
    },
    {
      "from": "model:gpt-5-mini@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks"
    },
    {
      "from": "model:gpt-5.4-mini@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks"
    },
    {
      "from": "model:gpt-5.4@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks"
    },
    {
      "from": "model:gpt-5.5@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks"
    },
    {
      "from": "model:gpt-5@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks"
    },
    {
      "from": "model:o1@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks"
    },
    {
      "from": "model:o3@current",
      "to": "model-transport:openai-responses",
      "kind": "speaks"
    },
    {
      "from": "transport-proxy:agent-mux-proxy",
      "to": "model-transport:openai-responses",
      "kind": "bridges",
      "attributes": {
        "direction": "dst"
      }
    }
  ]
}

Shortcuts

Back to overview
Open graph tab