II.
ModelTransportProtocol JSON
Structured · live · model-transport:gemini-generate-content
Gemini generateContent JSON
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "model-transport:gemini-generate-content",
"_kind": "ModelTransportProtocol",
"_file": "compute/model-transport-protocols/gemini-generate-content.yaml",
"_cluster": "compute",
"attributes": {
"displayName": "Gemini generateContent",
"vendor": "Google",
"specUrl": "https://ai.google.dev/api/generate-content",
"streamingFraming": "sse",
"toolUseSchema": "Tool calls appear as `functionCall` parts within `candidates[].content.parts`,\neach with `name` and `args`. Tool results are returned as\n`functionResponse` parts referencing the same `name`.\n",
"thinkingChannel": "part",
"cacheControl": "explicit",
"firstSpecVersion": "v1",
"currentSpecVersion": "v1",
"status": "standard",
"requestBodyShape": "POST /v1beta/models/{model}:generateContent\nPOST /v1beta/models/{model}:streamGenerateContent\nJSON body (top-level fields):\n - `contents` (required, array of `{role: \"user\"|\"model\", parts: Part[]}`)\n - `systemInstruction` (optional, `{parts: Part[]}`)\n - `tools` (optional, array — `{functionDeclarations: [...]}` and/or\n built-ins like `googleSearch`, `codeExecution`)\n - `toolConfig` (optional, `{functionCallingConfig: {mode, allowedFunctionNames?}}`)\n - `safetySettings` (optional)\n - `generationConfig` (optional, `{temperature, topP, topK, maxOutputTokens,\n stopSequences, responseMimeType, responseSchema, thinkingConfig?}`)\n - `cachedContent` (optional, string — name of a server-side cached content resource)\nA `Part` is one of: `{text}`, `{inlineData: {mimeType, data}}`,\n`{fileData: {mimeType, fileUri}}`, `{functionCall}`, `{functionResponse}`,\n`{thought: true, ...}`.\n",
"responseBodyShape": "Non-streaming response (HTTP 200 application/json):\n {\n \"candidates\": [\n { \"content\": { \"role\": \"model\", \"parts\": [ Part, ... ] },\n \"finishReason\": \"STOP\"|\"MAX_TOKENS\"|\"SAFETY\"|\"RECITATION\"|\"OTHER\",\n \"safetyRatings\": [...],\n \"index\": 0 }\n ],\n \"promptFeedback\"?: { \"blockReason\": ..., \"safetyRatings\": [...] },\n \"usageMetadata\": {\n \"promptTokenCount\": int,\n \"candidatesTokenCount\": int,\n \"totalTokenCount\": int,\n \"cachedContentTokenCount\"?: int\n }\n }\n",
"streamingEventTypes": [
"GenerateContentResponse"
],
"toolCallWireFormat": "A `functionCall` part on `candidates[].content.parts[]`:\n { \"functionCall\": { \"name\": \"<tool_name>\", \"args\": { ... } } }\nNote: there is no per-call id field; correlation is by `name` and\nposition within the conversation.\n",
"toolResultWireFormat": "A `functionResponse` part in a subsequent user-role content turn:\n { \"functionResponse\": { \"name\": \"<tool_name>\",\n \"response\": { ... } } }\n",
"errorEnvelope": "Non-2xx response, `application/json` (Google API error envelope):\n { \"error\": { \"code\": int,\n \"message\": string,\n \"status\": \"INVALID_ARGUMENT\"|\"PERMISSION_DENIED\"|\"RESOURCE_EXHAUSTED\"|\"INTERNAL\"|...,\n \"details\": [ ... ] } }\n",
"cacheControlWireFormat": "Server-side `cachedContent` resource model.\n 1. Client POSTs to `/v1beta/cachedContents` with `{model, contents,\n systemInstruction?, tools?, ttl}` and receives `{name: \"cachedContents/<id>\"}`.\n 2. On subsequent generateContent requests, set top-level\n `cachedContent: \"cachedContents/<id>\"` to reference it.\nCache hit accounting reported as `usageMetadata.cachedContentTokenCount`.\n",
"rateLimitSignaling": "HTTP 429 `RESOURCE_EXHAUSTED` on quota exceedance. `retry-after` header\nis returned with the recommended back-off (seconds). No documented\nper-tenant remaining-budget headers on the response.\nTODO: verify whether the Vertex variant exposes additional quota headers.\n",
"reasoningWireFormat": "Reasoning is carried as additional `parts` on the model candidate\nannotated with `thought: true` (e.g. `{ \"text\": \"...\", \"thought\": true }`).\nBehavior is gated by `generationConfig.thinkingConfig` (e.g.\n`{includeThoughts: true, thinkingBudget?}`).\nTODO: verify field names against current `/v1beta` spec — naming has shifted.\n",
"authHeaderFormat": "AI Studio (`generativelanguage.googleapis.com`):\n `x-goog-api-key: <GEMINI_API_KEY>` (or `?key=...` query param)\nVertex AI (`*-aiplatform.googleapis.com`):\n `Authorization: Bearer <gcloud-oauth-token>`\n",
"versioningHeader": "Path-versioned: `/v1`, `/v1beta`. No protocol-version header on the\nrequest; new features land first under `/v1beta`.\n"
},
"outgoingEdges": [
{
"from": "model-transport:gemini-generate-content",
"to": "layer:3-transport",
"kind": "realizes",
"attributes": {}
},
{
"from": "model-transport:gemini-generate-content",
"to": "provider:google",
"kind": "served_by",
"attributes": {}
},
{
"from": "model-transport:gemini-generate-content",
"to": "model:gemini-2-5-pro@current",
"kind": "spoken_by",
"attributes": {}
},
{
"from": "model-transport:gemini-generate-content",
"to": "agent-core-impl:cursor.core@current",
"kind": "spoken_by",
"attributes": {}
},
{
"from": "model-transport:gemini-generate-content",
"to": "agent-core-impl:opencode.core@1.x",
"kind": "spoken_by",
"attributes": {}
}
],
"incomingEdges": [
{
"from": "agent-core-impl:a5c.core@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:cursor.core@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:gemini-cli.core@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:opencode.core@1.x",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "model:gemini-2-0-flash@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "model:gemini-2-5-flash@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "model:gemini-2-5-pro@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "model:gemini-3-1-deep-think@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "model:gemini-3-1-flash-lite@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "model:gemini-3-1-pro@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "model:gemini-3-flash@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "model:gemini-3-pro@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
}
]
}