{
"id": "agent-core-impl:langgraph.core@current",
"_kind": "AgentCoreImpl",
"_file": "agent-stack/core-impls/langgraph-core-current.yaml",
"_cluster": "agent-stack",
"attributes": {
"displayName": "LangGraph Graph Core (current)",
"agentVersionId": "agent-version:langgraph@current",
"packageRef": "source-ref:langgraph-github",
"loopIteratorPolicy": "custom",
"loopIteratorNotes": "The core loop is an explicit graph/state-machine authored by the application:\nnodes transform state, edges route control, and terminal conditions are part\nof the graph definition rather than a fixed CLI loop.\n",
"contextManagementStrategy": "user-managed",
"compactionTriggerNotes": "Context and memory are represented in graph state, stores, checkpointers, or\nmodel-call middleware chosen by the graph author.\n",
"subagentInvokerPolicy": "nested-loop",
"subagentInvokerNotes": "Custom multi-agent patterns can be encoded as subgraphs, supervisor graphs,\nhandoff nodes, or Deep Agents-style subagent harnesses.\n",
"resultSynthesisPolicy": "post-processed",
"resultSynthesisNotes": "Final outputs are graph-defined: the terminal state can be converted to chat\nmessages, structured outputs, events, or application-specific payloads.\n",
"stopDetectionStrategy": "structured-end-event",
"transportClientLibrary": "langchain-chat-model-abstraction",
"supportedTransportProtocols": [
"model-transport:openai-chat-completions",
"model-transport:openai-responses",
"model-transport:anthropic-messages",
"model-transport:gemini-generate-content",
"model-transport:openai-compat"
],
"parallelToolCallHandling": "native",
"streamingFidelity": "full",
"thinkingChannelHandling": "model-dependent",
"notes": "Represents LangGraph's framework-level core, not a single product loop. The\ngraph author chooses which model provider integrations and node semantics are\nactive for a concrete application.\n"
},
"outgoingEdges": [
{
"from": "agent-core-impl:langgraph.core@current",
"to": "layer:4-agent-core",
"kind": "realizes",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "agent:langgraph",
"kind": "composes",
"attributes": {
"role": "core"
}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "model-transport:openai-chat-completions",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "model-transport:openai-responses",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "model-transport:anthropic-messages",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "model-transport:gemini-generate-content",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "model-transport:openai-compat",
"kind": "speaks",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "source-ref:langgraph-docs",
"kind": "sourced_from",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "source-ref:langchain-agents-docs",
"kind": "sourced_from",
"attributes": {}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "capability:supports-tool-use",
"kind": "supports",
"attributes": {
"versionRange": ">=0.0.0",
"level": "full"
}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "capability:streaming",
"kind": "supports",
"attributes": {
"versionRange": ">=0.0.0",
"level": "full"
}
},
{
"from": "agent-core-impl:langgraph.core@current",
"to": "capability:can-resume",
"kind": "supports",
"attributes": {
"versionRange": ">=0.0.0",
"level": "full"
}
}
],
"incomingEdges": [
{
"from": "agent-version:langgraph@current",
"to": "agent-core-impl:langgraph.core@current",
"kind": "composed_of",
"attributes": {
"role": "core"
}
}
]
}