Agentic AI Atlas · ONNX Runtime
Tool JSON

tool:onnx-runtime

Structured · live

ONNX Runtime JSON

Inspect the normalized record payload exactly as the atlas UI reads it.

File · domain/tools/tools-testing-frontend-aiml.yaml
Cluster · domain
Record JSON
{
  "id": "tool:onnx-runtime",
  "_kind": "Tool",
  "_file": "domain/tools/tools-testing-frontend-aiml.yaml",
  "_cluster": "domain",
  "attributes": {
    "displayName": "ONNX Runtime",
    "homepageUrl": "https://onnxruntime.ai",
    "kind": "other",
    "description": "Cross-platform, high-performance ML inference engine for ONNX models.\nRuns on CPU, CUDA, DirectML, CoreML, ROCm, and other execution providers;\nsupports quantization and graph optimisations. Used for deploying models\ntrained in PyTorch, TensorFlow, or scikit-learn after export to the open\nONNX interchange format.\n"
  },
  "outgoingEdges": [
    {
      "from": "tool:onnx-runtime",
      "to": "language:cpp",
      "kind": "belongs_to_language"
    },
    {
      "from": "tool:onnx-runtime",
      "to": "skill-area:model-serving",
      "kind": "tool_used_by",
      "attributes": {}
    },
    {
      "from": "tool:onnx-runtime",
      "to": "skill-area:model-optimisation",
      "kind": "tool_used_by",
      "attributes": {}
    },
    {
      "from": "tool:onnx-runtime",
      "to": "skill-area:model-serving",
      "kind": "used_for"
    },
    {
      "from": "tool:onnx-runtime",
      "to": "skill-area:ai-evaluation",
      "kind": "used_for"
    },
    {
      "from": "tool:onnx-runtime",
      "to": "tool:vllm",
      "kind": "alternative_to",
      "attributes": {
        "comparison": "ML inference — ONNX Runtime is cross-platform multi-model; vLLM is LLM-specific GPU"
      }
    },
    {
      "from": "tool:onnx-runtime",
      "to": "tool:tensorrt",
      "kind": "alternative_to",
      "attributes": {
        "comparison": "ML inference — ONNX Runtime is cross-platform; TensorRT is NVIDIA GPU-only"
      }
    },
    {
      "from": "tool:onnx-runtime",
      "to": "tool:triton-inference",
      "kind": "alternative_to",
      "attributes": {
        "comparison": "ML inference — ONNX Runtime is an inference engine; Triton is a serving platform"
      }
    }
  ],
  "incomingEdges": [
    {
      "from": "specialization:ml-inference-serving",
      "to": "tool:onnx-runtime",
      "kind": "uses_tool"
    },
    {
      "from": "tool:vllm",
      "to": "tool:onnx-runtime",
      "kind": "alternative_to",
      "attributes": {
        "comparison": "ML inference — vLLM is LLM-specific GPU; ONNX Runtime is cross-platform multi-model"
      }
    },
    {
      "from": "tool:tensorrt",
      "to": "tool:onnx-runtime",
      "kind": "alternative_to",
      "attributes": {
        "comparison": "ML inference — TensorRT is NVIDIA GPU-only; ONNX Runtime is cross-platform"
      }
    },
    {
      "from": "tool:triton-inference",
      "to": "tool:onnx-runtime",
      "kind": "alternative_to",
      "attributes": {
        "comparison": "ML inference — Triton is a serving platform; ONNX Runtime is an inference engine"
      }
    }
  ]
}
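
Reading the record · Python sketch

A minimal sketch of consuming a payload like the one above, assuming only the field names shown in the record (id, attributes, outgoingEdges, kind, attributes.comparison). The load_record helper and the local file path are hypothetical stand-ins for however the atlas actually serves this JSON.

import json

def load_record(path: str) -> dict:
    # Hypothetical loader: reads a saved copy of the record payload.
    with open(path, encoding="utf-8") as f:
        return json.load(f)

def alternatives(record: dict) -> list[tuple[str, str]]:
    # Collect (tool id, comparison note) pairs from alternative_to edges,
    # using the edge shape shown in the payload above.
    out = []
    for edge in record.get("outgoingEdges", []):
        if edge.get("kind") == "alternative_to":
            note = edge.get("attributes", {}).get("comparison", "")
            out.append((edge["to"], note))
    return out

record = load_record("onnx-runtime.json")  # hypothetical local copy of the payload
print(record["attributes"]["displayName"])
for tool_id, note in alternatives(record):
    print(f"  {tool_id}: {note}")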
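
Running an ONNX model · Python sketch

To ground the description field above, here is a minimal CPU inference sketch using the onnxruntime Python API. "model.onnx" is a placeholder for any model exported from PyTorch, TensorFlow, or scikit-learn, and the input shape is an assumption for illustration; both depend on the model you actually export.

import numpy as np
import onnxruntime as ort

# Open the exported model on the CPU execution provider; other providers
# named in the record (CUDA, DirectML, CoreML, ROCm) can be listed instead
# where the corresponding hardware and build are available.
session = ort.InferenceSession(
    "model.onnx",
    providers=["CPUExecutionProvider"],
)

input_name = session.get_inputs()[0].name
x = np.random.rand(1, 3, 224, 224).astype(np.float32)  # example input; shape is model-specific

outputs = session.run(None, {input_name: x})  # None = return all model outputs
print(outputs[0].shape)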