II.
Tool JSON
Structured · live · tool:fireworks-ai
Fireworks AI JSON
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "tool:fireworks-ai",
"_kind": "Tool",
"_file": "domain/tools/tools-ora-operations-discovery.yaml",
"_cluster": "domain",
"attributes": {
"displayName": "Fireworks AI",
"homepageUrl": "https://fireworks.ai",
"kind": "api-tool",
"description": "LLM inference and model-serving API surfaced by Ora for LLMOps, prompt evaluation, and model monitoring workflows.\n",
"ora": {
"domain": "fireworks.ai",
"score": 75,
"grade": "B",
"category": "AI & ML",
"tags": [
"llm",
"inference",
"ai api",
"fast inference",
"model serving"
],
"scoreUrl": "https://ora.run/score/fireworks.ai"
}
},
"outgoingEdges": [
{
"from": "tool:fireworks-ai",
"to": "source-ref:ora-discover-api",
"kind": "sourced_from"
},
{
"from": "tool:fireworks-ai",
"to": "role:ml-engineer",
"kind": "supports_work",
"attributes": {
"confidence": "high",
"evidence": "Hosted inference supports model-serving and evaluation workflows."
}
},
{
"from": "tool:fireworks-ai",
"to": "workflow:agent-evaluation-cycle",
"kind": "supports_work",
"attributes": {
"confidence": "medium",
"evidence": "Model API outputs can be compared across prompts, eval suites, and agent runs."
}
},
{
"from": "tool:fireworks-ai",
"to": "workflow:llm-cost-optimization",
"kind": "supports_work",
"attributes": {
"confidence": "medium",
"evidence": "Inference provider choice affects cost, latency, and throughput trade-offs."
}
}
],
"incomingEdges": [
{
"from": "agent-readiness-score:ora.fireworks-ai",
"to": "tool:fireworks-ai",
"kind": "scores_agent_readiness_of",
"attributes": {
"observedVia": "ora-discover-api"
}
},
{
"from": "tool-server:mcp-fireworks-ai-candidate",
"to": "tool:fireworks-ai",
"kind": "integrates_with",
"attributes": {}
}
]
}