II.
Tool JSON
Structured · livetool:openai
OpenAI JSON
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "tool:openai",
"_kind": "Tool",
"_file": "domain/tools/tools-ora-operations-discovery.yaml",
"_cluster": "domain",
"attributes": {
"displayName": "OpenAI",
"homepageUrl": "https://openai.com",
"kind": "api-tool",
"description": "AI platform surfaced by Ora for LLM, embeddings, image, prompt-evaluation, and agentic workflow development.\n",
"ora": {
"domain": "openai.com",
"score": 64,
"grade": "C",
"category": "AI & ML",
"tags": [
"llm",
"gpt",
"chat",
"embeddings",
"ai api",
"language model",
"completions",
"image generation"
],
"scoreUrl": "https://ora.run/score/openai.com"
}
},
"outgoingEdges": [
{
"from": "tool:openai",
"to": "source-ref:ora-discover-api",
"kind": "sourced_from"
},
{
"from": "tool:openai",
"to": "role:ml-engineer",
"kind": "supports_work",
"attributes": {
"confidence": "high",
"evidence": "LLM and embedding APIs support agent development and evaluation."
}
},
{
"from": "tool:openai",
"to": "workflow:agent-evaluation-cycle",
"kind": "supports_work",
"attributes": {
"confidence": "high",
"evidence": "Model responses and tool-use traces can be measured through agent evaluation cycles."
}
},
{
"from": "tool:openai",
"to": "workflow:prompt-engineering-iteration",
"kind": "supports_work",
"attributes": {
"confidence": "high",
"evidence": "Prompt and response behavior are direct inputs to prompt iteration."
}
}
],
"incomingEdges": [
{
"from": "agent-readiness-score:ora.openai-com",
"to": "tool:openai",
"kind": "scores_agent_readiness_of",
"attributes": {
"observedVia": "ora-discover-api"
}
}
]
}