II.
Benchmark JSON
Structured · benchmark:lmsys-arena
Chatbot Arena (LMSYS) JSON
Inspect the normalized record payload exactly as the atlas UI reads it.
{
"id": "benchmark:lmsys-arena",
"_kind": "Benchmark",
"_file": "benchmarks/benchmarks/benchmarks-leaderboards.yaml",
"_cluster": "benchmarks",
"attributes": {
"displayName": "Chatbot Arena (LMSYS)",
"homepageUrl": "https://lmarena.ai/",
"kind": "agent-leaderboard",
"targetsKind": "ModelVersion",
"description": "Chatbot Arena (LMSYS / LMArena) is a crowdsourced LLM evaluation\nplatform that ranks models via blinded pairwise human votes,\nproducing an Elo-style leaderboard.\n"
},
"outgoingEdges": [
{
"from": "benchmark:lmsys-arena",
"to": "skill-area:general-knowledge-reasoning",
"kind": "covers",
"attributes": {}
},
{
"from": "benchmark:lmsys-arena",
"to": "domain:ml-ai",
"kind": "applies_to",
"attributes": {}
}
],
"incomingEdges": []
}