Record
Agentic AI Atlas · conversation-quality-testing
lib-process:ai-agents-conversational--conversation-quality-testing · a5c.ai
II.
LibraryProcess JSON

lib-process:ai-agents-conversational--conversation-quality-testing

Structured · live

conversation-quality-testing json

Inspect the normalized record payload exactly as the atlas UI reads it.

File · generated-library/processes.yaml
Cluster · generated-library
Record JSON
{
  "id": "lib-process:ai-agents-conversational--conversation-quality-testing",
  "_kind": "LibraryProcess",
  "_file": "generated-library/processes.yaml",
  "_cluster": "generated-library",
  "attributes": {
    "displayName": "conversation-quality-testing",
    "description": "Conversation Quality Testing and Metrics - Process for measuring conversational AI quality including\nintent accuracy, dialogue success rate, user satisfaction (CSAT), response appropriateness, and conversation coherence.",
    "libraryPath": "library/specializations/ai-agents-conversational/conversation-quality-testing.js",
    "specialization": "ai-agents-conversational",
    "references": [
      "DSTC: https://dstc.community/",
      "Conversation Quality Metrics: https://aclanthology.org/2020.nlp4convai-1.8/"
    ],
    "example": "const result = await orchestrate('specializations/ai-agents-conversational/conversation-quality-testing', {\n  systemName: 'customer-chatbot',\n  qualityMetrics: ['intent-accuracy', 'dialogue-success', 'csat', 'coherence'],\n  testDataset: { conversations: 100 }\n});",
    "usesAgents": [
      "data-preparer",
      "llm-judge",
      "dialogue-tester",
      "coherence-evaluator",
      "feedback-analyst",
      "report-generator"
    ]
  },
  "outgoingEdges": [
    {
      "from": "lib-process:ai-agents-conversational--conversation-quality-testing",
      "to": "domain:software-engineering",
      "kind": "lib_applies_to_domain",
      "attributes": {
        "weight": 1
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--conversation-quality-testing",
      "to": "workflow:code-review",
      "kind": "lib_implements_workflow",
      "attributes": {
        "weight": 1
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--conversation-quality-testing",
      "to": "workflow:prompt-engineering-iteration",
      "kind": "lib_implements_workflow",
      "attributes": {
        "weight": 0.5
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--conversation-quality-testing",
      "to": "specialization:ai-agents-conversational",
      "kind": "lib_belongs_to_specialization",
      "attributes": {
        "weight": 0.9
      }
    },
    {
      "from": "lib-process:ai-agents-conversational--conversation-quality-testing",
      "to": "lib-agent:ai-agents-conversational--llm-judge",
      "kind": "uses_agent",
      "attributes": {
        "weight": 0.8
      }
    }
  ],
  "incomingEdges": []
}