Architecture: - Graph engine (engine.py) loads graph definitions, instantiates nodes - Versioned nodes: input_v1, thinker_v1, output_v1, memorizer_v1, director_v1 - NODE_REGISTRY for dynamic node lookup by name - Graph API: /api/graph/active, /api/graph/list, /api/graph/switch - Graph definition: graphs/v1_current.py (7 nodes, 13 edges, 3 edge types) S3* Audit system: - Workspace mismatch detection (server vs browser controls) - Code-without-tools retry (Thinker wrote code but no tool calls) - Intent-without-action retry (request intent but Thinker only produced text) - Dashboard feedback: browser sends workspace state on every message - Sensor continuous comparison on 5s tick State machines: - create_machine / add_state / reset_machine / destroy_machine via function calling - Local transitions (go:) resolve without LLM round-trip - Button persistence across turns Database tools: - query_db tool via pymysql to MariaDB K3s pod (eras2_production) - Table rendering in workspace (tab-separated parsing) - Director pre-planning with Opus for complex data requests - Error retry with corrected SQL Frontend: - Cytoscape.js pipeline graph with real-time node animations - Overlay scrollbars (CSS-only, no reflow) - Tool call/result trace events - S3* audit events in trace Testing: - 167 integration tests (11 test suites) - 22 node-level unit tests (test_nodes/) - Three test levels: node unit, graph integration, scenario Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
63 lines
2.5 KiB
Python
63 lines
2.5 KiB
Python
"""Unit tests for InputNode v1 — structured JSON analyst."""
|
|
|
|
from harness import HudCapture, make_envelope, make_history, NodeTestRunner
|
|
|
|
from agent.nodes.input_v1 import InputNode
|
|
|
|
|
|
async def test_greeting_is_social_trivial():
    """A casual greeting must be analyzed as social intent at trivial complexity."""
    capture = HudCapture()
    node = InputNode(send_hud=capture)
    command = await node.process(make_envelope("hi there!"), [], memory_context="")
    analysis = command.analysis
    assert analysis.intent == "social", f"intent={analysis.intent}"
    assert analysis.complexity == "trivial", f"complexity={analysis.complexity}"
|
|
|
|
|
|
async def test_german_detected():
    """A German-language message should be tagged as 'de' (or 'mixed')."""
    capture = HudCapture()
    node = InputNode(send_hud=capture)
    command = await node.process(make_envelope("Wie spaet ist es?"), [], memory_context="")
    detected = command.analysis.language
    assert detected in ("de", "mixed"), f"language={detected}"
|
|
|
|
|
|
async def test_request_classified():
    """A build instruction should classify as request/action with non-trivial complexity."""
    capture = HudCapture()
    node = InputNode(send_hud=capture)
    command = await node.process(make_envelope("create a counter with buttons"), [], memory_context="")
    analysis = command.analysis
    assert analysis.intent in ("request", "action"), f"intent={analysis.intent}"
    assert analysis.complexity in ("simple", "complex"), f"complexity={analysis.complexity}"
|
|
|
|
|
|
async def test_frustrated_tone():
    """An exasperated complaint should read as frustrated (or urgent) in tone."""
    capture = HudCapture()
    node = InputNode(send_hud=capture)
    command = await node.process(
        make_envelope("this is broken, nothing works and I'm sick of it"),
        [],
        memory_context="",
    )
    tone = command.analysis.tone
    assert tone in ("frustrated", "urgent"), f"tone={tone}"
|
|
|
|
|
|
async def test_emits_perceived_hud():
    """Processing any message must emit a 'perceived' HUD event."""
    capture = HudCapture()
    node = InputNode(send_hud=capture)
    await node.process(make_envelope("hello"), [], memory_context="")
    seen = [e.get("event") for e in capture.events]
    assert capture.has("perceived"), f"events: {seen}"
|
|
|
|
|
|
async def test_source_text_preserved():
    """The original message text must survive processing unchanged on the command."""
    capture = HudCapture()
    node = InputNode(send_hud=capture)
    command = await node.process(make_envelope("show me 5 customers"), [], memory_context="")
    assert command.source_text == "show me 5 customers", f"source_text={command.source_text}"
|
|
|
|
|
|
if __name__ == "__main__":
    # Standalone runner: executes each test case in declaration order
    # and prints a pass/fail summary.
    runner = NodeTestRunner()
    print("\n=== InputNode v1 ===")
    cases = [
        ("greeting is social+trivial", test_greeting_is_social_trivial),
        ("german detected", test_german_detected),
        ("request classified", test_request_classified),
        ("frustrated tone", test_frustrated_tone),
        ("emits perceived HUD", test_emits_perceived_hud),
        ("source text preserved", test_source_text_preserved),
    ]
    for label, test_fn in cases:
        runner.test(label, test_fn())
    passed, failed = runner.summary()
    print(f"\n {passed} passed, {failed} failed")
|