Architecture: - director_v2: always-on brain, produces DirectorPlan with tool_sequence - thinker_v2: pure executor, runs tools from DirectorPlan - interpreter_v1: factual result summarizer, no hallucination - v2_director_drives graph: Input -> Director -> Thinker -> Output Infrastructure: - Split into 3 pods: cog-frontend (nginx), cog-runtime (FastAPI), cog-mcp (SSE proxy) - MCP survives runtime restarts (separate pod, proxies via HTTP) - Async send pipeline: /api/send/check -> /api/send -> /api/result with progress - Zero-downtime rolling updates (maxUnavailable: 0) - Dynamic graph visualization (fetched from API, not hardcoded) Tests: 22 new mocked unit tests (director_v2: 7, thinker_v2: 8, interpreter_v1: 7) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
102 lines
4.7 KiB
Python
"""Run all node-level unit tests."""
|
|
|
|
import sys
|
|
import time
|
|
from pathlib import Path
|
|
|
|
# Ensure we can import from parent
|
|
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
sys.path.insert(0, str(Path(__file__).parent))
|
|
|
|
from harness import NodeTestRunner
|
|
|
|
# Import all test modules
|
|
import test_input_v1
|
|
import test_thinker_v1
|
|
import test_memorizer_v1
|
|
import test_director_v1
|
|
import test_director_v2
|
|
import test_thinker_v2
|
|
import test_interpreter_v1
|
|
|
|
runner = NodeTestRunner()
|
|
t0 = time.time()
|
|
|
|
print("\n" + "=" * 60)
|
|
print(" Node-Level Unit Tests")
|
|
print("=" * 60)
|
|
|
|
# Input v1
|
|
print("\n--- InputNode v1 ---")
|
|
runner.test("greeting is social+trivial", test_input_v1.test_greeting_is_social_trivial())
|
|
runner.test("german detected", test_input_v1.test_german_detected())
|
|
runner.test("request classified", test_input_v1.test_request_classified())
|
|
runner.test("frustrated tone", test_input_v1.test_frustrated_tone())
|
|
runner.test("emits perceived HUD", test_input_v1.test_emits_perceived_hud())
|
|
runner.test("source text preserved", test_input_v1.test_source_text_preserved())
|
|
|
|
# Thinker v1
|
|
print("\n--- ThinkerNode v1 ---")
|
|
runner.test("simple response", test_thinker_v1.test_simple_response())
|
|
runner.test("no code in response", test_thinker_v1.test_no_code_in_response())
|
|
runner.test("emits tool calls for buttons", test_thinker_v1.test_emits_tool_calls_for_buttons())
|
|
runner.test("query_db called for DB question", test_thinker_v1.test_query_db_called())
|
|
runner.test("S3* audit mechanism", test_thinker_v1.test_s3_audit_code_without_tools())
|
|
runner.test("decided HUD emitted", test_thinker_v1.test_decided_hud_emitted())
|
|
|
|
# Memorizer v1
|
|
print("\n--- MemorizerNode v1 ---")
|
|
runner.test("extracts mood", test_memorizer_v1.test_extracts_mood())
|
|
runner.test("extracts language", test_memorizer_v1.test_extracts_language())
|
|
runner.test("facts preserved across updates", test_memorizer_v1.test_facts_preserved_across_updates())
|
|
runner.test("topic tracked", test_memorizer_v1.test_topic_tracked())
|
|
runner.test("emits updated HUD", test_memorizer_v1.test_emits_updated_hud())
|
|
|
|
# Director v1
|
|
print("\n--- DirectorNode v1 ---")
|
|
runner.test("detects casual mode", test_director_v1.test_detects_casual_mode())
|
|
runner.test("detects frustrated style", test_director_v1.test_detects_frustrated_style())
|
|
runner.test("produces plan for complex request", test_director_v1.test_produces_plan_for_complex_request())
|
|
runner.test("directive has required fields", test_director_v1.test_directive_has_required_fields())
|
|
runner.test("context line includes plan", test_director_v1.test_context_line_includes_plan())
|
|
|
|
# Director v2
|
|
print("\n--- DirectorNode v2 ---")
|
|
runner.test("returns DirectorPlan", test_director_v2.test_returns_director_plan())
|
|
runner.test("direct response for simple", test_director_v2.test_direct_response_for_simple())
|
|
runner.test("multi-step plan", test_director_v2.test_multi_step_plan())
|
|
runner.test("emits HUD events", test_director_v2.test_emits_hud_events())
|
|
runner.test("still updates style directive", test_director_v2.test_still_updates_style_directive())
|
|
runner.test("history included in context", test_director_v2.test_history_included_in_context())
|
|
runner.test("bad JSON returns fallback", test_director_v2.test_bad_json_returns_fallback())
|
|
|
|
# Thinker v2
|
|
print("\n--- ThinkerNode v2 ---")
|
|
runner.test("executes emit_actions", test_thinker_v2.test_executes_emit_actions())
|
|
runner.test("executes set_state", test_thinker_v2.test_executes_set_state())
|
|
runner.test("executes query_db", test_thinker_v2.test_executes_query_db())
|
|
runner.test("direct response no tools", test_thinker_v2.test_direct_response_no_tools())
|
|
runner.test("no autonomous tool calls", test_thinker_v2.test_no_autonomous_tool_calls())
|
|
runner.test("multi tool sequence", test_thinker_v2.test_multi_tool_sequence())
|
|
runner.test("emits HUD per tool", test_thinker_v2.test_emits_hud_per_tool())
|
|
runner.test("create_machine tool", test_thinker_v2.test_create_machine_tool())
|
|
|
|
# Interpreter v1
|
|
print("\n--- InterpreterNode v1 ---")
|
|
runner.test("summarizes DB result", test_interpreter_v1.test_summarizes_db_result())
|
|
runner.test("handles empty result", test_interpreter_v1.test_handles_empty_result())
|
|
runner.test("handles tabular data", test_interpreter_v1.test_handles_tabular_data())
|
|
runner.test("no hallucination guard", test_interpreter_v1.test_no_hallucination_guard())
|
|
runner.test("emits HUD", test_interpreter_v1.test_emits_hud())
|
|
runner.test("bad JSON fallback", test_interpreter_v1.test_bad_json_fallback())
|
|
runner.test("python tool output", test_interpreter_v1.test_python_tool_output())
|
|
|
|
# Summary
|
|
elapsed = time.time() - t0
|
|
p, f = runner.summary()
|
|
print(f"\n{'=' * 60}")
|
|
print(f" TOTAL: {p} passed, {f} failed ({elapsed:.1f}s)")
|
|
print(f"{'=' * 60}")
|
|
|
|
sys.exit(0 if f == 0 else 1)
|