Nico 3a9c2795cf v0.15.2: ES6 module refactor, 2-row layout, dashboard test, PA routing fix
Frontend refactored to ES6 modules (no bundler):
  js/main.js    — entry point, wires all modules
  js/auth.js    — OIDC login, token management
  js/ws.js      — /ws, /ws/test, /ws/trace connections + HUD handler
  js/chat.js    — messages, send, streaming
  js/graph.js   — Cytoscape visualization + animation
  js/trace.js   — trace panel
  js/dashboard.js — workspace controls rendering
  js/awareness.js — state panel, sensors, meters
  js/tests.js   — test status display
  js/util.js    — shared utilities

New 2-row layout:
  Top:    test status | connection status
  Middle: Workspace | Node Details | Graph
  Bottom: Chat | Awareness | Trace

PA routing: routes ALL tool requests to expert (DB, UI, buttons, machines)
Dashboard integration test: 15/15

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-29 17:58:47 +02:00

164 lines
7.0 KiB
Python

"""Personal Assistant Node: routes to domain experts, holds user context."""
import json
import logging
from .base import Node
from ..llm import llm_call
from ..types import Command, PARouting
log = logging.getLogger("runtime")
class PANode(Node):
    # Personal Assistant node: first hop for every user message. Decides
    # whether to answer directly (social chat) or hand a self-contained job
    # to a domain expert. Holds the user's conversational directive (tone).

    # Node identity and LLM configuration.
    name = "pa_v1"
    model = "anthropic/claude-haiku-4.5"
    # Context budget enforced via trim_context() before each LLM call.
    max_context_tokens = 4000

    # Routing prompt. Placeholders filled in route():
    #   {identity}/{channel} — who/where the PA is listening,
    #   {experts}           — bullet list of expert descriptions,
    #   {expert_names}      — "a | b" choices for the "expert" JSON field,
    #   {memory_context}    — pre-rendered memory facts.
    # Doubled braces {{ }} escape the literal JSON example.
    SYSTEM = """You are the Personal Assistant (PA) — the user's companion in this cognitive runtime.
You manage the user's dashboard and route work to domain experts.
Listener: {identity} on {channel}
Available experts:
{experts}
Experts have these tools:
- query_db — SQL queries on their domain database
- emit_actions — create buttons on the dashboard
- create_machine / add_state / reset_machine / destroy_machine — interactive UI components
- set_state — persistent key-value store
- emit_display — formatted data display
YOUR JOB:
1. Understand what the user wants
2. Route to the expert for ANY task that needs tools (DB, UI, buttons, machines, counters, reports)
3. Only respond directly for social chat (greetings, thanks, bye, small talk)
Output ONLY valid JSON:
{{
"expert": "{expert_names} | none",
"job": "Self-contained task. Include ALL context — the expert has NO conversation history. Describe what to query, what UI to build, what the user expects to see.",
"thinking_message": "Short message for user while expert works, in their language",
"response_hint": "If expert=none, your direct response to the user.",
"language": "de | en | mixed"
}}
Rules:
- expert=none ONLY for social chat (hi, thanks, bye, how are you)
- ANY request to create, build, show, query, investigate, count, list, describe, summarize → route to expert
- The job MUST be fully self-contained. The expert has NO history.
- Include relevant facts from memory AND conversation context in the job.
- For summaries/reports: include the key topics, findings, and actions from the conversation in the job so the expert can write a proper summary.
- thinking_message: natural, in user's language. e.g. "Moment, ich schaue nach..."
- If the user mentions data, tables, customers, devices, buttons, counters → expert
- When unsure which expert: pick the one whose domain matches best
{memory_context}"""

    # Curated prompt snippets per expert name; route() falls back to a
    # generic "<name> — domain expert" line for names not listed here.
    EXPERT_DESCRIPTIONS = {
        "eras": "eras — heating/energy domain. Database: eras2_production (customers, devices, billing, consumption). Can also build dashboard UI (buttons, machines, counters, tables) for energy data workflows.",
        "plankiste": "plankiste — Kita planning domain. Database: plankiste_test (children, care schedules, offers, pricing). Can build dashboard UI for education workflows and generate Angebote.",
    }
def __init__(self, send_hud):
    """Set up the PA node with a HUD sender and a default persona."""
    super().__init__(send_hud)
    # Mutable persona; route() rewrites "style" based on the user's tone.
    self.directive: dict = dict(mode="assistant", style="helpful and concise")
    # Filled in later by the frame engine via set_available_experts().
    self._available_experts: list[str] = []
def set_available_experts(self, experts: list[str]):
    """Record the experts present in the current graph (frame engine hook)."""
    self._available_experts = experts
def get_context_line(self) -> str:
    """Render the PA's current directive as a one-line context summary."""
    directive = self.directive
    return "PA: {} mode. {}.".format(directive["mode"], directive["style"])
async def route(self, command: Command, history: list[dict],
                memory_context: str = "", identity: str = "unknown",
                channel: str = "unknown") -> PARouting:
    """Decide which expert handles this request.

    Builds the routing prompt (system prompt + summarized recent history
    + the new message), calls the LLM, parses its JSON reply into a
    PARouting, and adapts the PA's conversational style to the user's
    detected tone.

    Args:
        command: Analyzed user command (carries intent/language/tone and
            the original source_text).
        history: Full conversation; only the last 16 turns are included,
            each content truncated to 200 chars.
        memory_context: Pre-rendered memory facts injected into SYSTEM.
        identity: Listener identity shown to the model.
        channel: Channel name shown to the model.

    Returns:
        PARouting with the chosen expert ("none" means the PA answers
        directly via response_hint).
    """
    await self.hud("thinking", detail="routing request")
    # One bullet per available expert; fall back to a generic line for
    # experts without a curated description.
    expert_lines = [
        f"- {self.EXPERT_DESCRIPTIONS.get(name, f'{name} — domain expert')}"
        for name in self._available_experts
    ]
    if not expert_lines:
        expert_lines.append("- (no experts available — handle everything directly)")
    expert_names = " | ".join(self._available_experts) if self._available_experts else "none"
    messages = [
        {"role": "system", "content": self.SYSTEM.format(
            memory_context=memory_context, identity=identity, channel=channel,
            experts="\n".join(expert_lines), expert_names=expert_names)},
    ]
    # Inline the recent history: the PA sees context, but experts do not,
    # so the routing job must carry anything relevant forward.
    recent = history[-16:]
    if recent:
        lines = [
            f" {msg.get('role', '?')}: {msg.get('content', '')[:200]}"
            for msg in recent
        ]
        messages.append({"role": "user", "content": "Recent conversation:\n" + "\n".join(lines)})
        # Priming turn: nudges the model to fold context into the job field.
        messages.append({"role": "assistant", "content": "OK, I have the context. I will include relevant details in the job description."})
    a = command.analysis
    messages.append({"role": "user",
                     "content": f"Route this message (intent={a.intent}, lang={a.language}, tone={a.tone}):\n{command.source_text}"})
    messages = self.trim_context(messages)
    await self.hud("context", messages=messages, tokens=self.last_context_tokens,
                   max_tokens=self.max_context_tokens, fill_pct=self.context_fill_pct)
    raw = await llm_call(self.model, messages)
    # Lazy %-args: the message is only formatted if INFO is enabled.
    log.info("[pa] raw: %s", raw[:300])
    routing = self._parse_routing(raw, command)
    await self.hud("routed", expert=routing.expert, job=routing.job[:100],
                   direct=routing.expert == "none")
    # Mirror the user's tone in subsequent turns (default: neutral style).
    self.directive["style"] = {
        "frustrated": "patient and empathetic",
        "playful": "light and fun",
    }.get(command.analysis.tone, "helpful and concise")
    return routing
def _parse_routing(self, raw: str, command: Command) -> PARouting:
    """Parse the LLM's JSON reply into a PARouting, with a safe fallback.

    Tolerates markdown code fences (``` or ```json) around the JSON and
    validates the chosen expert against the available set. On any parse
    or construction failure, degrades to expert="none" with the user's
    own text as the response hint.

    Args:
        raw: Raw LLM completion, expected to contain a JSON object.
        command: Originating command (supplies language/text fallbacks).

    Returns:
        PARouting; this method never raises.
    """
    text = raw.strip()
    # Strip a leading fence line (e.g. ```json) and a trailing ```.
    if text.startswith("```"):
        text = text.split("\n", 1)[1] if "\n" in text else text[3:]
    if text.endswith("```"):
        text = text[:-3]
    text = text.strip()
    try:
        data = json.loads(text)
        expert = data.get("expert", "none")
        # Guard against hallucinated expert names.
        if expert != "none" and expert not in self._available_experts:
            log.warning("[pa] expert '%s' not available, falling back to none", expert)
            expert = "none"
        return PARouting(
            expert=expert,
            job=data.get("job", ""),
            thinking_message=data.get("thinking_message", ""),
            response_hint=data.get("response_hint", ""),
            language=data.get("language", command.analysis.language),
        )
    # Deliberately broad: any malformed reply (bad JSON, wrong field types,
    # PARouting validation) must degrade to a direct answer, not crash.
    # (Was `except (json.JSONDecodeError, Exception)` — a redundant tuple.)
    except Exception as e:
        log.error("[pa] parse failed: %s, raw: %s", e, text[:200])
        return PARouting(
            expert="none",
            response_hint=command.source_text,
            language=command.analysis.language,
        )