- PA prompt updated: routes ANY task needing tools (DB, UI, buttons, machines) to expert. Only social chat stays with PA. - Expert descriptions include UI capabilities (buttons, machines, tables) - Dashboard integration test: expert creates/replaces buttons, machines, tables — all persist correctly across queries - v4-eras scores: fast 27/28, expert 23/23, dashboard 15/15, progress 11/11 Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
162 lines
6.8 KiB
Python
"""Personal Assistant Node: routes to domain experts, holds user context."""
|
|
|
|
import json
|
|
import logging
|
|
|
|
from .base import Node
|
|
from ..llm import llm_call
|
|
from ..types import Command, PARouting
|
|
|
|
log = logging.getLogger("runtime")
|
|
|
|
|
|
class PANode(Node):
    """Personal Assistant node: routes user requests to domain experts.

    The PA is the conversational entry point of the runtime. For each
    incoming command it decides whether the request needs a domain expert
    (anything that requires tools: DB queries, dashboard UI, buttons,
    machines, counters, reports) or can be answered directly (social chat
    only), and returns a PARouting describing that decision.
    """

    name = "pa_v1"
    model = "anthropic/claude-haiku-4.5"
    max_context_tokens = 4000

    # Routing prompt. Placeholders are filled per-request in route():
    #   {identity}/{channel}       — who is speaking and on which channel
    #   {experts}/{expert_names}   — experts available in the current graph
    #   {memory_context}           — long-term memory snippet
    SYSTEM = """You are the Personal Assistant (PA) — the user's companion in this cognitive runtime.
You manage the user's dashboard and route work to domain experts.

Listener: {identity} on {channel}

Available experts:
{experts}

Experts have these tools:
- query_db — SQL queries on their domain database
- emit_actions — create buttons on the dashboard
- create_machine / add_state / reset_machine / destroy_machine — interactive UI components
- set_state — persistent key-value store
- emit_display — formatted data display

YOUR JOB:
1. Understand what the user wants
2. Route to the expert for ANY task that needs tools (DB, UI, buttons, machines, counters, reports)
3. Only respond directly for social chat (greetings, thanks, bye, small talk)

Output ONLY valid JSON:
{{
"expert": "{expert_names} | none",
"job": "Self-contained task. Include ALL context — the expert has NO conversation history. Describe what to query, what UI to build, what the user expects to see.",
"thinking_message": "Short message for user while expert works, in their language",
"response_hint": "If expert=none, your direct response to the user.",
"language": "de | en | mixed"
}}

Rules:
- expert=none ONLY for social chat (hi, thanks, bye, how are you)
- ANY request to create, build, show, query, investigate, count, list, describe → route to expert
- The job must be fully self-contained. Include relevant facts from memory.
- thinking_message: natural, in user's language. e.g. "Moment, ich schaue nach..."
- If the user mentions data, tables, customers, devices, buttons, counters → expert
- When unsure which expert: pick the one whose domain matches best

{memory_context}"""

    # Human-readable blurbs injected into the prompt for known experts;
    # unknown expert names get a generic "<name> — domain expert" line.
    EXPERT_DESCRIPTIONS = {
        "eras": "eras — heating/energy domain. Database: eras2_production (customers, devices, billing, consumption). Can also build dashboard UI (buttons, machines, counters, tables) for energy data workflows.",
        "plankiste": "plankiste — Kita planning domain. Database: plankiste_test (children, care schedules, offers, pricing). Can build dashboard UI for education workflows and generate Angebote.",
    }

    def __init__(self, send_hud):
        """Initialize with the HUD callback; starts with no experts wired in."""
        super().__init__(send_hud)
        # Mutable behavioural directive; "style" is re-derived from the
        # user's tone after every routed turn (see route()).
        self.directive: dict = {"mode": "assistant", "style": "helpful and concise"}
        self._available_experts: list[str] = []

    def set_available_experts(self, experts: list[str]):
        """Called by frame engine to tell PA which experts are in this graph."""
        self._available_experts = experts

    def get_context_line(self) -> str:
        """Return a one-line summary of the PA's current directive."""
        d = self.directive
        return f"PA: {d['mode']} mode. {d['style']}."

    async def route(self, command: Command, history: list[dict],
                    memory_context: str = "", identity: str = "unknown",
                    channel: str = "unknown") -> PARouting:
        """Decide which expert handles this request.

        Args:
            command: Analyzed user command (intent/language/tone + source text).
            history: Recent conversation messages as {"role", "content"} dicts.
            memory_context: Long-term memory snippet injected into the prompt.
            identity: Who is speaking (prompt context only).
            channel: Where the message arrived (prompt context only).

        Returns:
            PARouting with the chosen expert ("none" for direct replies), a
            self-contained job description, and user-facing messages.
        """
        await self.hud("thinking", detail="routing request")

        # Build expert list for prompt
        expert_lines = []
        for name in self._available_experts:
            desc = self.EXPERT_DESCRIPTIONS.get(name, f"{name} — domain expert")
            expert_lines.append(f"- {desc}")
        if not expert_lines:
            expert_lines.append("- (no experts available — handle everything directly)")

        expert_names = " | ".join(self._available_experts) if self._available_experts else "none"
        messages = [
            {"role": "system", "content": self.SYSTEM.format(
                memory_context=memory_context, identity=identity, channel=channel,
                experts="\n".join(expert_lines), expert_names=expert_names)},
        ]

        # Summarize recent history (PA sees full context); entries are
        # truncated to 100 chars to keep the routing prompt small.
        recent = history[-12:]
        if recent:
            lines = []
            for msg in recent:
                role = msg.get("role", "?")
                content = msg.get("content", "")[:100]
                lines.append(f" {role}: {content}")
            messages.append({"role": "user", "content": "Recent conversation:\n" + "\n".join(lines)})
            # Priming assistant turn so the next user message is the actual task.
            messages.append({"role": "assistant", "content": "OK, I have the context."})

        a = command.analysis
        messages.append({"role": "user",
                        "content": f"Route this message (intent={a.intent}, lang={a.language}, tone={a.tone}):\n{command.source_text}"})
        messages = self.trim_context(messages)

        await self.hud("context", messages=messages, tokens=self.last_context_tokens,
                       max_tokens=self.max_context_tokens, fill_pct=self.context_fill_pct)

        raw = await llm_call(self.model, messages)
        # Lazy %-args: avoids formatting work when INFO logging is disabled.
        log.info("[pa] raw: %s", raw[:300])

        routing = self._parse_routing(raw, command)
        await self.hud("routed", expert=routing.expert, job=routing.job[:100],
                       direct=routing.expert == "none")

        # Update directive style based on tone so later turns match the mood.
        if a.tone == "frustrated":
            self.directive["style"] = "patient and empathetic"
        elif a.tone == "playful":
            self.directive["style"] = "light and fun"
        else:
            self.directive["style"] = "helpful and concise"

        return routing

    def _parse_routing(self, raw: str, command: Command) -> PARouting:
        """Parse LLM JSON into PARouting with fallback.

        Strips an optional markdown code fence, validates the chosen expert
        against the available set, and on any failure falls back to a direct
        response that echoes the user's text.
        """
        text = raw.strip()
        # Strip a leading ``` / ```json fence and a trailing ``` if present.
        if text.startswith("```"):
            text = text.split("\n", 1)[1] if "\n" in text else text[3:]
        if text.endswith("```"):
            text = text[:-3]
        text = text.strip()

        try:
            data = json.loads(text)
            expert = data.get("expert", "none")
            # Validate expert is available — the model may hallucinate a name.
            if expert != "none" and expert not in self._available_experts:
                log.warning("[pa] expert '%s' not available, falling back to none", expert)
                expert = "none"
            return PARouting(
                expert=expert,
                # `or ""` also guards against an explicit JSON null value.
                job=data.get("job") or "",
                thinking_message=data.get("thinking_message") or "",
                response_hint=data.get("response_hint") or "",
                language=data.get("language") or command.analysis.language,
            )
        # Broad catch is deliberate: a routing failure must never crash the
        # turn.  (Original listed json.JSONDecodeError redundantly alongside
        # Exception — Exception already covers it.)
        except Exception as e:
            log.error("[pa] parse failed: %s, raw: %s", e, text[:200])
            return PARouting(
                expert="none",
                response_hint=command.source_text,
                language=command.analysis.language,
            )
|