- Memorizer tracks user_expectation (conversational/delegated/waiting_input/observing) - Output node adjusts phrasing per expectation - PA retry loop: reformulates job on expert failure (all retries exhausted or tool skip) - Machine state in PA context: get_machine_summary includes current state, buttons, stored data - Expert writes to machine state via update_machine + transition_machine - Expanded baked schema coverage - Awareness panel shows color-coded expectation state - Dashboard and workspace component updates Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
113 lines
4.1 KiB
Python
"""Thinker Node v2: pure executor — runs tools as directed by Director."""
|
|
|
|
import asyncio
|
|
import json
|
|
import logging
|
|
|
|
from .base import Node
|
|
from ..llm import llm_call
|
|
from ..db import run_db_query
|
|
from ..process import ProcessManager
|
|
from ..types import Command, DirectorPlan, ThoughtResult
|
|
|
|
log = logging.getLogger("runtime")
|
|
|
|
|
|
class ThinkerV2Node(Node):
    """Pure executor node.

    The Director (a smarter model) has already decided *what* to do and
    encoded it in a ``DirectorPlan``. This node walks the plan's
    ``tool_sequence`` in order, collects the side effects (actions, state
    updates, display items, machine ops, DB query output), then asks a fast
    LLM to phrase a short user-facing response based on the results.
    """

    name = "thinker_v2"
    model = "google/gemini-2.0-flash-001"  # Fast model — just executes
    max_context_tokens = 4000

    RESPONSE_SYSTEM = """You are the Thinker — a fast executor in a cognitive runtime.
The Director (a smart model) already decided what to do. You just executed the tools.
Now write a natural response to the user based on the results.

{hint}

Rules:
- Be concise and natural.
- If tool results contain data, summarize it clearly.
- NEVER apologize. NEVER say "I" — you are part of a team.
- Keep it short: 1-3 sentences for simple responses.
- For data: reference the numbers, don't repeat raw output."""

    # Maps machine-manipulation tool names to the "op" code recorded in
    # the machine_ops list that downstream consumers apply.
    _MACHINE_OPS = {
        "create_machine": "create",
        "add_state": "add_state",
        "reset_machine": "reset",
        "destroy_machine": "destroy",
    }

    def __init__(self, send_hud, process_manager: ProcessManager | None = None):
        """Create the node.

        Args:
            send_hud: Callback forwarded to the Node base for HUD events.
            process_manager: Optional process manager; stored for subclass /
                caller use (not referenced directly in this class).
        """
        super().__init__(send_hud)
        self.pm = process_manager

    async def _run_query(self, args: dict) -> tuple[str, str]:
        """Execute one query_db step off the event loop.

        Returns a ``(tool_used, tool_output)`` pair; on failure the output
        is an ``"Error: ..."`` string rather than raising, so a bad query
        never aborts the rest of the plan.
        """
        query = args.get("query", "")
        database = args.get("database", "eras2_production")
        try:
            # run_db_query is blocking; push it onto a worker thread.
            result = await asyncio.to_thread(run_db_query, query, database)
        except Exception as e:
            await self.hud("tool_result", tool="query_db", output=str(e)[:200])
            return "query_db", f"Error: {e}"
        await self.hud("tool_result", tool="query_db", output=result[:200])
        return "query_db", result

    async def process(self, command: Command, plan: DirectorPlan,
                      history: list[dict], memory_context: str = "") -> ThoughtResult:
        """Execute Director's plan and produce ThoughtResult.

        Args:
            command: The originating user command (source_text is quoted to
                the response model).
            plan: Director's plan; ``tool_sequence`` is executed in order.
            history: Recent chat messages; the last 8 are given as context.
            memory_context: Accepted for interface parity; currently unused.

        Returns:
            ThoughtResult bundling the phrased response plus all collected
            tool side effects. If several query_db steps appear in one plan,
            only the last one's output is retained.
        """
        await self.hud("thinking", detail=f"executing plan: {plan.goal}")

        actions: list = []
        state_updates: dict = {}
        display_items: list = []
        machine_ops: list[dict] = []
        tool_used = ""
        tool_output = ""

        # Execute tool_sequence in order.
        for step in plan.tool_sequence:
            tool = step.get("tool", "")
            args = step.get("args", {})
            await self.hud("tool_call", tool=tool, args=args)

            if tool == "emit_actions":
                actions.extend(args.get("actions", []))
            elif tool == "set_state":
                key = args.get("key", "")
                if key:  # silently skip malformed steps with no key
                    state_updates[key] = args.get("value")
            elif tool == "emit_display":
                display_items.extend(args.get("items", []))
            elif tool in self._MACHINE_OPS:
                machine_ops.append({"op": self._MACHINE_OPS[tool], **args})
            elif tool == "query_db":
                tool_used, tool_output = await self._run_query(args)

        # Generate the user-facing text response.
        hint = plan.response_hint or f"Goal: {plan.goal}"
        if tool_output:
            hint += f"\nTool result:\n{tool_output[:500]}"

        # str.replace (not str.format) so braces inside the hint text — e.g.
        # JSON in a tool result — cannot break template substitution.
        messages = [
            {"role": "system", "content": self.RESPONSE_SYSTEM.replace("{hint}", hint)},
        ]
        messages.extend(history[-8:])
        messages.append({"role": "user", "content": command.source_text})
        messages = self.trim_context(messages)

        response = await llm_call(self.model, messages)
        if not response:
            response = "[no response]"

        await self.hud("decided", instruction=response[:200])

        return ThoughtResult(
            response=response,
            tool_used=tool_used,
            tool_output=tool_output,
            actions=actions,
            state_updates=state_updates,
            display_items=display_items,
            machine_ops=machine_ops,
        )