diff --git a/agent/nodes/output.py b/agent/nodes/output.py
index 35e659e..de84e89 100644
--- a/agent/nodes/output.py
+++ b/agent/nodes/output.py
@@ -1,4 +1,4 @@
-"""Output Node: streams natural response to the user."""
+"""Output Node: renders Thinker's reasoning into device-appropriate responses."""
import json
import logging
@@ -7,7 +7,7 @@ from fastapi import WebSocket
from .base import Node
from ..llm import llm_call
-from ..types import Command
+from ..types import Command, ThoughtResult
log = logging.getLogger("runtime")
@@ -17,14 +17,25 @@ class OutputNode(Node):
model = "google/gemini-2.0-flash-001"
max_context_tokens = 4000
- SYSTEM = """You are the Output node — the voice of this cognitive runtime.
-The Input node sends you its perception of what the user said. This is internal context for you — never repeat or echo it.
-You respond to the USER, not to the Input node. Use the perception to understand intent, then act on it.
-Be natural. Be concise. If the user asks you to do something, do it — don't describe what you're about to do.
+ SYSTEM = """You are the Output node — the renderer of this cognitive runtime.
+
+DEVICE: The user is on a web browser (Chrome, desktop). Your output renders in an HTML chat panel.
+You can use markdown: **bold**, *italic*, `code`, ```code blocks```, lists, headers.
+The chat panel renders markdown to HTML — use it for structure when helpful.
+
+YOUR JOB: Transform the Thinker's reasoning into a polished, user-facing response.
+- The Thinker reasons and may use tools. You receive its output and render it for the human.
+- NEVER echo internal node names, perceptions, or system details.
+- NEVER say "the Thinker decided..." or "I'll process..." — just deliver the answer.
+- If the Thinker ran a tool and got output, weave the results into a natural response.
+- If the Thinker gave a direct answer, refine and format it — don't just repeat it.
+- Keep the user's language — if they wrote German, respond in German.
+- Be concise but complete. Use formatting to make data scannable.
{memory_context}"""
- async def process(self, command: Command, history: list[dict], ws: WebSocket, memory_context: str = "") -> str:
+ async def process(self, thought: ThoughtResult, history: list[dict],
+ ws: WebSocket, memory_context: str = "") -> str:
await self.hud("streaming")
messages = [
@@ -32,7 +43,15 @@ Be natural. Be concise. If the user asks you to do something, do it — don't de
]
for msg in history[-20:]:
messages.append(msg)
- messages.append({"role": "system", "content": f"Input perception: {command.instruction}"})
+
+ # Give Output the full Thinker result to render
+ thinker_ctx = f"Thinker response: {thought.response}"
+ if thought.tool_used:
+ thinker_ctx += f"\n\nTool used: {thought.tool_used}\nTool output:\n{thought.tool_output}"
+ if thought.controls:
+ thinker_ctx += f"\n\n(UI controls were also sent to the user: {len(thought.controls)} elements)"
+ messages.append({"role": "system", "content": thinker_ctx})
+
messages = self.trim_context(messages)
await self.hud("context", messages=messages, tokens=self.last_context_tokens,
diff --git a/agent/runtime.py b/agent/runtime.py
index 8412bc6..ef16f62 100644
--- a/agent/runtime.py
+++ b/agent/runtime.py
@@ -84,8 +84,8 @@ class Runtime:
if thought.controls:
await self.ws.send_text(json.dumps({"type": "controls", "controls": thought.controls}))
- await self._stream_text(thought.response)
- self.history.append({"role": "assistant", "content": thought.response})
+ response = await self.output_node.process(thought, self.history, self.ws, memory_context=mem_ctx)
+ self.history.append({"role": "assistant", "content": response})
await self.memorizer.update(self.history)
@@ -116,17 +116,8 @@ class Runtime:
if thought.controls:
await self.ws.send_text(json.dumps({"type": "controls", "controls": thought.controls}))
- if thought.tool_used:
- # Thinker already formulated response from tool output — stream directly
- await self._stream_text(thought.response)
- response = thought.response
- else:
- # Pure conversation — Output node adds personality and streams
- command = Command(
- instruction=f"Thinker says: {thought.response}",
- source_text=command.source_text
- )
- response = await self.output_node.process(command, self.history, self.ws, memory_context=mem_ctx)
+ # Output renders Thinker's reasoning into device-appropriate response
+ response = await self.output_node.process(thought, self.history, self.ws, memory_context=mem_ctx)
self.history.append({"role": "assistant", "content": response})
diff --git a/static/app.js b/static/app.js
index ed9160a..f9b0879 100644
--- a/static/app.js
+++ b/static/app.js
@@ -132,6 +132,7 @@ function connect() {
} else if (data.type === 'controls') {
renderControls(data.controls);
+ dockControls(data.controls);
}
};
}
@@ -169,14 +170,17 @@ function handleHud(data) {
}).join(' ');
const detail = JSON.stringify(data.state, null, 2);
addTrace(node, 'state', pairs, 'state', detail);
+ updateAwarenessState(data.state);
} else if (event === 'process_start') {
addTrace(node, 'run ' + (data.tool || 'python'), truncate(data.code || '', 80), 'instruction', data.code);
showProcessCard(data.pid, data.tool || 'python', data.code || '');
+ showAwarenessProcess(data.pid, data.tool || 'python', data.code || '');
} else if (event === 'process_done') {
addTrace(node, (data.exit_code === 0 ? 'done' : 'failed'), truncate(data.output || '', 80), data.exit_code === 0 ? '' : 'error', data.output);
updateProcessCard(data.pid, data.exit_code === 0 ? 'done' : 'failed', data.output || '', data.elapsed);
+ updateAwarenessProcess(data.pid, data.exit_code === 0 ? 'done' : 'failed', data.output || '', data.elapsed);
} else if (event === 'error') {
addTrace(node, 'error', data.detail || '', 'error');
@@ -202,6 +206,7 @@ function handleHud(data) {
const deltas = Object.entries(data.deltas).map(([k,v]) => k + '=' + truncate(String(v), 30)).join(' ');
addTrace(node, 'tick #' + data.tick, deltas);
}
+ updateAwarenessSensors(data.tick || 0, data.deltas || {});
} else if (event === 'started' || event === 'stopped') {
const meter = document.getElementById('meter-sensor');
@@ -370,5 +375,144 @@ function send() {
inputEl.value = '';
}
+// --- Awareness panel updates ---
+
+let _sensorReadings = {};
+
+function updateAwarenessState(state) {
+ const body = document.getElementById('aw-state-body');
+ if (!body) return;
+ const display = [
+ ['user', state.user_name],
+ ['mood', state.user_mood],
+ ['topic', state.topic],
+ ['language', state.language],
+ ['style', state.style_hint],
+ ['situation', state.situation],
+ ];
+ let html = '';
+ for (const [k, v] of display) {
+ if (!v) continue;
+ const moodCls = k === 'mood' ? ' mood-' + v : '';
+        html += '<div class="kv"><span class="kv-key">' + esc(k) + '</span><span class="kv-val' + moodCls + '">' + esc(String(v)) + '</span></div>';
+ }
+ const facts = state.facts || [];
+ if (facts.length) {
+        html += '<div class="facts">';
+        for (const f of facts) html += '<div class="fact">' + esc(f) + '</div>';
+        html += '</div>';
+ }
+ body.innerHTML = html || 'no state yet';
+}
+
+function updateAwarenessSensors(tick, deltas) {
+ // Merge deltas into persistent readings
+ for (const [k, v] of Object.entries(deltas)) {
+ _sensorReadings[k] = { value: v, at: Date.now() };
+ }
+ const body = document.getElementById('aw-sensors-body');
+ if (!body) return;
+ const entries = Object.entries(_sensorReadings);
+ if (!entries.length) { body.innerHTML = 'waiting for tick...'; return; }
+ let html = '';
+ for (const [name, r] of entries) {
+ const age = Math.round((Date.now() - r.at) / 1000);
+ const ageStr = age < 5 ? 'now' : age < 60 ? age + 's' : Math.floor(age / 60) + 'm';
+ html += '