Split 1161-line monolith into agent/ package: auth, llm, types, process, runtime, api, and nodes/ (base, sensor, input, output, thinker, memorizer). No logic changes — pure structural split. uvicorn agent:app entrypoint unchanged. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
49 lines · 1.9 KiB · Python
"""Input Node: perceives what the user said."""
|
|
|
|
import logging
|
|
|
|
from .base import Node
|
|
from ..llm import llm_call
|
|
from ..types import Envelope, Command
|
|
|
|
# NOTE(review): logger is named "runtime" rather than __name__ — presumably a
# single logger shared by all modules split out of the original monolith; confirm.
log = logging.getLogger("runtime")
|
|
|
|
|
|
class InputNode(Node):
    """Input Node: perceives what the user said.

    First stage of the runtime: takes the raw user utterance from an
    Envelope and asks the LLM for a one-sentence perception of it, which
    is passed downstream as a Command.
    """

    name = "input"
    model = "google/gemini-2.0-flash-001"
    max_context_tokens = 2000

    # Runtime prompt — formatted with identity/channel/memory_context in process().
    SYSTEM = """You are the Input node — the ear of this cognitive runtime.

Listener context:
- Authenticated user: {identity}
- Channel: {channel} (Chrome browser on Nico's Windows PC, in his room at home)
- Physical: private space, Nico lives with Tina — she may use this session too
- Security: single-user account, shared physical space — other voices are trusted household

Your job: describe what you heard. Who spoke, what they want, what tone, what context matters.
ONE sentence. No content, no response — just your perception of what came through.

{memory_context}"""

    async def process(self, envelope: Envelope, history: list[dict], memory_context: str = "",
                      identity: str = "unknown", channel: str = "unknown") -> Command:
        """Perceive the incoming utterance and distill it into a Command.

        Args:
            envelope: Incoming message; ``envelope.text`` is what the user said.
            history: Prior conversation turns; only the last 8 are forwarded.
            memory_context: Recalled-memory text injected into the system prompt.
            identity: Authenticated-user label for the system prompt.
            channel: Delivery-channel label for the system prompt.

        Returns:
            Command carrying the LLM's one-sentence perception
            (``instruction``) plus the original text (``source_text``).
        """
        await self.hud("thinking", detail="deciding how to respond")
        # Lazy %-style args: formatting is skipped entirely when INFO is disabled.
        log.info("[input] user said: %s", envelope.text)

        messages = [
            {"role": "system", "content": self.SYSTEM.format(
                memory_context=memory_context, identity=identity, channel=channel)},
        ]
        # Keep only the most recent turns; trim_context then enforces the token budget.
        messages.extend(history[-8:])
        messages = self.trim_context(messages)

        await self.hud("context", messages=messages, tokens=self.last_context_tokens,
                       max_tokens=self.max_context_tokens, fill_pct=self.context_fill_pct)
        instruction = await llm_call(self.model, messages)
        log.info("[input] -> command: %s", instruction)
        await self.hud("perceived", instruction=instruction)
        return Command(instruction=instruction, source_text=envelope.text)
|