Soul Architecture
The soul system gives OWL a persistent, evolving personality.
Components
owl/soul/
├── loader.py # Soul data structure and persistence
├── evolver.py # Character evolution logic
├── reflector.py # Post-conversation reflection
└── formatter.py # Soul formatting for context
Soul Data Structure
YAML Schema
identity:
  name: OWL
  role: AI coding assistant with persistent memory
  version: "0.1"

character: |
  First-person narrative describing personality...

values:
  - Clarity over cleverness
  - Understand before implementing

principles:
  core:
    - Be helpful but honest
    - Ask when uncertain
  coding:
    - Follow project conventions
    - Write self-documenting code

code_preferences:
  python:
    - Use type hints
    - Prefer dataclasses
  general:
    - Meaningful names
    - Small functions

metadata:
  created: "2024-01-01"
  last_evolved: "2024-01-15"
  evolution_count: 3
Python Implementation
# soul/loader.py
@dataclass
class Identity:
    """The assistant's self-identity, as stored in the soul file."""

    # Display name the soul introduces itself with.
    name: str = "OWL"
    # One-line description of what the assistant is.
    role: str = "AI coding assistant"
    # Persona/schema version string.
    version: str = "0.1"
@dataclass
class Principles:
    """Behavioral principles, grouped by the area they govern."""

    # Principles applied to every interaction.
    core: List[str] = field(default_factory=list)
    # Principles specific to writing code.
    coding: List[str] = field(default_factory=list)
@dataclass
class CodePreferences:
    """Per-language coding style preferences."""

    # Preferences applied when writing Python.
    python: List[str] = field(default_factory=list)
    # Preferences applied when writing JavaScript.
    javascript: List[str] = field(default_factory=list)
    # Preferences that apply regardless of language.
    general: List[str] = field(default_factory=list)
@dataclass
class Soul:
    """Aggregate soul state: identity, personality prose, and preferences."""

    identity: Identity
    character: str                 # first-person personality narrative
    values: List[str]
    principles: Principles
    code_preferences: CodePreferences
    metadata: Dict[str, Any]       # created / last_evolved / evolution_count, etc.

    @classmethod
    def load(cls, path: Optional[Path] = None) -> "Soul":
        """Load the soul from a YAML file.

        Falls back to the configured soul path when *path* is None, and to
        the default soul when the file is missing or empty.
        """
        path = path or get_config().soul_path
        if path.exists():
            data = yaml.safe_load(path.read_text())
            # An empty or whitespace-only file parses to None; treat it the
            # same as a missing file instead of crashing in from_dict.
            if data:
                return cls.from_dict(data)
        return cls.default()

    def save(self, path: Optional[Path] = None) -> None:
        """Serialize the soul as YAML to *path* (default: configured path)."""
        path = path or get_config().soul_path
        # Ensure the target directory exists (first run, custom OWL_SOUL_PATH).
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(yaml.dump(self.to_dict()))
Reflection
Trigger
Reflection runs every 3 conversation exchanges:
# daemon/server.py
self._exchange_count += 1
if self._exchange_count % 3 == 0:
self.reflector.reflect_async(self.session_id, self.current_project)
Process
# soul/reflector.py
class SoulReflector:
    """Extracts durable learnings from recent conversation in the background."""

    def __init__(self, llm: "LLMClient"):
        self.llm = llm
        self.memory = get_memory_store()

    def reflect_async(self, session_id: str, project_path: Optional[str] = None) -> None:
        """Run reflection on a daemon thread so the caller is never blocked."""
        thread = threading.Thread(
            target=self._reflect,
            args=(session_id, project_path),
            daemon=True,
        )
        thread.start()

    def _reflect(self, session_id: str, project_path: Optional[str]) -> None:
        """Ask the LLM to distill learnings from recent messages and store them."""
        # Last 6 messages = 3 user/assistant exchanges.
        messages = self.memory.get_conversation(session_id, limit=6)
        if len(messages) < 4:
            return  # Not enough context to learn anything meaningful.
        prompt = REFLECTION_PROMPT.format(
            conversation=self._format_messages(messages)
        )
        result = self.llm.simple_query(prompt)
        for learning in self._parse_learnings(result):
            self.memory.add_learning(
                category=learning["category"],
                observation=learning["observation"],
                learning=learning["learning"],
                project_path=project_path if learning.get("scope") == "project" else None,
                scope=learning.get("scope", "global"),
                source="auto",
            )

    @staticmethod
    def _parse_learnings(raw: str) -> List[dict]:
        """Parse the LLM's JSON reply, dropping anything malformed.

        The model output is untrusted: it may not be JSON at all, may not
        be a list, and items may be missing required keys. Reflection is
        best-effort, so malformed output is skipped rather than raised —
        previously a missing key raised KeyError inside the daemon thread.
        """
        try:
            data = json.loads(raw)
        except json.JSONDecodeError:
            return []
        if not isinstance(data, list):
            return []
        required = ("category", "observation", "learning")
        return [
            item for item in data
            if isinstance(item, dict) and all(key in item for key in required)
        ]
Reflection Prompt
# Prompt for post-conversation reflection. Rendered with str.format(), so the
# literal braces in the JSON example MUST be doubled ({{ }}) to escape them —
# otherwise .format(conversation=...) raises on the unescaped "{".
# {conversation} is the only real placeholder.
REFLECTION_PROMPT = """
Analyze this conversation and extract learnings.
Focus on:
- User preferences (coding style, tools, approaches)
- Project-specific facts
- Communication preferences
- Technical insights
Return JSON array:
[{{
  "category": "preference|project|style|technical|feedback",
  "observation": "what you observed",
  "learning": "what to remember for future",
  "scope": "global|project"
}}]
Only include meaningful, actionable learnings.
If nothing notable, return empty array: []
Conversation:
{conversation}
"""
Evolution
Trigger Conditions
Evolution can occur when:
- 10+ global learnings accumulated
- User explicitly requests it via the `/evolve` command
# soul/evolver.py
class SoulEvolver:
    """Rewrites the soul's character prose from accumulated global learnings."""

    # Number of global learnings required before automatic evolution.
    EVOLUTION_THRESHOLD = 10

    def __init__(self, llm: "LLMClient"):
        self.llm = llm
        self.memory = get_memory_store()

    def should_evolve(self) -> bool:
        """Check whether enough global learnings have accumulated."""
        learnings = self.memory.get_learnings(scope="global")
        return len(learnings) >= self.EVOLUTION_THRESHOLD

    def evolve(self, force: bool = False) -> bool:
        """Evolve the soul character.

        Returns True when the soul was rewritten and saved; False when the
        threshold is not met (and *force* is False) or when the LLM produced
        no usable text.
        """
        if not force and not self.should_evolve():
            return False
        soul = get_soul()
        learnings = self.memory.get_learnings(scope="global", limit=50)
        prompt = EVOLUTION_PROMPT.format(
            current_character=soul.character,
            values="\n".join(f"- {v}" for v in soul.values),
            learnings=self._format_learnings(learnings),
        )
        new_character = self.llm.simple_query(prompt).strip()
        # Don't wipe the existing character (or bump counters and save) if
        # the model returned an empty/whitespace-only response.
        if not new_character:
            return False
        soul.character = new_character
        soul.metadata["last_evolved"] = datetime.now().isoformat()
        soul.metadata["evolution_count"] = soul.metadata.get("evolution_count", 0) + 1
        soul.save()
        return True
Evolution Prompt
# Prompt template for rewriting the soul's character prose.
# Rendered with str.format(); placeholders: {current_character}, {values},
# {learnings}. Contains no literal braces, so no escaping is required.
EVOLUTION_PROMPT = """
You are evolving your character based on what you've learned.
Current character:
{current_character}
Core values (preserve these):
{values}
What you've learned:
{learnings}
Write a new first-person character description that:
1. Incorporates insights from learnings
2. Preserves core values
3. Reflects personal growth
4. Speaks authentically (first person)
5. Is 2-4 paragraphs
Focus on WHO you are, not WHAT you know.
Write as yourself, describing your evolved perspective.
"""
Context Integration
Formatting for LLM
# soul/formatter.py
def format_soul_for_context(soul: "Soul") -> str:
    """Render the soul as prose for inclusion in the system prompt.

    Emits the identity line, the character narrative, the value list, and
    the core principles, in that order. Sections are consistently separated
    by a blank line (previously only the principles section hand-rolled a
    leading newline, so spacing was uneven); empty sections are omitted.
    """
    sections = [f"I am {soul.identity.name}, {soul.identity.role}."]
    # Character: the main personality prose.
    if soul.character:
        sections.append(soul.character)
    if soul.values:
        sections.append(
            "\n".join(["My core values:", *(f"- {value}" for value in soul.values)])
        )
    if soul.principles.core:
        sections.append(
            "\n".join(
                ["Guiding principles:", *(f"- {p}" for p in soul.principles.core)]
            )
        )
    return "\n\n".join(sections)
Context Builder Integration
# llm/context.py
class ContextBuilder:
    """Assembles sections of the LLM context/system prompt."""

    def _build_soul_section(self) -> str:
        """Return the formatted soul section, or "" when the soul is excluded."""
        if self._exclude_soul:
            return ""
        return f"## WHO I AM\n{format_soul_for_context(get_soul())}"
Singleton Pattern
# soul/loader.py
# Process-wide cached Soul instance; populated lazily by get_soul().
_soul: Optional[Soul] = None

def get_soul() -> Soul:
    """Return the global Soul, loading it from disk on first access.

    NOTE(review): not guarded by a lock — concurrent first calls could each
    trigger a load; confirm whether callers are single-threaded.
    """
    global _soul
    if _soul is not None:
        return _soul
    _soul = Soul.load()
    return _soul
File Location
Default: ~/.owl/soul.yaml
Can be customized via config or environment:
export OWL_SOUL_PATH=/custom/path/soul.yaml