soe-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. soe/builtin_tools/__init__.py +39 -0
  2. soe/builtin_tools/soe_add_signal.py +82 -0
  3. soe/builtin_tools/soe_call_tool.py +111 -0
  4. soe/builtin_tools/soe_copy_context.py +80 -0
  5. soe/builtin_tools/soe_explore_docs.py +290 -0
  6. soe/builtin_tools/soe_get_available_tools.py +42 -0
  7. soe/builtin_tools/soe_get_context.py +50 -0
  8. soe/builtin_tools/soe_get_workflows.py +63 -0
  9. soe/builtin_tools/soe_inject_node.py +86 -0
  10. soe/builtin_tools/soe_inject_workflow.py +105 -0
  11. soe/builtin_tools/soe_list_contexts.py +73 -0
  12. soe/builtin_tools/soe_remove_node.py +72 -0
  13. soe/builtin_tools/soe_remove_workflow.py +62 -0
  14. soe/builtin_tools/soe_update_context.py +54 -0
  15. soe/docs/_config.yml +10 -0
  16. soe/docs/advanced_patterns/guide_fanout_and_aggregations.md +318 -0
  17. soe/docs/advanced_patterns/guide_inheritance.md +435 -0
  18. soe/docs/advanced_patterns/hybrid_intelligence.md +237 -0
  19. soe/docs/advanced_patterns/index.md +49 -0
  20. soe/docs/advanced_patterns/operational.md +781 -0
  21. soe/docs/advanced_patterns/self_evolving_workflows.md +385 -0
  22. soe/docs/advanced_patterns/swarm_intelligence.md +211 -0
  23. soe/docs/builtins/context.md +164 -0
  24. soe/docs/builtins/explore_docs.md +135 -0
  25. soe/docs/builtins/tools.md +164 -0
  26. soe/docs/builtins/workflows.md +199 -0
  27. soe/docs/guide_00_getting_started.md +341 -0
  28. soe/docs/guide_01_tool.md +206 -0
  29. soe/docs/guide_02_llm.md +143 -0
  30. soe/docs/guide_03_router.md +146 -0
  31. soe/docs/guide_04_patterns.md +475 -0
  32. soe/docs/guide_05_agent.md +159 -0
  33. soe/docs/guide_06_schema.md +397 -0
  34. soe/docs/guide_07_identity.md +540 -0
  35. soe/docs/guide_08_child.md +612 -0
  36. soe/docs/guide_09_ecosystem.md +690 -0
  37. soe/docs/guide_10_infrastructure.md +427 -0
  38. soe/docs/guide_11_builtins.md +118 -0
  39. soe/docs/index.md +104 -0
  40. soe/docs/primitives/backends.md +281 -0
  41. soe/docs/primitives/context.md +256 -0
  42. soe/docs/primitives/node_reference.md +259 -0
  43. soe/docs/primitives/primitives.md +331 -0
  44. soe/docs/primitives/signals.md +865 -0
  45. soe/docs_index.py +1 -1
  46. soe/lib/__init__.py +0 -0
  47. soe/lib/child_context.py +46 -0
  48. soe/lib/context_fields.py +51 -0
  49. soe/lib/inheritance.py +172 -0
  50. soe/lib/jinja_render.py +113 -0
  51. soe/lib/operational.py +51 -0
  52. soe/lib/parent_sync.py +71 -0
  53. soe/lib/register_event.py +75 -0
  54. soe/lib/schema_validation.py +134 -0
  55. soe/lib/yaml_parser.py +14 -0
  56. soe/local_backends/__init__.py +18 -0
  57. soe/local_backends/factory.py +124 -0
  58. soe/local_backends/in_memory/context.py +38 -0
  59. soe/local_backends/in_memory/conversation_history.py +60 -0
  60. soe/local_backends/in_memory/identity.py +52 -0
  61. soe/local_backends/in_memory/schema.py +40 -0
  62. soe/local_backends/in_memory/telemetry.py +38 -0
  63. soe/local_backends/in_memory/workflow.py +33 -0
  64. soe/local_backends/storage/context.py +57 -0
  65. soe/local_backends/storage/conversation_history.py +82 -0
  66. soe/local_backends/storage/identity.py +118 -0
  67. soe/local_backends/storage/schema.py +96 -0
  68. soe/local_backends/storage/telemetry.py +72 -0
  69. soe/local_backends/storage/workflow.py +56 -0
  70. soe/nodes/__init__.py +13 -0
  71. soe/nodes/agent/__init__.py +10 -0
  72. soe/nodes/agent/factory.py +134 -0
  73. soe/nodes/agent/lib/loop_handlers.py +150 -0
  74. soe/nodes/agent/lib/loop_state.py +157 -0
  75. soe/nodes/agent/lib/prompts.py +65 -0
  76. soe/nodes/agent/lib/tools.py +35 -0
  77. soe/nodes/agent/stages/__init__.py +12 -0
  78. soe/nodes/agent/stages/parameter.py +37 -0
  79. soe/nodes/agent/stages/response.py +54 -0
  80. soe/nodes/agent/stages/router.py +37 -0
  81. soe/nodes/agent/state.py +111 -0
  82. soe/nodes/agent/types.py +66 -0
  83. soe/nodes/agent/validation/__init__.py +11 -0
  84. soe/nodes/agent/validation/config.py +95 -0
  85. soe/nodes/agent/validation/operational.py +24 -0
  86. soe/nodes/child/__init__.py +3 -0
  87. soe/nodes/child/factory.py +61 -0
  88. soe/nodes/child/state.py +59 -0
  89. soe/nodes/child/validation/__init__.py +11 -0
  90. soe/nodes/child/validation/config.py +126 -0
  91. soe/nodes/child/validation/operational.py +28 -0
  92. soe/nodes/lib/conditions.py +71 -0
  93. soe/nodes/lib/context.py +24 -0
  94. soe/nodes/lib/conversation_history.py +77 -0
  95. soe/nodes/lib/identity.py +64 -0
  96. soe/nodes/lib/llm_resolver.py +142 -0
  97. soe/nodes/lib/output.py +68 -0
  98. soe/nodes/lib/response_builder.py +91 -0
  99. soe/nodes/lib/signal_emission.py +79 -0
  100. soe/nodes/lib/signals.py +54 -0
  101. soe/nodes/lib/tools.py +100 -0
  102. soe/nodes/llm/__init__.py +7 -0
  103. soe/nodes/llm/factory.py +103 -0
  104. soe/nodes/llm/state.py +76 -0
  105. soe/nodes/llm/types.py +12 -0
  106. soe/nodes/llm/validation/__init__.py +11 -0
  107. soe/nodes/llm/validation/config.py +89 -0
  108. soe/nodes/llm/validation/operational.py +23 -0
  109. soe/nodes/router/__init__.py +3 -0
  110. soe/nodes/router/factory.py +37 -0
  111. soe/nodes/router/state.py +32 -0
  112. soe/nodes/router/validation/__init__.py +11 -0
  113. soe/nodes/router/validation/config.py +58 -0
  114. soe/nodes/router/validation/operational.py +16 -0
  115. soe/nodes/tool/factory.py +66 -0
  116. soe/nodes/tool/lib/__init__.py +11 -0
  117. soe/nodes/tool/lib/conditions.py +35 -0
  118. soe/nodes/tool/lib/failure.py +28 -0
  119. soe/nodes/tool/lib/parameters.py +67 -0
  120. soe/nodes/tool/state.py +66 -0
  121. soe/nodes/tool/types.py +27 -0
  122. soe/nodes/tool/validation/__init__.py +15 -0
  123. soe/nodes/tool/validation/config.py +132 -0
  124. soe/nodes/tool/validation/operational.py +16 -0
  125. soe/validation/__init__.py +18 -0
  126. soe/validation/config.py +195 -0
  127. soe/validation/jinja.py +54 -0
  128. soe/validation/operational.py +110 -0
  129. {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/METADATA +5 -5
  130. soe_ai-0.1.3.dist-info/RECORD +137 -0
  131. {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/WHEEL +1 -1
  132. soe_ai-0.1.1.dist-info/RECORD +0 -10
  133. {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/licenses/LICENSE +0 -0
  134. {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,540 @@

# SOE Guide: Chapter 7 - Identity

## Introduction to Identity

**Identity** enables two powerful features:

1. **Stateful LLM interactions** — Persisting conversation history across calls
2. **System prompts** — Defining roles for LLM/Agent nodes (via Identity Schema)

Without identity, each LLM call is independent. With identity, the LLM "remembers" previous exchanges—similar to Claude's Projects or custom instructions, but dynamically per-execution.

### Why Identity Matters

Traditional orchestration treats each LLM call as stateless:
- No memory of previous responses
- No context accumulation
- Each call starts fresh
- You must specify role/instructions in every prompt

Identity transforms this into **dynamic prompting**:
- Conversation history builds over time
- LLM can reference its own previous responses
- Enables multi-turn dialogues within workflows
- **Identity Schema removes the need to specify role in every prompt**

### Identity Schema in Config

Like context schema, identities are defined in your config YAML:

```yaml
workflows:
  example_workflow:
    Analyzer:
      node_type: llm
      event_triggers: [START]
      prompt: "Analyze: {{ context.input }}"
      identity: analyst # References identity defined below
      output_field: analysis
      event_emissions:
        - signal_name: DONE

  identities:
    analyst: |
      You are a senior data analyst. Be thorough and precise.
      Always cite sources when making claims.
    reviewer: |
      You are a code reviewer. Focus on correctness and maintainability.
```

When `identities` is included in config:
1. It's automatically saved to the `IdentityBackend`
2. LLM/Agent nodes with matching `identity` field receive the system prompt
3. Child workflows can access parent's identities through `main_execution_id`

**This removes the need to repeat role instructions in every prompt** — define once in identities, use everywhere.
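
To make this concrete, here is a minimal sketch (plain Python, illustrative only, not SOE internals) of what resolving `identity: analyst` amounts to: the system prompt comes from the `identities` mapping and is placed ahead of the node's rendered prompt. The `build_messages` helper and the exact message layout are assumptions for illustration.

```python
# Illustrative only: shows the intent of identity resolution, not soe's internals.
identities = {
    "analyst": (
        "You are a senior data analyst. Be thorough and precise.\n"
        "Always cite sources when making claims."
    ),
}

def build_messages(identity_name, rendered_prompt):
    """Hypothetical helper: system prompt from `identities`, then the node's prompt."""
    messages = []
    if identity_name:  # identity present -> system prompt applied, history enabled
        messages.append({"role": "system", "content": identities[identity_name]})
    messages.append({"role": "user", "content": rendered_prompt})
    return messages

print(build_messages("analyst", "Analyze: quarterly sales figures"))
```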

### Key Insight

**Identity only matters when you have MULTIPLE LLM calls.**

A single LLM node with identity doesn't demonstrate anything—the power comes when a second node with the *same* identity sees the first node's conversation history.

### The Claude Skills Parallel

Think of identity like **Claude Skills** or **Custom Instructions**:

| Claude Skills | SOE Identity |
|---------------|--------------|
| Persistent per-project | Persistent per-identity |
| User configures manually | Workflow configures dynamically |
| One instruction set | Multiple identities per workflow |
| Static prompts | Dynamic context + history |

## Multi-Turn Conversations (Same Identity)

Using the same identity across nodes enables multi-turn dialogues:

### The Workflow

```yaml
example_workflow:
  FirstTurn:
    node_type: llm
    event_triggers: [START]
    identity: conversation_abc
    prompt: "Start a conversation about {{ context.topic }}"
    output_field: firstResponse
    event_emissions:
      - signal_name: FIRST_COMPLETE

  SecondTurn:
    node_type: llm
    event_triggers: [FIRST_COMPLETE]
    identity: conversation_abc
    prompt: "Continue the conversation. User asks: {{ context.follow_up }}"
    output_field: secondResponse
    event_emissions:
      - signal_name: CONVERSATION_COMPLETE
```

### How It Works

1. `FirstTurn` executes with `identity: conversation_abc`.
2. Prompt and response are saved to conversation history.
3. `SecondTurn` triggers with the **same identity**.
4. LLM receives `conversation_history` containing the first exchange.
5. The second LLM call can reference the first exchange: "As I mentioned about technology..."

### What You'll See in the Prompt

```json
{
  "prompt": "Continue the conversation. User asks: Tell me more",
  "context": "...",
  "conversation_history": "[user]: Start a conversation about technology\n[assistant]: Technology is fascinating! ..."
}
```

The `conversation_history` field is automatically populated because both nodes share `identity: conversation_abc`.
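
The flat `conversation_history` string above follows a simple `[role]: content` layout. Here is a small sketch of producing that format from a list of role/content messages; the `render_history` helper is illustrative, not the package's actual rendering code.

```python
# Sketch: flattening a message list into the "[role]: content" string format
# shown above. render_history is an illustrative helper, not soe's code.
def render_history(messages):
    return "\n".join(f"[{m['role']}]: {m['content']}" for m in messages)

history = [
    {"role": "user", "content": "Start a conversation about technology"},
    {"role": "assistant", "content": "Technology is fascinating! ..."},
]

print(render_history(history))
# [user]: Start a conversation about technology
# [assistant]: Technology is fascinating! ...
```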

## All Identities Share History (Within Execution)

Nodes with **different** identity values still share history within the same orchestration:

### The Workflow

```yaml
example_workflow:
  FirstTurn:
    node_type: llm
    event_triggers: [START]
    identity: session_A
    prompt: "Start a conversation about {{ context.topic }}"
    output_field: firstResponse
    event_emissions:
      - signal_name: FIRST_COMPLETE

  SecondTurn:
    node_type: llm
    event_triggers: [FIRST_COMPLETE]
    identity: session_B
    prompt: "Continue the conversation. User asks: {{ context.follow_up }}"
    output_field: secondResponse
    event_emissions:
      - signal_name: CONVERSATION_COMPLETE
```

### What Actually Happens

1. `FirstTurn` has identity `session_A` → history enabled, keyed by `main_execution_id`.
2. `SecondTurn` has identity `session_B` → history enabled, same `main_execution_id`.
3. **Both share the same conversation history** because they use the same execution.
4. The identity VALUE (`session_A` vs `session_B`) doesn't matter—only its presence.

**Key insight**: Identity isolation happens at the **orchestration boundary**, not within a workflow.

## No Identity = No History

Without identity, each call is completely stateless:

### The Workflow

```yaml
example_workflow:
  FirstTurn:
    node_type: llm
    event_triggers: [START]
    prompt: "Start a conversation about {{ context.topic }}"
    output_field: firstResponse
    event_emissions:
      - signal_name: FIRST_COMPLETE

  SecondTurn:
    node_type: llm
    event_triggers: [FIRST_COMPLETE]
    prompt: "Continue the conversation. User asks: {{ context.follow_up }}"
    output_field: secondResponse
    event_emissions:
      - signal_name: CONVERSATION_COMPLETE
```

### What Happens

1. `FirstTurn` executes, but nothing is saved (no identity).
2. `SecondTurn` executes with **empty history**.
3. Neither node knows about the other.
4. Pure function-like, stateless behavior.

Use this for independent LLM calls that don't need context.

## How Identity Actually Works

**Important**: Identity is a **boolean flag**, not a key.

- **Identity present (any truthy value)**: Conversation history is enabled
- **Identity absent or empty**: No conversation history

### The Reality

Conversation history is keyed by `main_execution_id`, NOT the identity value:

```yaml
# All these share the SAME history within one orchestration:
identity: conversation_abc # History keyed by main_execution_id
identity: session_A # Same history - same main_execution_id
identity: user_123 # Same history - same main_execution_id

# Only this is different:
# (no identity) # No history at all
```

This means **all LLM/Agent nodes with any identity share conversation history within an execution tree**. The identity value itself doesn't isolate conversations—it just enables the feature.
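
The keying rule can be pictured with a tiny, self-contained model (plain Python, purely illustrative; the real storage lives in the conversation-history backends): history sits in a dict keyed by `main_execution_id`, and the identity value is only checked for truthiness.

```python
# Toy model of the keying rule described above - not soe's implementation.
conversation_store = {}  # main_execution_id -> list of messages

def record_exchange(main_execution_id, identity, prompt, response):
    """History is enabled by the *presence* of identity, keyed by execution id."""
    if not identity:  # no identity -> nothing is saved
        return
    history = conversation_store.setdefault(main_execution_id, [])
    history.append({"role": "user", "content": prompt})
    history.append({"role": "assistant", "content": response})

record_exchange("exec-1", "session_A", "Start a conversation", "Sure!")
record_exchange("exec-1", "session_B", "Continue", "Building on that...")
record_exchange("exec-1", None, "Stateless call", "No memory of this")

print(len(conversation_store["exec-1"]))  # 4 -> session_A and session_B share one history
```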

### When You Need Isolation

If you need truly isolated conversation histories, you must use **separate orchestration calls**:

```python
# Execution 1 - has its own main_execution_id
execution_1 = orchestrate(
    initial_context={"user_id": "alice"},
    ...
)

# Execution 2 - different main_execution_id, different history
execution_2 = orchestrate(
    initial_context={"user_id": "bob"},
    ...
)
```

Each `orchestrate()` call creates a new `main_execution_id`, giving isolated histories.

## History Accumulates Over Turns

With three or more nodes using the same identity, history grows:

### The Workflow

```yaml
example_workflow:
  Turn1:
    node_type: llm
    event_triggers: [START]
    identity: long_conversation
    prompt: "User says: {{ context.msg1 }}"
    output_field: response1
    event_emissions:
      - signal_name: TURN1_DONE

  Turn2:
    node_type: llm
    event_triggers: [TURN1_DONE]
    identity: long_conversation
    prompt: "User says: {{ context.msg2 }}"
    output_field: response2
    event_emissions:
      - signal_name: TURN2_DONE

  Turn3:
    node_type: llm
    event_triggers: [TURN2_DONE]
    identity: long_conversation
    prompt: "User says: {{ context.msg3 }}"
    output_field: response3
    event_emissions:
      - signal_name: TURN3_DONE
```

### Accumulation Pattern

| Turn | Sees History From |
|------|------------------|
| Turn1 | (empty) |
| Turn2 | Turn1 |
| Turn3 | Turn1 + Turn2 |

Each subsequent call sees all previous exchanges, enabling long-form conversations.
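
The same accumulation can be sketched in a few lines of plain Python (illustrative only, not SOE internals): each turn sees whatever was saved before it, then appends its own exchange.

```python
# Illustrative sketch of the accumulation table above.
history = []  # shared within one execution because all three turns carry an identity

for turn, user_msg in enumerate(["msg1", "msg2", "msg3"], start=1):
    print(f"Turn{turn} sees {len(history) // 2} previous exchange(s)")
    history.append({"role": "user", "content": f"User says: {user_msg}"})
    history.append({"role": "assistant", "content": f"Response to {user_msg}"})

# Turn1 sees 0 previous exchange(s)
# Turn2 sees 1 previous exchange(s)
# Turn3 sees 2 previous exchange(s)
```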

## The Skill Pattern

Combine routing with specialized identities for Claude-like skills:

### The Workflow

```yaml
example_workflow:
  SkillRouter:
    node_type: router
    event_triggers: [START]
    event_emissions:
      - signal_name: CODING_SKILL
        condition: "{{ 'code' in context.request|lower }}"
      - signal_name: WRITING_SKILL
        condition: "{{ 'write' in context.request|lower }}"
      - signal_name: GENERAL_SKILL
        condition: "{{ 'code' not in context.request|lower and 'write' not in context.request|lower }}"

  CodingAssistant:
    node_type: llm
    event_triggers: [CODING_SKILL]
    identity: "{{ context.user_id }}_coding"
    prompt: "You are a coding assistant. Help with: {{ context.request }}"
    output_field: response
    event_emissions:
      - signal_name: SKILL_COMPLETE

  WritingAssistant:
    node_type: llm
    event_triggers: [WRITING_SKILL]
    identity: "{{ context.user_id }}_writing"
    prompt: "You are a writing assistant. Help with: {{ context.request }}"
    output_field: response
    event_emissions:
      - signal_name: SKILL_COMPLETE

  GeneralAssistant:
    node_type: llm
    event_triggers: [GENERAL_SKILL]
    identity: "{{ context.user_id }}_general"
    prompt: "Help with: {{ context.request }}"
    output_field: response
    event_emissions:
      - signal_name: SKILL_COMPLETE
```

### How Skills Work

1. `SkillRouter` routes based on request content.
2. Each skill assistant has its own identity: `{{ context.user_id }}_coding`.
3. Coding history stays with the coding assistant.
4. Writing history stays with the writing assistant.
5. User builds separate expertise relationships per skill.

### Benefits

- **Specialized memory**: Each skill remembers its domain conversations.
- **No cross-contamination**: Coding advice doesn't leak into writing context.
- **Dynamic prompting**: Same user, different "personalities" per skill.
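
The three routing conditions in the `SkillRouter` above reduce to simple substring checks. For readability, here is the same decision logic as plain Python; this is an illustration of the conditions only, not how the router evaluates Jinja expressions internally.

```python
# Plain-Python equivalent of the SkillRouter conditions above (illustration only).
# Each condition is evaluated independently, so a request can match more than one.
def route(request: str) -> list[str]:
    r = request.lower()
    signals = []
    if "code" in r:
        signals.append("CODING_SKILL")
    if "write" in r:
        signals.append("WRITING_SKILL")
    if "code" not in r and "write" not in r:
        signals.append("GENERAL_SKILL")
    return signals

print(route("Help me write a cover letter"))  # ['WRITING_SKILL']
print(route("What's the weather like?"))      # ['GENERAL_SKILL']
```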

## Identity and Strong Models

Identity enables **dynamic prompting** patterns that leverage strong models' capabilities:

### Why Strong Models Benefit

Strong models like Claude, GPT-4, and Gemini Pro excel at:
- Maintaining context across long conversations
- Referencing previous exchanges accurately
- Building on established patterns

Identity unlocks this within stateless orchestration:

```text
# First call builds context
LLM: "You want to build a REST API. Let's start with the data model..."

# Second call (same identity) references first
LLM: "Based on the User model we designed, here's the authentication..."

# Third call builds further
LLM: "Now that auth is set up, let's add the protected endpoints..."
```

### The Dynamic Prompting Pattern

1. **Initial context**: First call establishes baseline.
2. **Accumulated history**: Each call adds to shared context.
3. **Progressive refinement**: Later calls can reference and build.
4. **No explicit state management**: History is automatic.

## Defining Identities in Config (Recommended)

The simplest approach is defining `identities` alongside your workflows. Each identity maps to a system prompt:

```yaml
workflows:
  example_workflow:
    FirstTurn:
      node_type: llm
      event_triggers: [START]
      identity: helpful_assistant
      prompt: "Start a conversation about {{ context.topic }}"
      output_field: firstResponse
      event_emissions:
        - signal_name: FIRST_COMPLETE

    SecondTurn:
      node_type: llm
      event_triggers: [FIRST_COMPLETE]
      identity: helpful_assistant
      prompt: "Continue the conversation. User asks: {{ context.follow_up }}"
      output_field: secondResponse
      event_emissions:
        - signal_name: CONVERSATION_COMPLETE

  identities:
    helpful_assistant: "You are a friendly and knowledgeable assistant who explains topics clearly."
```

When `identities` is included in config:
1. Identity definitions are automatically saved to the identity backend
2. They're keyed by `execution_id` (specifically `main_execution_id`)
3. Child workflows can access parent's identity definitions
4. The identity value in nodes (e.g., `identity: helpful_assistant`) is looked up from the definitions
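
The lookup described above can be pictured with a small stand-in for the identity backend (illustrative only, not the package's API): definitions are stored under `main_execution_id`, and a node's `identity` name is resolved against that mapping, which is also what lets a child workflow sharing the parent's `main_execution_id` see the same definitions.

```python
# Toy stand-in for the identity backend described above - not soe's API.
identity_store = {}  # main_execution_id -> {identity_name: system_prompt}

def save_identities(main_execution_id, identities):
    identity_store[main_execution_id] = dict(identities)

def resolve_identity(main_execution_id, identity_name):
    """A child sharing the parent's main_execution_id resolves the same definitions."""
    return identity_store.get(main_execution_id, {}).get(identity_name)

save_identities("exec-1", {"helpful_assistant": "You are a friendly and knowledgeable assistant."})
print(resolve_identity("exec-1", "helpful_assistant"))
```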

### Multiple Identities

Define multiple specialized identities for different roles:

```yaml
workflows:
  example_workflow:
    SkillRouter:
      node_type: router
      event_triggers: [START]
      event_emissions:
        - signal_name: CODING_SKILL
          condition: "{{ 'code' in context.request|lower }}"
        - signal_name: WRITING_SKILL
          condition: "{{ 'write' in context.request|lower }}"

    CodingAssistant:
      node_type: llm
      event_triggers: [CODING_SKILL]
      identity: coding_expert
      prompt: "Help with: {{ context.request }}"
      output_field: response
      event_emissions:
        - signal_name: SKILL_COMPLETE

    WritingAssistant:
      node_type: llm
      event_triggers: [WRITING_SKILL]
      identity: writing_expert
      prompt: "Help with: {{ context.request }}"
      output_field: response
      event_emissions:
        - signal_name: SKILL_COMPLETE

  identities:
    coding_expert: "You are an expert programmer. Provide clear, well-documented code examples."
    writing_expert: "You are a skilled writer. Focus on clarity, grammar, and style."
```

### Full Config (Workflows + Schema + Identities)

Combine all sections for complete configuration:

```yaml
workflows:
  example_workflow:
    ExtractData:
      node_type: llm
      event_triggers: [START]
      identity: data_analyst
      prompt: "Extract key information from: {{ context.input }}"
      output_field: extracted_data
      event_emissions:
        - signal_name: DATA_EXTRACTED

    SummarizeData:
      node_type: llm
      event_triggers: [DATA_EXTRACTED]
      identity: data_analyst
      prompt: "Summarize the extracted data: {{ context.extracted_data }}"
      output_field: summary
      event_emissions:
        - signal_name: DONE

  context_schema:
    extracted_data:
      type: object
      description: Structured data extracted from input
    summary:
      type: string
      description: A concise summary of the extracted data

  identities:
    data_analyst: "You are a data analyst. Be precise, structured, and thorough in your analysis."
```

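Before running a combined config like this, it can help to check that every node's `identity` has a matching definition. The sketch below uses plain PyYAML and a recursive walk, so it does not depend on exactly where the `identities` section sits; the `config.yaml` filename is an assumption, and this is not an SOE validation API.

```python
import yaml  # PyYAML

def collect(cfg, used_identities, defined_identities):
    """Recursively gather node identity references and `identities` definitions."""
    if isinstance(cfg, dict):
        if "node_type" in cfg and "identity" in cfg:
            used_identities.append(cfg["identity"])
        for key, value in cfg.items():
            if key == "identities" and isinstance(value, dict):
                defined_identities.update(value)
            collect(value, used_identities, defined_identities)
    elif isinstance(cfg, list):
        for item in cfg:
            collect(item, used_identities, defined_identities)

with open("config.yaml") as f:  # assumed filename
    config = yaml.safe_load(f)

used, defined = [], {}
collect(config, used, defined)
print("identities missing a definition:", [name for name in used if name not in defined])
```
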
## Conversation History API

Access history programmatically:

```python
from soe.local_backends import create_local_backends

backends = create_local_backends(...)

# Get conversation history (keyed by execution_id)
history = backends.conversation_history.get_conversation_history(execution_id)

# Append to history
backends.conversation_history.append_to_conversation_history(
    execution_id,
    {"role": "user", "content": "Hello"}
)

# Replace entire history
backends.conversation_history.save_conversation_history(
    execution_id,
    [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"}
    ]
)

# Clear history
backends.conversation_history.delete_conversation_history(execution_id)
```

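Because long histories consume tokens (see the best practices below), one option is to trim older messages with the same calls shown above. A minimal sketch, assuming the `backends` object and `execution_id` from the previous block; the `keep_last` value is arbitrary.

```python
# Keep only the most recent messages for a given execution_id.
# Uses only the get/save calls documented above.
def trim_history(backends, execution_id, keep_last=10):
    history = backends.conversation_history.get_conversation_history(execution_id)
    if history and len(history) > keep_last:
        backends.conversation_history.save_conversation_history(
            execution_id,
            history[-keep_last:]
        )

trim_history(backends, execution_id, keep_last=10)
```
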
## Identity Best Practices

### Do

- **Define in config**: Use the `identities` section for clear, centralized definitions.
- **Use meaningful names**: `coding_expert` over `abc123`.
- **Scope appropriately**: One identity per logical conversation.
- **Clean up old histories**: Delete stale conversation data.

### Don't

- **Share identities across unrelated tasks**: Causes context confusion.
- **Use identity for short, independent tasks**: Adds unnecessary overhead.
- **Forget about history growth**: Long histories consume tokens.

## Key Points

- **Define in config**: Use the `identities` section in your config for automatic setup.
- **Simple format**: `identity_name: "system prompt"` - just a string.
- **Keyed by execution_id**: Identities are stored by `main_execution_id`, enabling child workflow access.
- **History keyed by execution**: All nodes share history via `main_execution_id`.
- **Isolation at orchestration boundary**: Different `orchestrate()` calls have different histories.

## Next Steps

Now that you understand stateful interactions, let's explore [Child Workflows](guide_08_child.md) for sub-orchestration and modular composition →