igniter 0.4.0 → 0.4.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +25 -0
  3. data/README.md +238 -218
  4. data/docs/LLM_V1.md +335 -0
  5. data/docs/PATTERNS.md +189 -0
  6. data/docs/SERVER_V1.md +313 -0
  7. data/examples/README.md +129 -0
  8. data/examples/agents.rb +150 -0
  9. data/examples/differential.rb +161 -0
  10. data/examples/distributed_server.rb +94 -0
  11. data/examples/effects.rb +184 -0
  12. data/examples/incremental.rb +142 -0
  13. data/examples/invariants.rb +179 -0
  14. data/examples/order_pipeline.rb +163 -0
  15. data/examples/provenance.rb +122 -0
  16. data/examples/saga.rb +110 -0
  17. data/lib/igniter/agent/mailbox.rb +96 -0
  18. data/lib/igniter/agent/message.rb +21 -0
  19. data/lib/igniter/agent/ref.rb +86 -0
  20. data/lib/igniter/agent/runner.rb +129 -0
  21. data/lib/igniter/agent/state_holder.rb +23 -0
  22. data/lib/igniter/agent.rb +155 -0
  23. data/lib/igniter/compiler/validators/callable_validator.rb +21 -3
  24. data/lib/igniter/differential/divergence.rb +29 -0
  25. data/lib/igniter/differential/formatter.rb +96 -0
  26. data/lib/igniter/differential/report.rb +86 -0
  27. data/lib/igniter/differential/runner.rb +130 -0
  28. data/lib/igniter/differential.rb +51 -0
  29. data/lib/igniter/dsl/contract_builder.rb +32 -0
  30. data/lib/igniter/effect.rb +91 -0
  31. data/lib/igniter/effect_registry.rb +78 -0
  32. data/lib/igniter/errors.rb +11 -1
  33. data/lib/igniter/execution_report/builder.rb +54 -0
  34. data/lib/igniter/execution_report/formatter.rb +50 -0
  35. data/lib/igniter/execution_report/node_entry.rb +24 -0
  36. data/lib/igniter/execution_report/report.rb +65 -0
  37. data/lib/igniter/execution_report.rb +32 -0
  38. data/lib/igniter/extensions/differential.rb +114 -0
  39. data/lib/igniter/extensions/execution_report.rb +27 -0
  40. data/lib/igniter/extensions/incremental.rb +50 -0
  41. data/lib/igniter/extensions/invariants.rb +116 -0
  42. data/lib/igniter/extensions/provenance.rb +45 -0
  43. data/lib/igniter/extensions/saga.rb +74 -0
  44. data/lib/igniter/incremental/formatter.rb +81 -0
  45. data/lib/igniter/incremental/result.rb +69 -0
  46. data/lib/igniter/incremental/tracker.rb +108 -0
  47. data/lib/igniter/incremental.rb +50 -0
  48. data/lib/igniter/integrations/agents.rb +18 -0
  49. data/lib/igniter/invariant.rb +50 -0
  50. data/lib/igniter/model/effect_node.rb +37 -0
  51. data/lib/igniter/model.rb +1 -0
  52. data/lib/igniter/property_testing/formatter.rb +66 -0
  53. data/lib/igniter/property_testing/generators.rb +115 -0
  54. data/lib/igniter/property_testing/result.rb +45 -0
  55. data/lib/igniter/property_testing/run.rb +43 -0
  56. data/lib/igniter/property_testing/runner.rb +47 -0
  57. data/lib/igniter/property_testing.rb +64 -0
  58. data/lib/igniter/provenance/builder.rb +97 -0
  59. data/lib/igniter/provenance/lineage.rb +82 -0
  60. data/lib/igniter/provenance/node_trace.rb +65 -0
  61. data/lib/igniter/provenance/text_formatter.rb +70 -0
  62. data/lib/igniter/provenance.rb +29 -0
  63. data/lib/igniter/registry.rb +67 -0
  64. data/lib/igniter/runtime/cache.rb +35 -6
  65. data/lib/igniter/runtime/execution.rb +8 -2
  66. data/lib/igniter/runtime/node_state.rb +7 -2
  67. data/lib/igniter/runtime/resolver.rb +84 -15
  68. data/lib/igniter/saga/compensation.rb +31 -0
  69. data/lib/igniter/saga/compensation_record.rb +20 -0
  70. data/lib/igniter/saga/executor.rb +85 -0
  71. data/lib/igniter/saga/formatter.rb +49 -0
  72. data/lib/igniter/saga/result.rb +47 -0
  73. data/lib/igniter/saga.rb +56 -0
  74. data/lib/igniter/stream_loop.rb +80 -0
  75. data/lib/igniter/supervisor.rb +167 -0
  76. data/lib/igniter/version.rb +1 -1
  77. data/lib/igniter.rb +10 -0
  78. metadata +63 -1
data/docs/LLM_V1.md ADDED
@@ -0,0 +1,335 @@
1
+ # LLM Integration v1
2
+
3
+ Igniter's LLM integration (`require "igniter/integrations/llm"`) makes language models
4
+ first-class compute nodes inside a graph. A multi-step LLM pipeline — classify, assess,
5
+ draft a response — is just a normal Igniter contract with chained `compute` nodes backed
6
+ by LLM executors. Caching, invalidation, auditing, and diagnostics all work the same way.
7
+
8
+ ## Quick Start
9
+
10
+ ```ruby
11
+ require "igniter/integrations/llm"
12
+
13
+ Igniter::LLM.configure do |c|
14
+ c.default_provider = :anthropic
15
+ c.anthropic.api_key = ENV["ANTHROPIC_API_KEY"]
16
+ end
17
+
18
+ class SummarizeExecutor < Igniter::LLM::Executor
19
+ provider :anthropic
20
+ model "claude-haiku-4-5-20251001"
21
+ system_prompt "Return a single concise sentence summary."
22
+
23
+ def call(text:)
24
+ complete("Summarize: #{text}")
25
+ end
26
+ end
27
+
28
+ class ArticleContract < Igniter::Contract
29
+ define do
30
+ input :text
31
+ compute :summary, depends_on: :text, with: SummarizeExecutor
32
+ output :summary
33
+ end
34
+ end
35
+
36
+ ArticleContract.new(text: "Long article...").result.summary
37
+ ```
38
+
39
+ ---
40
+
41
+ ## `Igniter::LLM::Executor`
42
+
43
+ Subclass `Igniter::LLM::Executor` and override `#call(**inputs)`. Inside `call`, use the
44
+ protected helper methods to interact with the provider.
45
+
46
+ ### Class-level configuration
47
+
48
+ ```ruby
49
+ class MyExecutor < Igniter::LLM::Executor
50
+ provider :anthropic # :ollama | :anthropic | :openai
51
+ model "claude-haiku-4-5-20251001"
52
+ system_prompt "You are a helpful assistant."
53
+ temperature 0.2 # optional; provider default if omitted
54
+
55
+ # Declare tools for structured output / function calling
56
+ tools({
57
+ name: "set_result",
58
+ description: "Record the computed result",
59
+ input_schema: {
60
+ type: "object",
61
+ properties: { value: { type: "number" } },
62
+ required: ["value"]
63
+ }
64
+ })
65
+ end
66
+ ```
67
+
68
+ Configuration is inherited by subclasses:
69
+
70
+ ```ruby
71
+ class BaseExecutor < Igniter::LLM::Executor
72
+ provider :anthropic
73
+ model "claude-haiku-4-5-20251001"
74
+ end
75
+
76
+ class ClassifyExecutor < BaseExecutor
77
+ system_prompt "Classify into: bug, feature, question."
78
+ # Inherits provider and model from BaseExecutor
79
+ end
80
+ ```
81
+
82
+ ### Instance helpers
83
+
84
+ | Method | Description |
85
+ |--------|-------------|
86
+ | `complete(prompt, context: nil)` | Single-turn completion. Returns the assistant's text content. |
87
+ | `chat(context:)` | Multi-turn chat from a `Context` or messages array. Returns content. |
88
+ | `complete_with_tools(prompt, context: nil)` | Tool-use call. Returns a `DeferredResult` if the LLM requests a tool call, otherwise returns the text content. |
89
+ | `last_usage` | Token usage from the last call (`{ prompt_tokens:, completion_tokens: }`). |
90
+ | `last_context` | Updated `Context` after the last `complete` call (includes the new turn). |
91
+
92
+ ---
93
+
94
+ ## `Igniter::LLM::Context`
95
+
96
+ Immutable conversation history that accumulates turns across calls.
97
+
98
+ ```ruby
99
+ ctx = Igniter::LLM::Context.empty(system: "You are a code reviewer.")
100
+ ctx = ctx.append_user("Review this method: def foo; end")
101
+ ctx = ctx.append_assistant("The method is empty. Consider adding a docstring.")
102
+ ctx = ctx.append_user("How would you improve it?")
103
+
104
+ # Pass as context: to maintain continuity across executor calls
105
+ response = chat(context: ctx)
106
+ ```
107
+
108
+ `Context` is immutable — each `append_*` call returns a new instance.
109
+
110
+ ---
111
+
112
+ ## Providers
113
+
114
+ ### Ollama (local)
115
+
116
+ No API key needed. Requires a running Ollama instance.
117
+
118
+ ```ruby
119
+ Igniter::LLM.configure do |c|
120
+ c.default_provider = :ollama
121
+ c.ollama.base_url = ENV.fetch("OLLAMA_URL", "http://localhost:11434")
122
+ c.ollama.default_model = "llama3.2"
123
+ end
124
+ ```
125
+
126
+ ```bash
127
+ # Install: https://ollama.com
128
+ ollama pull llama3.2
129
+ ```
130
+
131
+ ### Anthropic
132
+
133
+ ```ruby
134
+ Igniter::LLM.configure do |c|
135
+ c.default_provider = :anthropic
136
+ c.anthropic.api_key = ENV["ANTHROPIC_API_KEY"]
137
+ c.anthropic.default_model = "claude-haiku-4-5-20251001"
138
+ end
139
+ ```
140
+
141
+ Anthropic-specific notes:
142
+ - `system_prompt` is sent as a top-level `"system"` field (not in the messages array)
143
+ - Tool definitions use `input_schema` (Anthropic format)
144
+ - Supported models: any `claude-*` model identifier
145
+
146
+ ### OpenAI (and compatible)
147
+
148
+ ```ruby
149
+ Igniter::LLM.configure do |c|
150
+ c.default_provider = :openai
151
+ c.openai.api_key = ENV["OPENAI_API_KEY"]
152
+ c.openai.default_model = "gpt-4o-mini"
153
+
154
+ # For OpenAI-compatible APIs (Groq, Mistral, Azure, etc.)
155
+ c.openai.base_url = "https://api.groq.com/openai"
156
+ c.openai.api_key = ENV["GROQ_API_KEY"]
157
+ end
158
+ ```
159
+
160
+ ---
161
+
162
+ ## Multi-Step LLM Pipeline
163
+
164
+ Chain multiple LLM executors as sequential compute nodes. Each node receives the
165
+ output of the previous as an input:
166
+
167
+ ```ruby
168
+ class ClassifyExecutor < Igniter::LLM::Executor
169
+ provider :anthropic
170
+ model "claude-haiku-4-5-20251001"
171
+ system_prompt "Classify feedback into: bug_report, feature_request, question."
172
+
173
+ def call(feedback:)
174
+ complete("Classify: #{feedback}")
175
+ end
176
+ end
177
+
178
+ class PriorityExecutor < Igniter::LLM::Executor
179
+ provider :anthropic
180
+ model "claude-haiku-4-5-20251001"
181
+ system_prompt "Assess priority: low, medium, or high."
182
+
183
+ def call(feedback:, category:)
184
+ ctx = Igniter::LLM::Context
185
+ .empty(system: self.class.system_prompt)
186
+ .append_user("Feedback: #{feedback}")
187
+ .append_user("Category: #{category}")
188
+ chat(context: ctx)
189
+ end
190
+ end
191
+
192
+ class FeedbackContract < Igniter::Contract
193
+ define do
194
+ input :feedback
195
+
196
+ compute :category, depends_on: :feedback, with: ClassifyExecutor
197
+ compute :priority, depends_on: %i[feedback category], with: PriorityExecutor
198
+
199
+ output :category
200
+ output :priority
201
+ end
202
+ end
203
+ ```
204
+
205
+ ---
206
+
207
+ ## Tool Use
208
+
209
+ Declare tools at the class level with `tools`. Call `complete_with_tools` inside `#call`
210
+ to trigger tool-use mode. If the LLM returns tool calls, the node is deferred (pending),
211
+ and must be resumed with the tool result via `Contract.resume_from_store`.
212
+
213
+ ```ruby
214
+ EXTRACT_TOOL = {
215
+ name: "extract_entities",
216
+ description: "Extract named entities from text",
217
+ input_schema: {
218
+ type: "object",
219
+ properties: {
220
+ entities: {
221
+ type: "array",
222
+ items: { type: "string" },
223
+ description: "List of entity names found in the text"
224
+ }
225
+ },
226
+ required: ["entities"]
227
+ }
228
+ }.freeze
229
+
230
+ class EntityExtractor < Igniter::LLM::Executor
231
+ provider :anthropic
232
+ model "claude-haiku-4-5-20251001"
233
+ system_prompt "Extract named entities. Always use the extract_entities tool."
234
+
235
+ tools EXTRACT_TOOL
236
+
237
+ def call(text:)
238
+ # Returns DeferredResult if the LLM requests a tool call
239
+ complete_with_tools("Extract entities from: #{text}")
240
+ end
241
+ end
242
+
243
+ class ExtractionContract < Igniter::Contract
244
+ run_with runner: :store
245
+
246
+ define do
247
+ input :text
248
+ compute :entities, depends_on: :text, with: EntityExtractor
249
+ output :entities
250
+ end
251
+ end
252
+
253
+ # Configure a store for async execution
254
+ Igniter.configure { |c| c.execution_store = Igniter::Runtime::Stores::MemoryStore.new }
255
+
256
+ contract = ExtractionContract.new(text: "Apple and Google announced a partnership.")
257
+ deferred = contract.result.entities # triggers tool call
258
+ execution_id = contract.execution.events.execution_id
259
+
260
+ # In a real app: parse tool_calls from deferred.payload[:tool_calls],
261
+ # run actual extraction logic, then resume with the result
262
+ tool_result = ["Apple", "Google"]
263
+
264
+ resumed = ExtractionContract.resume_from_store(
265
+ execution_id, token: deferred.token, value: tool_result
266
+ )
267
+ resumed.result.entities # => ["Apple", "Google"]
268
+ ```
269
+
270
+ ---
271
+
272
+ ## LLM Executor with Igniter Composition
273
+
274
+ LLM executors compose naturally with non-LLM nodes:
275
+
276
+ ```ruby
277
+ class DocumentPipeline < Igniter::Contract
278
+ define do
279
+ input :document_text
280
+ input :language, default: "en"
281
+
282
+ # Non-LLM preprocessing
283
+ compute :cleaned_text, depends_on: :document_text do |document_text:|
284
+ document_text.strip.gsub(/\s+/, " ")
285
+ end
286
+
287
+ # LLM summarization — note: the executor's #call must accept the dependency
+ # names as keywords (here cleaned_text: and language:)
288
+ compute :summary, depends_on: %i[cleaned_text language], with: SummarizeExecutor
289
+
290
+ # Non-LLM post-processing
291
+ compute :word_count, depends_on: :summary do |summary:|
292
+ summary.split.size
293
+ end
294
+
295
+ output :summary
296
+ output :word_count
297
+ end
298
+ end
299
+ ```
300
+
301
+ ---
302
+
303
+ ## Token Usage and Auditing
304
+
305
+ Each `Igniter::LLM::Executor` instance tracks token usage after each call:
306
+
307
+ ```ruby
308
+ class TrackingExecutor < Igniter::LLM::Executor
309
+ def call(text:)
310
+ result = complete("Process: #{text}")
311
+ # last_usage is available after complete/chat
312
+ { result: result, tokens: last_usage }
313
+ end
314
+ end
315
+ ```
316
+
317
+ Standard Igniter auditing and diagnostics work unchanged for LLM nodes:
318
+
319
+ ```ruby
320
+ contract = MyLLMContract.new(...)
321
+ contract.resolve_all
322
+
323
+ contract.diagnostics_text # includes LLM node timing
324
+ contract.audit_snapshot # includes all node events
325
+ ```
326
+
327
+ ---
328
+
329
+ ## ENV Variables
330
+
331
+ | Variable | Provider | Purpose |
332
+ |----------|----------|---------|
333
+ | `ANTHROPIC_API_KEY` | Anthropic | API key (used automatically if not configured via `configure`) |
334
+ | `OPENAI_API_KEY` | OpenAI | API key (used automatically if not configured via `configure`) |
335
+ | `OLLAMA_URL` | Ollama | Override base URL (default: `http://localhost:11434`) |
data/docs/PATTERNS.md CHANGED
@@ -220,3 +220,192 @@ Guideline:
220
220
  - model the slow step as a deferred node
221
221
  - resume with store-backed execution restore
222
222
  - keep downstream graph pure and resumable
223
+
224
+ ## 9. Distributed Event-Driven Contract
225
+
226
+ Use this when execution spans multiple external triggers (webhooks, background jobs, async callbacks) that arrive at different times.
227
+
228
+ Examples:
229
+
230
+ - [distributed_server.rb](../examples/distributed_server.rb)
231
+
232
+ Use:
233
+
234
+ - multi-step approval workflows
235
+ - job application pipelines
236
+ - order fulfilment with external vendor callbacks
237
+ - KYC / onboarding flows requiring background checks
238
+
239
+ Key DSL:
240
+
241
+ ```ruby
242
+ class ApplicationReviewWorkflow < Igniter::Contract
243
+ correlate_by :application_id # uniquely identifies an in-flight execution
244
+
245
+ define do
246
+ input :application_id
247
+ input :applicant_name
248
+
249
+ # Execution suspends here until the named event is delivered
250
+ await :screening_result, event: :screening_completed
251
+ await :manager_review, event: :manager_reviewed
252
+
253
+ compute :decision, depends_on: %i[screening_result manager_review] do |screening_result:, manager_review:|
254
+ manager_review[:approved] && screening_result[:passed] ? :hired : :rejected
255
+ end
256
+
257
+ output :decision
258
+ end
259
+ end
260
+
261
+ store = Igniter::Runtime::Stores::MemoryStore.new
262
+
263
+ # Launch — suspends at the first await
264
+ exec = ApplicationReviewWorkflow.start({ application_id: "app-1", applicant_name: "Alice" }, store: store)
265
+
266
+ # Deliver events as they arrive (order does not matter)
267
+ ApplicationReviewWorkflow.deliver_event(:screening_completed,
268
+ correlation: { application_id: "app-1" },
269
+ payload: { passed: true, score: 92 },
270
+ store: store)
271
+
272
+ final = ApplicationReviewWorkflow.deliver_event(:manager_reviewed,
273
+ correlation: { application_id: "app-1" },
274
+ payload: { approved: true, note: "Strong candidate" },
275
+ store: store)
276
+
277
+ final.result.decision # => :hired
278
+ ```
279
+
280
+ Guideline:
281
+
282
+ - choose correlation keys that uniquely identify the in-flight instance
283
+ - deliver events from any process; the store is the coordination layer
284
+ - keep `await` payloads as plain hashes — they become the node's resolved value
285
+ - `on_success` / `on_exit` callbacks fire when the final event resolves the graph
286
+
287
+ ## 10. Remote Contract Composition
288
+
289
+ Use this when logic lives on a different service node and should be called over HTTP inside a graph.
290
+
291
+ Examples:
292
+
293
+ - [examples/server/](../examples/server/)
294
+
295
+ Key DSL:
296
+
297
+ ```ruby
298
+ require "igniter/server"
299
+
300
+ # ── Service node (runs on port 4568) ─────────────────────────────────────────
301
+
302
+ class ScoringContract < Igniter::Contract
303
+ define do
304
+ input :value
305
+ compute :score, depends_on: :value do |value:|
306
+ value * 1.5
307
+ end
308
+ output :score
309
+ end
310
+ end
311
+
312
+ Igniter::Server.configure do |c|
313
+ c.port = 4568
314
+ c.register "ScoringContract", ScoringContract
315
+ end
316
+ Igniter::Server.start # blocking
317
+
318
+ # ── Orchestrator node (runs on port 4567) ─────────────────────────────────────
319
+
320
+ class PipelineContract < Igniter::Contract
321
+ define do
322
+ input :data
323
+ remote :scored,
324
+ contract: "ScoringContract",
325
+ node: "http://localhost:4568",
326
+ inputs: { value: :data }
327
+ output :scored
328
+ end
329
+ end
330
+ ```
331
+
332
+ Guideline:
333
+
334
+ - validate the `node:` URL at compile time — the graph will reject bad URLs before runtime
335
+ - keep remote contracts on a shared input interface so they are easy to swap
336
+ - igniter-server is stateless over HTTP; use a shared store for distributed state
337
+ - start the service with `bin/igniter-server start --port 4568 --require ./contracts.rb`
338
+
339
+ ## 11. LLM Compute Node
340
+
341
+ Use this when a step requires a language model — classification, summarisation, drafting, or multi-step agent chains.
342
+
343
+ Examples:
344
+
345
+ - [llm/tool_use.rb](../examples/llm/tool_use.rb)
346
+
347
+ Key DSL:
348
+
349
+ ```ruby
350
+ require "igniter/integrations/llm"
351
+
352
+ Igniter::LLM.configure do |c|
353
+ c.default_provider = :anthropic
354
+ c.anthropic.api_key = ENV.fetch("ANTHROPIC_API_KEY")
355
+ end
356
+
357
+ class SummarizeExecutor < Igniter::LLM::Executor
358
+ provider :anthropic
359
+ model "claude-haiku-4-5-20251001"
360
+ system_prompt "Return a single concise sentence summary."
361
+
362
+ def call(text:)
363
+ complete("Summarize: #{text}")
364
+ end
365
+ end
366
+
367
+ class ArticleContract < Igniter::Contract
368
+ define do
369
+ input :text
370
+ compute :summary, depends_on: :text, with: SummarizeExecutor
371
+ output :summary
372
+ end
373
+ end
374
+
375
+ ArticleContract.new(text: "Long article...").result.summary
376
+ ```
377
+
378
+ For multi-turn conversations, use `Igniter::LLM::Context`:
379
+
380
+ ```ruby
381
+ def call(feedback:, category:)
382
+ ctx = Igniter::LLM::Context
383
+ .empty(system: self.class.system_prompt)
384
+ .append_user("Feedback: #{feedback}")
385
+ .append_user("Category: #{category}")
386
+ chat(context: ctx)
387
+ end
388
+ ```
389
+
390
+ For tool use (Anthropic function calling), declare tools at the class level:
391
+
392
+ ```ruby
393
+ class ClassifyExecutor < Igniter::LLM::Executor
394
+ tools({
395
+ name: "set_category",
396
+ description: "Record the detected category",
397
+ input_schema: { type: "object", properties: { category: { type: "string" } }, required: ["category"] }
398
+ })
399
+
400
+ def call(feedback:)
401
+ complete_with_tools("Classify: #{feedback}")
402
+ end
403
+ end
404
+ ```
405
+
406
+ Guideline:
407
+
408
+ - keep prompts inside the executor class, not scattered in the graph
409
+ - use `Context` when a step needs multi-turn history rather than a single prompt
410
+ - chain LLM executors as normal `compute` nodes — the graph handles ordering and caching
411
+ - mock the provider in tests and CI; real API calls belong in integration tests only