ruby_llm-contract 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89) hide show
  1. checksums.yaml +7 -0
  2. data/.rspec +3 -0
  3. data/.rubocop.yml +55 -0
  4. data/CHANGELOG.md +76 -0
  5. data/Gemfile +11 -0
  6. data/Gemfile.lock +176 -0
  7. data/LICENSE +21 -0
  8. data/README.md +154 -0
  9. data/Rakefile +8 -0
  10. data/examples/00_basics.rb +500 -0
  11. data/examples/01_classify_threads.rb +220 -0
  12. data/examples/02_generate_comment.rb +203 -0
  13. data/examples/03_target_audience.rb +201 -0
  14. data/examples/04_real_llm.rb +410 -0
  15. data/examples/05_output_schema.rb +258 -0
  16. data/examples/07_keyword_extraction.rb +239 -0
  17. data/examples/08_translation.rb +353 -0
  18. data/examples/09_eval_dataset.rb +287 -0
  19. data/examples/10_reddit_full_showcase.rb +363 -0
  20. data/examples/README.md +140 -0
  21. data/lib/ruby_llm/contract/adapters/base.rb +13 -0
  22. data/lib/ruby_llm/contract/adapters/response.rb +17 -0
  23. data/lib/ruby_llm/contract/adapters/ruby_llm.rb +94 -0
  24. data/lib/ruby_llm/contract/adapters/test.rb +44 -0
  25. data/lib/ruby_llm/contract/adapters.rb +6 -0
  26. data/lib/ruby_llm/contract/concerns/deep_symbolize.rb +17 -0
  27. data/lib/ruby_llm/contract/concerns/eval_host.rb +109 -0
  28. data/lib/ruby_llm/contract/concerns/trace_equality.rb +15 -0
  29. data/lib/ruby_llm/contract/concerns/usage_aggregator.rb +43 -0
  30. data/lib/ruby_llm/contract/configuration.rb +21 -0
  31. data/lib/ruby_llm/contract/contract/definition.rb +39 -0
  32. data/lib/ruby_llm/contract/contract/invariant.rb +23 -0
  33. data/lib/ruby_llm/contract/contract/parser.rb +143 -0
  34. data/lib/ruby_llm/contract/contract/schema_validator.rb +239 -0
  35. data/lib/ruby_llm/contract/contract/validator.rb +104 -0
  36. data/lib/ruby_llm/contract/contract.rb +7 -0
  37. data/lib/ruby_llm/contract/cost_calculator.rb +38 -0
  38. data/lib/ruby_llm/contract/dsl.rb +13 -0
  39. data/lib/ruby_llm/contract/errors.rb +19 -0
  40. data/lib/ruby_llm/contract/eval/case_result.rb +76 -0
  41. data/lib/ruby_llm/contract/eval/contract_detail_builder.rb +47 -0
  42. data/lib/ruby_llm/contract/eval/dataset.rb +53 -0
  43. data/lib/ruby_llm/contract/eval/eval_definition.rb +112 -0
  44. data/lib/ruby_llm/contract/eval/evaluation_result.rb +27 -0
  45. data/lib/ruby_llm/contract/eval/evaluator/exact.rb +20 -0
  46. data/lib/ruby_llm/contract/eval/evaluator/json_includes.rb +58 -0
  47. data/lib/ruby_llm/contract/eval/evaluator/proc_evaluator.rb +40 -0
  48. data/lib/ruby_llm/contract/eval/evaluator/regex.rb +27 -0
  49. data/lib/ruby_llm/contract/eval/model_comparison.rb +80 -0
  50. data/lib/ruby_llm/contract/eval/pipeline_result_adapter.rb +15 -0
  51. data/lib/ruby_llm/contract/eval/report.rb +115 -0
  52. data/lib/ruby_llm/contract/eval/runner.rb +162 -0
  53. data/lib/ruby_llm/contract/eval/trait_evaluator.rb +75 -0
  54. data/lib/ruby_llm/contract/eval.rb +16 -0
  55. data/lib/ruby_llm/contract/pipeline/base.rb +62 -0
  56. data/lib/ruby_llm/contract/pipeline/result.rb +131 -0
  57. data/lib/ruby_llm/contract/pipeline/runner.rb +139 -0
  58. data/lib/ruby_llm/contract/pipeline/trace.rb +72 -0
  59. data/lib/ruby_llm/contract/pipeline.rb +6 -0
  60. data/lib/ruby_llm/contract/prompt/ast.rb +38 -0
  61. data/lib/ruby_llm/contract/prompt/builder.rb +47 -0
  62. data/lib/ruby_llm/contract/prompt/node.rb +25 -0
  63. data/lib/ruby_llm/contract/prompt/nodes/example_node.rb +27 -0
  64. data/lib/ruby_llm/contract/prompt/nodes/rule_node.rb +15 -0
  65. data/lib/ruby_llm/contract/prompt/nodes/section_node.rb +26 -0
  66. data/lib/ruby_llm/contract/prompt/nodes/system_node.rb +15 -0
  67. data/lib/ruby_llm/contract/prompt/nodes/user_node.rb +15 -0
  68. data/lib/ruby_llm/contract/prompt/nodes.rb +7 -0
  69. data/lib/ruby_llm/contract/prompt/renderer.rb +76 -0
  70. data/lib/ruby_llm/contract/railtie.rb +20 -0
  71. data/lib/ruby_llm/contract/rake_task.rb +78 -0
  72. data/lib/ruby_llm/contract/rspec/pass_eval.rb +96 -0
  73. data/lib/ruby_llm/contract/rspec/satisfy_contract.rb +31 -0
  74. data/lib/ruby_llm/contract/rspec.rb +6 -0
  75. data/lib/ruby_llm/contract/step/base.rb +138 -0
  76. data/lib/ruby_llm/contract/step/dsl.rb +144 -0
  77. data/lib/ruby_llm/contract/step/limit_checker.rb +64 -0
  78. data/lib/ruby_llm/contract/step/result.rb +38 -0
  79. data/lib/ruby_llm/contract/step/retry_executor.rb +90 -0
  80. data/lib/ruby_llm/contract/step/retry_policy.rb +76 -0
  81. data/lib/ruby_llm/contract/step/runner.rb +126 -0
  82. data/lib/ruby_llm/contract/step/trace.rb +70 -0
  83. data/lib/ruby_llm/contract/step.rb +10 -0
  84. data/lib/ruby_llm/contract/token_estimator.rb +19 -0
  85. data/lib/ruby_llm/contract/types.rb +11 -0
  86. data/lib/ruby_llm/contract/version.rb +7 -0
  87. data/lib/ruby_llm/contract.rb +108 -0
  88. data/ruby_llm-contract.gemspec +33 -0
  89. metadata +172 -0
@@ -0,0 +1,500 @@
1
# frozen_string_literal: true

# =============================================================================
# EXAMPLE 0: From zero to ruby_llm-contract
#
# Starting from the simplest case — a plain string prompt —
# and adding one layer at a time.
# =============================================================================

require_relative "../lib/ruby_llm/contract"

# Setup: test adapter returns canned responses (no real LLM needed).
# Each later step reconfigures this adapter with the canned JSON it expects.
RubyLLM::Contract.configure do |config|
  config.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: '{"sentiment": "positive"}'
  )
end
18
+
19
# =============================================================================
# STEP 1: Simplest possible step — plain string prompt
#
# BEFORE (typical Rails code):
#
#   prompt = "Classify the sentiment of this text as positive, negative, or neutral. Return JSON."
#   response = OpenAI::Client.new.chat(messages: [{role: "user", content: prompt + "\n\n" + text}])
#   JSON.parse(response.dig("choices", 0, "message", "content"))
#
# Or with ruby_llm (one-liner, but still no validation):
#
#   RubyLLM.chat.ask("Classify the sentiment: #{text}")
#
# PROBLEM: no validation, no types, no trace, no structure
# =============================================================================

# Option A: with output_schema (recommended — simplest)
class SimpleSentiment < RubyLLM::Contract::Step::Base
  input_type String # plain Ruby class works!

  output_schema do
    string :sentiment
  end

  prompt do
    # {input} is interpolated with the argument passed to .run
    user "Classify the sentiment of this text as positive, negative, or neutral. Return JSON.\n\n{input}"
  end
end

result = SimpleSentiment.run("I love this product!")
result.status        # => :ok
result.parsed_output # => {sentiment: "positive"}

# Option B: with output_type (plain Ruby class — JSON parsing is implicit for Hash)
class SimpleSentimentDryTypes < RubyLLM::Contract::Step::Base
  input_type String
  output_type Hash

  prompt do
    user "Classify the sentiment of this text as positive, negative, or neutral. Return JSON.\n\n{input}"
  end
end

result = SimpleSentimentDryTypes.run("I love this product!")
result.status        # => :ok
result.parsed_output # => {sentiment: "positive"}
65
+
66
# =============================================================================
# STEP 2: Add system message — separate instructions from data
#
# BEFORE:
#   Everything in one string — instructions and data mixed together
#
# AFTER:
#   system = instructions (constant)
#   user   = data (variable)
# =============================================================================

class SentimentWithSystem < RubyLLM::Contract::Step::Base
  input_type String

  output_schema do
    string :sentiment
  end

  prompt do
    system "Classify the sentiment of the user's text."
    user "{input}"
  end
end

result = SentimentWithSystem.run("I love this product!")
result.status        # => :ok
result.parsed_output # => {sentiment: "positive"}
93
+
94
# =============================================================================
# STEP 3: Add rules — clear instructions for the model
#
# Rules are individual requirements. One rule per concern.
# Much clearer than a single wall of text.
# =============================================================================

# Reconfigure the test adapter: this step expects confidence in the response.
RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: '{"sentiment": "positive", "confidence": 0.95}'
  )
end

class SentimentWithRules < RubyLLM::Contract::Step::Base
  input_type String

  output_schema do
    string :sentiment, enum: %w[positive negative neutral]
    number :confidence, minimum: 0.0, maximum: 1.0
  end

  prompt do
    system "You are a sentiment classifier."
    rule "Return JSON only."
    rule "Use exactly one of: positive, negative, neutral."
    rule "Include a confidence score from 0.0 to 1.0."
    user "{input}"
  end
end

result = SentimentWithRules.run("I love this product!")
result.status        # => :ok
result.parsed_output # => {sentiment: "positive", confidence: 0.95}
127
+
128
# =============================================================================
# STEP 4: Add invariants — custom business logic on top of schema
#
# Schema handles structure (enums, ranges). Invariants handle logic
# that schema can't express: conditional rules, cross-field checks, etc.
# =============================================================================

class SentimentValidated < RubyLLM::Contract::Step::Base
  input_type String

  output_schema do
    string :sentiment, enum: %w[positive negative neutral]
    number :confidence, minimum: 0.0, maximum: 1.0
  end

  prompt do
    system "You are a sentiment classifier."
    rule "Return JSON only."
    rule "Use exactly one of: positive, negative, neutral."
    rule "Include a confidence score from 0.0 to 1.0."
    user "{input}"
  end

  # Schema already enforces enum + range. Validate adds custom logic:
  # an extreme sentiment (positive/negative) must come with confidence >= 0.7;
  # neutral passes unconditionally via the early `next true`.
  validate("high confidence required for extreme sentiments") do |o|
    next true unless %w[positive negative].include?(o[:sentiment])

    o[:confidence] >= 0.7
  end
end

# Happy path:
result = SentimentValidated.run("I love this product!")
result.status        # => :ok
result.parsed_output # => {sentiment: "positive", confidence: 0.95}

# Model returns low confidence for extreme sentiment — invariant catches it:
RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: '{"sentiment": "positive", "confidence": 0.3}'
  )
end

result = SentimentValidated.run("I love this product!")
result.status            # => :validation_failed
result.validation_errors # => ["high confidence required for extreme sentiments"]

# Model returns non-JSON:
RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: "I think it's positive"
  )
end

result = SentimentValidated.run("I love this product!")
result.status            # => :parse_error
result.validation_errors # => ["Failed to parse JSON: ..."]
185
+
186
# =============================================================================
# STEP 5: Add examples — show the model what you expect
#
# Few-shot: provide example input → output pairs.
# The model better understands the expected format.
# =============================================================================

RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: '{"sentiment": "positive", "confidence": 0.92}'
  )
end

class SentimentWithExample < RubyLLM::Contract::Step::Base
  input_type String

  output_schema do
    string :sentiment, enum: %w[positive negative neutral]
    number :confidence, minimum: 0.0, maximum: 1.0
  end

  prompt do
    system "You are a sentiment classifier."
    rule "Return JSON only."
    rule "Use exactly one of: positive, negative, neutral."
    rule "Include a confidence score from 0.0 to 1.0."
    # Few-shot pairs rendered into the prompt before the user message:
    example input: "This is terrible", output: '{"sentiment": "negative", "confidence": 0.9}'
    example input: "It works fine I guess", output: '{"sentiment": "neutral", "confidence": 0.6}'
    user "{input}"
  end
end

result = SentimentWithExample.run("I love this product!")
result.status        # => :ok
result.parsed_output # => {sentiment: "positive", confidence: 0.92}
221
+
222
# =============================================================================
# STEP 6: Sections — replace heredoc string with structured AST
#
# BEFORE (typical heredoc prompt — one big string):
#
#   prompt = <<~PROMPT                                      # AFTER:
#     You are a sentiment classifier for customer support.  #   system "You are a sentiment classifier for customer support."
#     Return JSON with sentiment, confidence, and reason.   #   rule "Return JSON with sentiment, confidence, and reason."
#                                                           #
#     [CONTEXT]                                             #   section "CONTEXT",
#     We sell software for freelancers.                     #     "We sell software for freelancers."
#                                                           #
#     [SCORING GUIDE]                                       #   section "SCORING GUIDE",
#     negative = complaint or frustration                   #     "negative = complaint or frustration\n
#     positive = praise or thanks                           #      positive = praise or thanks\n
#     neutral = question or factual statement               #      neutral = question or factual statement"
#                                                           #
#     Classify this: #{text}                                #   user "Classify this: {input}"
#   PROMPT                                                  #
#
# PROBLEM: one big string — can't reorder, diff, or reuse individual sections
# AFTER: each part is a separate node in the prompt AST
# =============================================================================

RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: '{"sentiment": "negative", "confidence": 0.85, "reason": "product complaint"}'
  )
end

class SentimentWithSections < RubyLLM::Contract::Step::Base
  input_type String

  output_schema do
    string :sentiment, enum: %w[positive negative neutral]
    number :confidence, minimum: 0.0, maximum: 1.0
    string :reason
  end

  prompt do
    system "You are a sentiment classifier for customer support."
    rule "Return JSON with sentiment, confidence, and reason."

    # Labeled context blocks — individually reorderable and reusable:
    section "CONTEXT", "We sell software for freelancers."
    section "SCORING GUIDE",
            "negative = complaint or frustration\npositive = praise or thanks\nneutral = question or factual statement"

    user "Classify this: {input}"
  end
end

result = SentimentWithSections.run("Your billing page is broken again!")
result.status        # => :ok
result.parsed_output # => {sentiment: "negative", confidence: 0.85, reason: "product complaint"}
276
+
277
# =============================================================================
# STEP 7: Hash input — multiple fields with auto-interpolation
#
# When input is a Hash, each key becomes a template variable.
# {title} resolves to input[:title], {language} to input[:language], etc.
# No manual string building needed.
# =============================================================================

RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: '{"category": "billing", "priority": "high"}'
  )
end

class ClassifyTicket < RubyLLM::Contract::Step::Base
  # Structured input: every key is type-checked before the prompt is built.
  input_type RubyLLM::Contract::Types::Hash.schema(
    title: RubyLLM::Contract::Types::String,
    body: RubyLLM::Contract::Types::String,
    language: RubyLLM::Contract::Types::String
  )

  output_schema do
    string :category, enum: %w[billing technical feature_request other]
    string :priority, enum: %w[low medium high urgent]
  end

  prompt do
    system "You classify customer support tickets."
    rule "Return JSON with category and priority."
    rule "Respond in {language}."
    user "Title: {title}\n\nBody: {body}"
  end
end

result = ClassifyTicket.run(
  { title: "Can't update credit card", body: "Payment page gives error 500", language: "en" }
)
result.status        # => :ok
result.parsed_output # => {category: "billing", priority: "high"}
316
+
317
# =============================================================================
# STEP 8: 2-arity invariants — validate output against input
#
# Sometimes you need to check that the output is consistent with the input.
# A 2-arity invariant receives both |output, input| so you can cross-validate.
# =============================================================================

RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: '{"translation": "Bonjour le monde", "source_lang": "en", "target_lang": "fr"}'
  )
end

class Translate < RubyLLM::Contract::Step::Base
  input_type RubyLLM::Contract::Types::Hash.schema(
    text: RubyLLM::Contract::Types::String,
    target_lang: RubyLLM::Contract::Types::String
  )

  output_schema do
    string :translation, min_length: 1
    string :source_lang
    string :target_lang
  end

  prompt do
    system "Translate the text to the target language."
    rule "Return JSON with translation, source_lang, and target_lang."
    user "Translate to {target_lang}: {text}"
  end

  # Schema handles: translation non-empty, all fields present
  # 2-arity validate: cross-validate output against input
  validate("target_lang must match requested language") do |output, input|
    output[:target_lang] == input[:target_lang]
  end
end

result = Translate.run({ text: "Hello world", target_lang: "fr" })
result.status        # => :ok
result.parsed_output # => {translation: "Bonjour le monde", source_lang: "en", target_lang: "fr"}

# What if model returns wrong target language?
RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: '{"translation": "Hola mundo", "source_lang": "en", "target_lang": "es"}'
  )
end

result = Translate.run({ text: "Hello world", target_lang: "fr" })
result.status            # => :validation_failed
result.validation_errors # => ["target_lang must match requested language"]
369
+
370
# =============================================================================
# STEP 9: Context override — per-run adapter and model
#
# Global config sets defaults. You can override per call via context.
# Useful for: testing, switching models, A/B testing prompts.
# =============================================================================

RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(response: '{"sentiment": "positive"}')
  c.default_model = "gpt-4.1-mini"
end

# Uses global defaults:
result = SimpleSentiment.run("I love this product!")
result.status        # => :ok
result.trace[:model] # => "gpt-4.1-mini"

# Override adapter and model for this specific call:
other_adapter = RubyLLM::Contract::Adapters::Test.new(response: '{"sentiment": "neutral"}')
result = SimpleSentiment.run("I love this product!", context: { adapter: other_adapter, model: "gpt-5" })
result.status        # => :ok
result.parsed_output # => {sentiment: "neutral"}
result.trace[:model] # => "gpt-5"
393
+
394
+ # =============================================================================
395
+ # STEP 10: StepResult — everything you get back from a run
396
+ #
397
+ # Every .run() returns a StepResult with status, output, errors, and trace.
398
+ # =============================================================================
399
+
400
+ adapter = RubyLLM::Contract::Adapters::Test.new(response: '{"sentiment": "positive", "confidence": 0.92}')
401
+ result = SentimentValidated.run("I love this product!", context: { adapter: adapter, model: "gpt-4.1-mini" })
402
+
403
+ result.status # => :ok
404
+ result.ok? # => true
405
+ result.failed? # => false
406
+ result.raw_output # => '{"sentiment": "positive", "confidence": 0.92}'
407
+ result.parsed_output # => {sentiment: "positive", confidence: 0.92}
408
+ result.validation_errors # => []
409
+ result.trace[:model] # => "gpt-4.1-mini"
410
+ result.trace[:latency_ms] # => 0 (instant with test adapter)
411
+ result.trace[:messages] # => [{role: :system, content: "..."}, {role: :user, content: "..."}]
412
+
413
+ # On failure, you still get everything for debugging:
414
+ bad_adapter = RubyLLM::Contract::Adapters::Test.new(response: '{"sentiment": "positive", "confidence": 0.1}')
415
+ result = SentimentValidated.run("I love this product!", context: { adapter: bad_adapter })
416
+
417
+ result.status # => :validation_failed
418
+ result.ok? # => false
419
+ result.failed? # => true
420
+ result.raw_output # => '{"sentiment": "positive", "confidence": 0.1}'
421
+ result.parsed_output # => {sentiment: "positive", confidence: 0.1}
422
+ result.validation_errors # => ["high confidence required for extreme sentiments"]
423
+
424
# =============================================================================
# STEP 11: Pipeline — chain multiple steps with fail-fast
#
# Pipeline::Base composes steps into a sequence.
# Output of step N automatically becomes input to step N+1.
# If any step fails, execution halts immediately.
# =============================================================================

# Step A: classify sentiment
class PipelineSentiment < RubyLLM::Contract::Step::Base
  input_type String

  output_schema do
    string :text
    string :sentiment, enum: %w[positive negative neutral]
  end

  prompt do
    system "Classify sentiment and return the original text."
    user "{input}"
  end
end

# Step B: generate a response based on sentiment.
# input_type Hash: receives the parsed output of the previous step.
class PipelineRespond < RubyLLM::Contract::Step::Base
  input_type Hash

  output_schema do
    string :response
    string :tone
  end

  prompt do
    system "Generate a customer support response matching the sentiment."
    user "Text: {text}\nSentiment: {sentiment}"
  end
end

# Pipeline: sentiment → respond
class SupportPipeline < RubyLLM::Contract::Pipeline::Base
  step PipelineSentiment, as: :classify
  step PipelineRespond, as: :respond
end

# Happy path:
RubyLLM::Contract.configure do |c|
  c.default_adapter = RubyLLM::Contract::Adapters::Test.new(
    response: '{"text": "I love this product!", "sentiment": "positive"}'
  )
end

# NOTE: with Test adapter, both steps get the same canned response.
# With a real LLM, each step would get a different response.
result = SupportPipeline.run("I love this product!")
result.ok?                        # => true
result.outputs_by_step[:classify] # => {text: "I love this product!", sentiment: "positive"}
result.outputs_by_step[:respond]  # => {text: "I love this product!", sentiment: "positive"}
result.step_results.length        # => 2
482
+
483
+ # =============================================================================
484
+ # SUMMARY
485
+ #
486
+ # Step 1: user "{input}" + output_schema — plain string prompt, declared output
487
+ # Step 2: system + user — separate instructions from data
488
+ # Step 3: + rules — explicit instructions, one per concern
489
+ # Step 4: + invariants — custom business logic on top
490
+ # Step 5: + examples — few-shot
491
+ # Step 6: + sections — labeled context blocks
492
+ # Step 7: Hash input — multiple fields, auto-interpolation
493
+ # Step 8: 2-arity invariants — cross-validate output vs input
494
+ # Step 9: context override — per-run adapter and model
495
+ # Step 10: StepResult — full status, output, errors, trace
496
+ # Step 11: Pipeline — chain steps with fail-fast
497
+ #
498
+ # Each step adds one layer. Use as many as you need.
499
+ # Even Step 1 gives you: typed input, JSON parsing, and trace.
500
+ # =============================================================================