soprano-sdk 0.2.19__tar.gz → 0.2.21__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/PKG-INFO +251 -1
  2. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/README.md +250 -0
  3. soprano_sdk-0.2.21/docs/framework_flow_diagrams.md +430 -0
  4. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/return_workflow.yaml +7 -0
  5. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/pyproject.toml +1 -1
  6. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/agents/structured_output.py +5 -1
  7. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/core/constants.py +45 -0
  8. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/core/engine.py +183 -5
  9. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/core/state.py +2 -2
  10. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/nodes/base.py +6 -1
  11. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/nodes/collect_input.py +85 -7
  12. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/nodes/factory.py +2 -0
  13. soprano_sdk-0.2.21/soprano_sdk/nodes/follow_up.py +351 -0
  14. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/routing/router.py +6 -2
  15. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/tools.py +55 -8
  16. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/validation/schema.py +51 -0
  17. soprano_sdk-0.2.21/tests/test_base_node.py +232 -0
  18. soprano_sdk-0.2.21/tests/test_engine_failure_message.py +335 -0
  19. soprano_sdk-0.2.21/tests/test_follow_up.py +473 -0
  20. soprano_sdk-0.2.21/tests/test_out_of_scope.py +529 -0
  21. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/.claude/settings.local.json +0 -0
  22. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/.github/workflows/test_build_and_publish.yaml +0 -0
  23. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/.gitignore +0 -0
  24. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/.python-version +0 -0
  25. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/CLAUDE.md +0 -0
  26. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/LICENSE +0 -0
  27. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/concert_booking/__init__.py +0 -0
  28. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/concert_booking/booking_helpers.py +0 -0
  29. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/concert_booking/concert_ticket_booking.yaml +0 -0
  30. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/concert_booking/concert_tools.py +0 -0
  31. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/concert_booking/example_runner.py +0 -0
  32. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/concert_booking/tool.py +0 -0
  33. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/framework_example.yaml +0 -0
  34. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/greeting_functions.py +0 -0
  35. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/greeting_workflow.yaml +0 -0
  36. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/main.py +0 -0
  37. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/payment_async_functions.py +0 -0
  38. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/payment_async_workflow.yaml +0 -0
  39. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/persistence/README.md +0 -0
  40. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/persistence/conversation_based.py +0 -0
  41. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/persistence/entity_based.py +0 -0
  42. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/persistence/mongodb_demo.py +0 -0
  43. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/return_functions.py +0 -0
  44. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/structured_output_example.yaml +0 -0
  45. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/supervisors/README.md +0 -0
  46. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/supervisors/crewai_supervisor_ui.py +0 -0
  47. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/supervisors/langgraph_supervisor_ui.py +0 -0
  48. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/supervisors/tools/__init__.py +0 -0
  49. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/supervisors/tools/crewai_tools.py +0 -0
  50. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/supervisors/tools/langgraph_tools.py +0 -0
  51. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/supervisors/workflow_tools.py +0 -0
  52. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/test_payment_async.py +0 -0
  53. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/tools/__init__.py +0 -0
  54. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/tools/address.py +0 -0
  55. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/examples/validator.py +0 -0
  56. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/legacy/langgraph_demo.py +0 -0
  57. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/legacy/langgraph_selfloop_demo.py +0 -0
  58. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/legacy/langgraph_v.py +0 -0
  59. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/legacy/main.py +0 -0
  60. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/legacy/return_fsm.excalidraw +0 -0
  61. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/legacy/return_state_machine.png +0 -0
  62. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/legacy/ui.py +0 -0
  63. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/scripts/visualize_workflow.py +0 -0
  64. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/scripts/workflow_demo.py +0 -0
  65. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/scripts/workflow_demo_ui.py +0 -0
  66. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/__init__.py +0 -0
  67. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/agents/__init__.py +0 -0
  68. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/agents/adaptor.py +0 -0
  69. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/agents/factory.py +0 -0
  70. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/authenticators/__init__.py +0 -0
  71. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/authenticators/mfa.py +0 -0
  72. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/core/__init__.py +0 -0
  73. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/core/rollback_strategies.py +0 -0
  74. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/engine.py +0 -0
  75. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/nodes/__init__.py +0 -0
  76. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/nodes/async_function.py +0 -0
  77. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/nodes/call_function.py +0 -0
  78. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/routing/__init__.py +0 -0
  79. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/utils/__init__.py +0 -0
  80. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/utils/function.py +0 -0
  81. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/utils/logger.py +0 -0
  82. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/utils/template.py +0 -0
  83. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/utils/tool.py +0 -0
  84. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/utils/tracing.py +0 -0
  85. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/validation/__init__.py +0 -0
  86. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/soprano_sdk/validation/validator.py +0 -0
  87. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/debug_jinja2.py +0 -0
  88. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_adaptor_logging.py +0 -0
  89. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_agent_factory.py +0 -0
  90. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_async_function.py +0 -0
  91. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_collect_input_refactor.py +0 -0
  92. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_external_values.py +0 -0
  93. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_inputs_validation.py +0 -0
  94. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_jinja2_path.py +0 -0
  95. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_jinja2_standalone.py +0 -0
  96. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_mfa_scenarios.py +0 -0
  97. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_persistence.py +0 -0
  98. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_structured_output.py +0 -0
  99. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_transition_routing.py +0 -0
  100. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/tests/test_workflow_tool_context_update.py +0 -0
  101. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/todo.md +0 -0
  102. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/.eslintrc.cjs +0 -0
  103. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/.gitignore +0 -0
  104. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/README.md +0 -0
  105. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/index.html +0 -0
  106. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/package-lock.json +0 -0
  107. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/package.json +0 -0
  108. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/src/App.jsx +0 -0
  109. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/src/CustomNode.jsx +0 -0
  110. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/src/StepDetailsModal.jsx +0 -0
  111. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/src/WorkflowGraph.jsx +0 -0
  112. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/src/WorkflowInfoPanel.jsx +0 -0
  113. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/src/assets/react.svg +0 -0
  114. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/src/main.jsx +0 -0
  115. {soprano_sdk-0.2.19 → soprano_sdk-0.2.21}/workflow-visualizer/vite.config.js +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: soprano-sdk
- Version: 0.2.19
+ Version: 0.2.21
  Summary: YAML-driven workflow engine with AI agent integration for building conversational SOPs
  Author: Arvind Thangamani
  License: MIT
@@ -51,6 +51,11 @@ A YAML-driven workflow engine with AI agent integration for building conversatio
  - **External Context Injection**: Support for pre-populated fields from external orchestrators
  - **Pattern Matching**: Flexible transition logic based on patterns and conditions
  - **Visualization**: Generate workflow graphs as images or Mermaid diagrams
+ - **Follow-up Conversations**: Handle user follow-up questions with full workflow context
+ - **Intent Detection**: Route users between collector nodes based on detected intent
+ - **Out-of-Scope Detection**: Signal when user queries are unrelated to the current workflow
+ - **Outcome Humanization**: LLM-powered transformation of outcome messages into natural, context-aware responses
+ - **Per-Turn Localization**: Dynamic language and script switching for multi-language support

  ## Installation

@@ -268,6 +273,247 @@ Calls a Python function with workflow state.
  next: failure_step
  ```

+ ### call_async_function
+
+ Calls an async function that may return a pending status, triggering an interrupt until the async operation completes.
+
+ ```yaml
+ - id: verify_payment
+   action: call_async_function
+   function: "payments.start_verification"
+   output: verification_result
+   transitions:
+     - condition: "verified"
+       next: payment_approved
+     - condition: "failed"
+       next: payment_rejected
+ ```
+
+ ### follow_up
+
+ Handles follow-up questions from users. Unlike `collect_input_with_agent` where the agent asks first, here the **user initiates** by asking questions. The agent responds using full workflow context.
+
+ ```yaml
+ - id: handle_questions
+   action: follow_up
+   next: final_confirmation      # Where to go when user says "done"
+   closure_patterns:             # Optional: customize closure detection
+     - "ok"
+     - "thank you"
+     - "done"
+   agent:
+     name: "FollowUpAssistant"
+     model: "gpt-4o-mini"
+     description: "Answering questions about the order"
+     instructions: |
+       Help the user with any questions about their order.
+       Be concise and helpful.
+     detect_out_of_scope: true   # Signal when user asks unrelated questions
+   transitions:                  # Optional: route based on patterns
+     - pattern: "ROUTE_TO_PAYMENT:"
+       next: payment_step
+ ```
+
+ **Key features:**
+ - **User initiates**: No initial prompt - waits for user to ask a question
+ - **Full state context**: Agent sees all collected workflow data
+ - **Closure detection**: Detects "ok", "thanks", "done" → proceeds to next step
+ - **Intent change**: Routes to collector nodes when user wants to change data
+ - **Out-of-scope**: Signals to parent orchestrator for unrelated queries
+
+ ## Interrupt Types
+
+ The workflow engine uses three interrupt types to pause execution and communicate with the caller:
+
+ | Type | Marker | Triggered By | Use Case |
+ |------|--------|--------------|----------|
+ | **USER_INPUT** | `__WORKFLOW_INTERRUPT__` | `collect_input_with_agent`, `follow_up` | Waiting for user input |
+ | **ASYNC** | `__ASYNC_INTERRUPT__` | `call_async_function` | Waiting for async operation callback |
+ | **OUT_OF_SCOPE** | `__OUT_OF_SCOPE_INTERRUPT__` | `collect_input_with_agent`, `follow_up` | User query unrelated to current task |
+
+ ### Handling Interrupts
+
+ ```python
+ from langgraph.types import Command  # used to resume an interrupted graph
+
+ result = graph.invoke({}, config=config)
+
+ if "__interrupt__" in result and result["__interrupt__"]:
+     interrupt_value = result["__interrupt__"][0].value
+
+     # Check interrupt type
+     if isinstance(interrupt_value, dict):
+         if interrupt_value.get("type") == "async":
+             # Async interrupt - wait for external callback
+             pending_metadata = interrupt_value.get("pending")
+             # ... handle async operation ...
+             result = graph.invoke(Command(resume=async_result), config=config)
+
+         elif interrupt_value.get("type") == "out_of_scope":
+             # Out-of-scope - user asking unrelated question
+             reason = interrupt_value.get("reason")
+             user_message = interrupt_value.get("user_message")
+             # ... route to different workflow or handle appropriately ...
+     else:
+         # User input interrupt - prompt is a string
+         prompt = interrupt_value
+         user_input = input(f"Bot: {prompt}\nYou: ")
+         result = graph.invoke(Command(resume=user_input), config=config)
+ ```
+
+ ### Out-of-Scope Detection
+
+ Data collector and follow-up nodes can detect when user queries are unrelated to the current task. This is useful for multi-workflow systems where a supervisor agent needs to route users to different SOPs.
+
+ **Configuration:**
+ ```yaml
+ agent:
+   detect_out_of_scope: true   # Disabled by default, set to true to enable
+   scope_description: "collecting order information for returns"   # Optional
+ ```
+
+ **Response format:**
+ ```
+ __OUT_OF_SCOPE_INTERRUPT__|{thread_id}|{workflow_name}|{"reason":"...","user_message":"..."}
+ ```
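The marker above is a single pipe-delimited string, so a caller that receives workflow output as plain text has to split it before routing. Below is a minimal, hypothetical parsing sketch; the helper name and return shape are illustrative and not part of soprano-sdk, and it assumes the JSON payload is everything after the third pipe:

```python
import json

OUT_OF_SCOPE_MARKER = "__OUT_OF_SCOPE_INTERRUPT__"

def parse_out_of_scope(response: str):
    """Split the out-of-scope marker string into its parts.

    Returns None if the response is not an out-of-scope signal.
    Hypothetical helper, not part of the SDK.
    """
    if not response.startswith(OUT_OF_SCOPE_MARKER):
        return None
    # marker | thread_id | workflow_name | JSON payload (may itself contain "|")
    _, thread_id, workflow_name, payload = response.split("|", 3)
    details = json.loads(payload)
    return {
        "thread_id": thread_id,
        "workflow_name": workflow_name,
        "reason": details.get("reason"),
        "user_message": details.get("user_message"),
    }

# Example: a supervisor agent could use the parsed signal to hand the
# user over to a different SOP:
# signal = parse_out_of_scope(result_text)
# if signal:
#     route_to_another_workflow(signal["user_message"])  # hypothetical router
```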
+
+ ## Outcome Humanization
+
+ Outcome messages can be automatically humanized using an LLM to transform template-based messages into natural, context-aware responses. This feature uses the full conversation history to generate responses that match the tone and context of the interaction.
+
+ ### How It Works
+
+ 1. **Template rendering**: The outcome message template is first rendered with state values (e.g., `{{order_id}}` → `1234`)
+ 2. **LLM humanization**: The rendered message is passed to an LLM along with the conversation history
+ 3. **Natural response**: The LLM generates a warm, conversational response while preserving all factual details (see the sketch below)
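As an informal illustration of those two stages (not the SDK's actual implementation), the flow amounts to a Jinja2 render followed by a single chat-completion call. The function below and its prompt wording are assumptions made for the sketch:

```python
from jinja2 import Template
from openai import OpenAI

def humanize_outcome(message_template: str, state: dict, history: list,
                     model: str = "gpt-4o-mini") -> str:
    """Render the outcome template, then ask an LLM to rephrase it naturally.

    Hypothetical sketch of the two-stage flow described above; the real engine
    wires this through the workflow's humanization_agent configuration.
    """
    # Stage 1: template rendering with collected state values
    rendered = Template(message_template).render(**state)

    # Stage 2: LLM humanization with the conversation history as context
    client = OpenAI()
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": (
                "Rewrite the final message so it sounds natural and matches the "
                "tone of the conversation. Preserve every factual detail.")},
            *history,  # prior user/assistant turns as chat messages
            {"role": "user", "content": f"Message to rewrite: {rendered}"},
        ],
    )
    return response.choices[0].message.content

# humanize_outcome("Return approved for order {{order_id}}.",
#                  {"order_id": 1234}, history)
```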
+
+ ### Configuration
+
+ Humanization is **enabled by default**. Configure it at the workflow level:
+
+ ```yaml
+ name: "Return Processing Workflow"
+ version: "1.0"
+
+ # Humanization configuration (optional - enabled by default)
+ humanization_agent:
+   model: "gpt-4o"                         # Override model for humanization (optional)
+   base_url: "https://custom-api.com/v1"   # Override base URL (optional)
+   instructions: |                         # Custom instructions (optional)
+     You are a friendly customer service representative.
+     Rewrite the message to be warm and empathetic.
+     Always thank the customer for their patience.
+
+ outcomes:
+   - id: success
+     type: success
+     message: "Return approved for order {{order_id}}. Reason: {{return_reason}}."
+
+   - id: technical_error
+     type: failure
+     humanize: false   # Disable humanization for this specific outcome
+     message: "Error code: {{error_code}}. Contact support."
+ ```
+
+ ### Example Transformation
+
+ | Template Message | Humanized Response |
+ |-----------------|-------------------|
+ | `"Return approved for order 1234. Reason: damaged item."` | `"Great news! I've approved the return for your order #1234. I completely understand about the damaged item - that's so frustrating. You'll receive an email shortly with return instructions. Is there anything else I can help you with?"` |
+
+ ### Disabling Humanization
+
+ **Globally** (for entire workflow):
+ ```yaml
+ humanization_agent:
+   enabled: false
+ ```
+
+ **Per-outcome**:
+ ```yaml
+ outcomes:
+   - id: error_code
+     type: failure
+     humanize: false   # Keep exact message for debugging/logging
+     message: "Error: {{error_code}}"
+ ```
+
+ ### Model Configuration
+
+ The humanization agent inherits the workflow's runtime `model_config`. You can override specific settings:
+
+ ```python
+ config = {
+     "model_config": {
+         "model_name": "gpt-4o-mini",   # Base model for all agents
+         "api_key": os.getenv("OPENAI_API_KEY"),
+     }
+ }
+
+ # In YAML, humanization_agent.model overrides model_name for humanization only
+ ```
+
+ ## Per-Turn Localization
+
+ The framework supports per-turn localization, allowing dynamic language and script switching during workflow execution. Each call to `execute()` can specify a different target language/script.
+
+ ### How It Works
+
+ 1. **Per-turn parameters**: Pass `target_language` and `target_script` to `execute()`
+ 2. **Instruction injection**: Localization instructions are prepended to agent system prompts (see the sketch after this list)
+ 3. **No extra LLM calls**: The same agent that generates the response handles localization
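For a rough idea of what step 2 means in practice, a localization instruction can simply be prepended to the agent's system prompt before the turn runs. The helper below is an illustrative assumption, not the engine's actual code:

```python
def inject_localization(system_prompt: str,
                        target_language: str = None,
                        target_script: str = None) -> str:
    """Prepend a per-turn localization instruction to an agent's system prompt.

    Illustrative only - the real engine builds this from the YAML `localization`
    block and the per-turn execute() parameters.
    """
    if not target_language:
        return system_prompt  # no localization requested this turn
    script = target_script or target_language
    instruction = (
        f"Respond in {target_language}, written in the {script} script. "
        "Keep identifiers such as order IDs unchanged."
    )
    return f"{instruction}\n\n{system_prompt}"

# Example: the same agent prompt, localized only for this turn
# prompt = inject_localization(base_prompt, "Tamil", "Tamil")
```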
+
+ ### Usage
+
+ **Per-turn language switching:**
+ ```python
+ from soprano_sdk import WorkflowTool
+
+ tool = WorkflowTool(
+     yaml_path="return_workflow.yaml",
+     name="return_processor",
+     description="Process returns",
+     checkpointer=checkpointer,
+     config=config
+ )
+
+ # Turn 1: English (no localization)
+ result = tool.execute(thread_id="123", user_message="hi")
+
+ # Turn 2: Switch to Tamil
+ result = tool.execute(
+     thread_id="123",
+     user_message="my order id is 1234",
+     target_language="Tamil",
+     target_script="Tamil"
+ )
+
+ # Turn 3: Back to English (no localization params)
+ result = tool.execute(thread_id="123", user_message="yes")
+ ```
+
+ ### YAML Defaults (Optional)
+
+ You can set default localization in the workflow YAML. These are used when `target_language`/`target_script` are not passed to `execute()`:
+
+ ```yaml
+ name: "Return Workflow"
+ version: "1.0"
+
+ localization:
+   language: "Tamil"
+   script: "Tamil"
+   instructions: |   # Optional: custom instructions
+     Use formal Tamil suitable for customer service.
+     Always be polite and respectful.
+
+ # ... rest of workflow
+ ```
+
+ ### Key Points
+
+ - **Localization affects**: Data collector prompts, follow-up responses, and humanized outcome messages
+ - **Outcome messages require humanization**: If `humanize: false`, outcome messages stay in English (template output)
+ - **Per-turn override**: Runtime parameters always override YAML defaults
+
  ## Examples

  See the `examples/` directory for complete workflow examples:
@@ -411,6 +657,10 @@ Contributions are welcome! Please open an issue or submit a pull request.
  - ✅ Database persistence (SqliteSaver, PostgresSaver supported)
  - ✅ Pluggable checkpointer system
  - ✅ Thread ID strategies and examples
+ - ✅ Follow-up node for conversational Q&A
+ - ✅ Out-of-scope detection for multi-workflow routing
+ - ✅ Outcome humanization with LLM
+ - ✅ Per-turn localization for multi-language support
  - Additional action types (webhook, conditional branching, parallel execution)
  - More workflow examples (customer onboarding, support ticketing, approval flows)
  - Workflow testing utilities