soprano-sdk 0.2.20__tar.gz → 0.2.22__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116) hide show
  1. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/PKG-INFO +145 -2
  2. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/README.md +144 -1
  3. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/return_workflow.yaml +7 -0
  4. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/pyproject.toml +1 -1
  5. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/core/constants.py +43 -0
  6. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/core/engine.py +182 -5
  7. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/nodes/collect_input.py +7 -2
  8. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/nodes/follow_up.py +6 -1
  9. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/tools.py +53 -9
  10. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/validation/schema.py +47 -0
  11. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_follow_up.py +7 -7
  12. soprano_sdk-0.2.22/tests/test_initial_context_override_bug.py +114 -0
  13. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_out_of_scope.py +7 -7
  14. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/.claude/settings.local.json +0 -0
  15. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/.github/workflows/test_build_and_publish.yaml +0 -0
  16. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/.gitignore +0 -0
  17. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/.python-version +0 -0
  18. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/CLAUDE.md +0 -0
  19. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/LICENSE +0 -0
  20. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/docs/framework_flow_diagrams.md +0 -0
  21. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/concert_booking/__init__.py +0 -0
  22. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/concert_booking/booking_helpers.py +0 -0
  23. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/concert_booking/concert_ticket_booking.yaml +0 -0
  24. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/concert_booking/concert_tools.py +0 -0
  25. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/concert_booking/example_runner.py +0 -0
  26. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/concert_booking/tool.py +0 -0
  27. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/framework_example.yaml +0 -0
  28. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/greeting_functions.py +0 -0
  29. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/greeting_workflow.yaml +0 -0
  30. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/main.py +0 -0
  31. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/payment_async_functions.py +0 -0
  32. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/payment_async_workflow.yaml +0 -0
  33. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/persistence/README.md +0 -0
  34. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/persistence/conversation_based.py +0 -0
  35. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/persistence/entity_based.py +0 -0
  36. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/persistence/mongodb_demo.py +0 -0
  37. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/return_functions.py +0 -0
  38. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/structured_output_example.yaml +0 -0
  39. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/supervisors/README.md +0 -0
  40. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/supervisors/crewai_supervisor_ui.py +0 -0
  41. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/supervisors/langgraph_supervisor_ui.py +0 -0
  42. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/supervisors/tools/__init__.py +0 -0
  43. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/supervisors/tools/crewai_tools.py +0 -0
  44. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/supervisors/tools/langgraph_tools.py +0 -0
  45. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/supervisors/workflow_tools.py +0 -0
  46. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/test_payment_async.py +0 -0
  47. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/tools/__init__.py +0 -0
  48. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/tools/address.py +0 -0
  49. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/examples/validator.py +0 -0
  50. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/legacy/langgraph_demo.py +0 -0
  51. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/legacy/langgraph_selfloop_demo.py +0 -0
  52. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/legacy/langgraph_v.py +0 -0
  53. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/legacy/main.py +0 -0
  54. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/legacy/return_fsm.excalidraw +0 -0
  55. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/legacy/return_state_machine.png +0 -0
  56. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/legacy/ui.py +0 -0
  57. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/scripts/visualize_workflow.py +0 -0
  58. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/scripts/workflow_demo.py +0 -0
  59. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/scripts/workflow_demo_ui.py +0 -0
  60. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/__init__.py +0 -0
  61. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/agents/__init__.py +0 -0
  62. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/agents/adaptor.py +0 -0
  63. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/agents/factory.py +0 -0
  64. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/agents/structured_output.py +0 -0
  65. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/authenticators/__init__.py +0 -0
  66. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/authenticators/mfa.py +0 -0
  67. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/core/__init__.py +0 -0
  68. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/core/rollback_strategies.py +0 -0
  69. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/core/state.py +0 -0
  70. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/engine.py +0 -0
  71. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/nodes/__init__.py +0 -0
  72. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/nodes/async_function.py +0 -0
  73. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/nodes/base.py +0 -0
  74. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/nodes/call_function.py +0 -0
  75. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/nodes/factory.py +0 -0
  76. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/routing/__init__.py +0 -0
  77. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/routing/router.py +0 -0
  78. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/utils/__init__.py +0 -0
  79. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/utils/function.py +0 -0
  80. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/utils/logger.py +0 -0
  81. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/utils/template.py +0 -0
  82. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/utils/tool.py +0 -0
  83. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/utils/tracing.py +0 -0
  84. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/validation/__init__.py +0 -0
  85. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/soprano_sdk/validation/validator.py +0 -0
  86. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/debug_jinja2.py +0 -0
  87. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_adaptor_logging.py +0 -0
  88. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_agent_factory.py +0 -0
  89. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_async_function.py +0 -0
  90. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_base_node.py +0 -0
  91. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_collect_input_refactor.py +0 -0
  92. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_engine_failure_message.py +0 -0
  93. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_external_values.py +0 -0
  94. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_inputs_validation.py +0 -0
  95. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_jinja2_path.py +0 -0
  96. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_jinja2_standalone.py +0 -0
  97. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_mfa_scenarios.py +0 -0
  98. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_persistence.py +0 -0
  99. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_structured_output.py +0 -0
  100. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_transition_routing.py +0 -0
  101. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/tests/test_workflow_tool_context_update.py +0 -0
  102. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/todo.md +0 -0
  103. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/.eslintrc.cjs +0 -0
  104. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/.gitignore +0 -0
  105. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/README.md +0 -0
  106. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/index.html +0 -0
  107. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/package-lock.json +0 -0
  108. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/package.json +0 -0
  109. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/src/App.jsx +0 -0
  110. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/src/CustomNode.jsx +0 -0
  111. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/src/StepDetailsModal.jsx +0 -0
  112. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/src/WorkflowGraph.jsx +0 -0
  113. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/src/WorkflowInfoPanel.jsx +0 -0
  114. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/src/assets/react.svg +0 -0
  115. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/src/main.jsx +0 -0
  116. {soprano_sdk-0.2.20 → soprano_sdk-0.2.22}/workflow-visualizer/vite.config.js +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: soprano-sdk
3
- Version: 0.2.20
3
+ Version: 0.2.22
4
4
  Summary: YAML-driven workflow engine with AI agent integration for building conversational SOPs
5
5
  Author: Arvind Thangamani
6
6
  License: MIT
@@ -54,6 +54,8 @@ A YAML-driven workflow engine with AI agent integration for building conversatio
54
54
  - **Follow-up Conversations**: Handle user follow-up questions with full workflow context
55
55
  - **Intent Detection**: Route users between collector nodes based on detected intent
56
56
  - **Out-of-Scope Detection**: Signal when user queries are unrelated to the current workflow
57
+ - **Outcome Humanization**: LLM-powered transformation of outcome messages into natural, context-aware responses
58
+ - **Per-Turn Localization**: Dynamic language and script switching for multi-language support
57
59
 
58
60
  ## Installation
59
61
 
@@ -364,7 +366,7 @@ Data collector and follow-up nodes can detect when user queries are unrelated to
364
366
  **Configuration:**
365
367
  ```yaml
366
368
  agent:
367
- detect_out_of_scope: true # Enabled by default
369
+ detect_out_of_scope: true # Disabled by default, set to true to enable
368
370
  scope_description: "collecting order information for returns" # Optional
369
371
  ```
370
372
 
@@ -373,6 +375,145 @@ agent:
373
375
  __OUT_OF_SCOPE_INTERRUPT__|{thread_id}|{workflow_name}|{"reason":"...","user_message":"..."}
374
376
  ```
375
377
 
378
+ ## Outcome Humanization
379
+
380
+ Outcome messages can be automatically humanized using an LLM to transform template-based messages into natural, context-aware responses. This feature uses the full conversation history to generate responses that match the tone and context of the interaction.
381
+
382
+ ### How It Works
383
+
384
+ 1. **Template rendering**: The outcome message template is first rendered with state values (e.g., `{{order_id}}` → `1234`)
385
+ 2. **LLM humanization**: The rendered message is passed to an LLM along with the conversation history
386
+ 3. **Natural response**: The LLM generates a warm, conversational response while preserving all factual details
387
+
388
+ ### Configuration
389
+
390
+ Humanization is **enabled by default**. Configure it at the workflow level:
391
+
392
+ ```yaml
393
+ name: "Return Processing Workflow"
394
+ version: "1.0"
395
+
396
+ # Humanization configuration (optional - enabled by default)
397
+ humanization_agent:
398
+ model: "gpt-4o" # Override model for humanization (optional)
399
+ base_url: "https://custom-api.com/v1" # Override base URL (optional)
400
+ instructions: | # Custom instructions (optional)
401
+ You are a friendly customer service representative.
402
+ Rewrite the message to be warm and empathetic.
403
+ Always thank the customer for their patience.
404
+
405
+ outcomes:
406
+ - id: success
407
+ type: success
408
+ message: "Return approved for order {{order_id}}. Reason: {{return_reason}}."
409
+
410
+ - id: technical_error
411
+ type: failure
412
+ humanize: false # Disable humanization for this specific outcome
413
+ message: "Error code: {{error_code}}. Contact support."
414
+ ```
415
+
416
+ ### Example Transformation
417
+
418
+ | Template Message | Humanized Response |
419
+ |-----------------|-------------------|
420
+ | `"Return approved for order 1234. Reason: damaged item."` | `"Great news! I've approved the return for your order #1234. I completely understand about the damaged item - that's so frustrating. You'll receive an email shortly with return instructions. Is there anything else I can help you with?"` |
421
+
422
+ ### Disabling Humanization
423
+
424
+ **Globally** (for entire workflow):
425
+ ```yaml
426
+ humanization_agent:
427
+ enabled: false
428
+ ```
429
+
430
+ **Per-outcome**:
431
+ ```yaml
432
+ outcomes:
433
+ - id: error_code
434
+ type: failure
435
+ humanize: false # Keep exact message for debugging/logging
436
+ message: "Error: {{error_code}}"
437
+ ```
438
+
439
+ ### Model Configuration
440
+
441
+ The humanization agent inherits the workflow's runtime `model_config`. You can override specific settings:
442
+
443
+ ```python
444
+ config = {
445
+ "model_config": {
446
+ "model_name": "gpt-4o-mini", # Base model for all agents
447
+ "api_key": os.getenv("OPENAI_API_KEY"),
448
+ }
449
+ }
450
+
451
+ # In YAML, humanization_agent.model overrides model_name for humanization only
452
+ ```
453
+
454
+ ## Per-Turn Localization
455
+
456
+ The framework supports per-turn localization, allowing dynamic language and script switching during workflow execution. Each call to `execute()` can specify a different target language/script.
457
+
458
+ ### How It Works
459
+
460
+ 1. **Per-turn parameters**: Pass `target_language` and `target_script` to `execute()`
461
+ 2. **Instruction injection**: Localization instructions are prepended to agent system prompts
462
+ 3. **No extra LLM calls**: The same agent that generates the response handles localization
463
+
464
+ ### Usage
465
+
466
+ **Per-turn language switching:**
467
+ ```python
468
+ from soprano_sdk import WorkflowTool
469
+
470
+ tool = WorkflowTool(
471
+ yaml_path="return_workflow.yaml",
472
+ name="return_processor",
473
+ description="Process returns",
474
+ checkpointer=checkpointer,
475
+ config=config
476
+ )
477
+
478
+ # Turn 1: English (no localization)
479
+ result = tool.execute(thread_id="123", user_message="hi")
480
+
481
+ # Turn 2: Switch to Tamil
482
+ result = tool.execute(
483
+ thread_id="123",
484
+ user_message="my order id is 1234",
485
+ target_language="Tamil",
486
+ target_script="Tamil"
487
+ )
488
+
489
+ # Turn 3: Back to English (no localization params)
490
+ result = tool.execute(thread_id="123", user_message="yes")
491
+ ```
492
+
493
+ ### YAML Defaults (Optional)
494
+
495
+ You can set default localization in the workflow YAML. These are used when `target_language`/`target_script` are not passed to `execute()`:
496
+
497
+ ```yaml
498
+ name: "Return Workflow"
499
+ version: "1.0"
500
+
501
+ localization:
502
+ language: "Tamil"
503
+ script: "Tamil"
504
+ instructions: | # Optional: custom instructions
505
+ Use formal Tamil suitable for customer service.
506
+ Always be polite and respectful.
507
+
508
+ # ... rest of workflow
509
+ ```
510
+
511
+ ### Key Points
512
+
513
+ - **Localization affects**: Data collector prompts, follow-up responses, and humanized outcome messages
514
+ - **Outcome messages require humanization**: If `humanize: false`, the outcome message is returned as the raw rendered template output, with no localization applied
515
+ - **Per-turn override**: Runtime parameters always override YAML defaults
516
+
376
517
  ## Examples
377
518
 
378
519
  See the `examples/` directory for complete workflow examples:
@@ -518,6 +659,8 @@ Contributions are welcome! Please open an issue or submit a pull request.
518
659
  - ✅ Thread ID strategies and examples
519
660
  - ✅ Follow-up node for conversational Q&A
520
661
  - ✅ Out-of-scope detection for multi-workflow routing
662
+ - ✅ Outcome humanization with LLM
663
+ - ✅ Per-turn localization for multi-language support
521
664
  - Additional action types (webhook, conditional branching, parallel execution)
522
665
  - More workflow examples (customer onboarding, support ticketing, approval flows)
523
666
  - Workflow testing utilities
@@ -13,6 +13,8 @@ A YAML-driven workflow engine with AI agent integration for building conversatio
13
13
  - **Follow-up Conversations**: Handle user follow-up questions with full workflow context
14
14
  - **Intent Detection**: Route users between collector nodes based on detected intent
15
15
  - **Out-of-Scope Detection**: Signal when user queries are unrelated to the current workflow
16
+ - **Outcome Humanization**: LLM-powered transformation of outcome messages into natural, context-aware responses
17
+ - **Per-Turn Localization**: Dynamic language and script switching for multi-language support
16
18
 
17
19
  ## Installation
18
20
 
@@ -323,7 +325,7 @@ Data collector and follow-up nodes can detect when user queries are unrelated to
323
325
  **Configuration:**
324
326
  ```yaml
325
327
  agent:
326
- detect_out_of_scope: true # Enabled by default
328
+ detect_out_of_scope: true # Disabled by default, set to true to enable
327
329
  scope_description: "collecting order information for returns" # Optional
328
330
  ```
329
331
 
@@ -332,6 +334,145 @@ agent:
332
334
  __OUT_OF_SCOPE_INTERRUPT__|{thread_id}|{workflow_name}|{"reason":"...","user_message":"..."}
333
335
  ```
334
336
 
337
+ ## Outcome Humanization
338
+
339
+ Outcome messages can be automatically humanized using an LLM to transform template-based messages into natural, context-aware responses. This feature uses the full conversation history to generate responses that match the tone and context of the interaction.
340
+
341
+ ### How It Works
342
+
343
+ 1. **Template rendering**: The outcome message template is first rendered with state values (e.g., `{{order_id}}` → `1234`)
344
+ 2. **LLM humanization**: The rendered message is passed to an LLM along with the conversation history
345
+ 3. **Natural response**: The LLM generates a warm, conversational response while preserving all factual details
346
+
347
+ ### Configuration
348
+
349
+ Humanization is **enabled by default**. Configure it at the workflow level:
350
+
351
+ ```yaml
352
+ name: "Return Processing Workflow"
353
+ version: "1.0"
354
+
355
+ # Humanization configuration (optional - enabled by default)
356
+ humanization_agent:
357
+ model: "gpt-4o" # Override model for humanization (optional)
358
+ base_url: "https://custom-api.com/v1" # Override base URL (optional)
359
+ instructions: | # Custom instructions (optional)
360
+ You are a friendly customer service representative.
361
+ Rewrite the message to be warm and empathetic.
362
+ Always thank the customer for their patience.
363
+
364
+ outcomes:
365
+ - id: success
366
+ type: success
367
+ message: "Return approved for order {{order_id}}. Reason: {{return_reason}}."
368
+
369
+ - id: technical_error
370
+ type: failure
371
+ humanize: false # Disable humanization for this specific outcome
372
+ message: "Error code: {{error_code}}. Contact support."
373
+ ```
374
+
375
+ ### Example Transformation
376
+
377
+ | Template Message | Humanized Response |
378
+ |-----------------|-------------------|
379
+ | `"Return approved for order 1234. Reason: damaged item."` | `"Great news! I've approved the return for your order #1234. I completely understand about the damaged item - that's so frustrating. You'll receive an email shortly with return instructions. Is there anything else I can help you with?"` |
380
+
381
+ ### Disabling Humanization
382
+
383
+ **Globally** (for entire workflow):
384
+ ```yaml
385
+ humanization_agent:
386
+ enabled: false
387
+ ```
388
+
389
+ **Per-outcome**:
390
+ ```yaml
391
+ outcomes:
392
+ - id: error_code
393
+ type: failure
394
+ humanize: false # Keep exact message for debugging/logging
395
+ message: "Error: {{error_code}}"
396
+ ```
397
+
398
+ ### Model Configuration
399
+
400
+ The humanization agent inherits the workflow's runtime `model_config`. You can override specific settings:
401
+
402
+ ```python
403
+ config = {
404
+ "model_config": {
405
+ "model_name": "gpt-4o-mini", # Base model for all agents
406
+ "api_key": os.getenv("OPENAI_API_KEY"),
407
+ }
408
+ }
409
+
410
+ # In YAML, humanization_agent.model overrides model_name for humanization only
411
+ ```
412
+
413
+ ## Per-Turn Localization
414
+
415
+ The framework supports per-turn localization, allowing dynamic language and script switching during workflow execution. Each call to `execute()` can specify a different target language/script.
416
+
417
+ ### How It Works
418
+
419
+ 1. **Per-turn parameters**: Pass `target_language` and `target_script` to `execute()`
420
+ 2. **Instruction injection**: Localization instructions are prepended to agent system prompts
421
+ 3. **No extra LLM calls**: The same agent that generates the response handles localization
422
+
423
+ ### Usage
424
+
425
+ **Per-turn language switching:**
426
+ ```python
427
+ from soprano_sdk import WorkflowTool
428
+
429
+ tool = WorkflowTool(
430
+ yaml_path="return_workflow.yaml",
431
+ name="return_processor",
432
+ description="Process returns",
433
+ checkpointer=checkpointer,
434
+ config=config
435
+ )
436
+
437
+ # Turn 1: English (no localization)
438
+ result = tool.execute(thread_id="123", user_message="hi")
439
+
440
+ # Turn 2: Switch to Tamil
441
+ result = tool.execute(
442
+ thread_id="123",
443
+ user_message="my order id is 1234",
444
+ target_language="Tamil",
445
+ target_script="Tamil"
446
+ )
447
+
448
+ # Turn 3: Back to English (no localization params)
449
+ result = tool.execute(thread_id="123", user_message="yes")
450
+ ```
451
+
452
+ ### YAML Defaults (Optional)
453
+
454
+ You can set default localization in the workflow YAML. These are used when `target_language`/`target_script` are not passed to `execute()`:
455
+
456
+ ```yaml
457
+ name: "Return Workflow"
458
+ version: "1.0"
459
+
460
+ localization:
461
+ language: "Tamil"
462
+ script: "Tamil"
463
+ instructions: | # Optional: custom instructions
464
+ Use formal Tamil suitable for customer service.
465
+ Always be polite and respectful.
466
+
467
+ # ... rest of workflow
468
+ ```
469
+
470
+ ### Key Points
471
+
472
+ - **Localization affects**: Data collector prompts, follow-up responses, and humanized outcome messages
473
+ - **Outcome messages require humanization**: If `humanize: false`, the outcome message is returned as the raw rendered template output, with no localization applied
474
+ - **Per-turn override**: Runtime parameters always override YAML defaults
475
+
335
476
  ## Examples
336
477
 
337
478
  See the `examples/` directory for complete workflow examples:
@@ -477,6 +618,8 @@ Contributions are welcome! Please open an issue or submit a pull request.
477
618
  - ✅ Thread ID strategies and examples
478
619
  - ✅ Follow-up node for conversational Q&A
479
620
  - ✅ Out-of-scope detection for multi-workflow routing
621
+ - ✅ Outcome humanization with LLM
622
+ - ✅ Per-turn localization for multi-language support
480
623
  - Additional action types (webhook, conditional branching, parallel execution)
481
624
  - More workflow examples (customer onboarding, support ticketing, approval flows)
482
625
  - Workflow testing utilities
@@ -2,6 +2,13 @@ name: "Return Processing Workflow"
2
2
  description: "AI-powered workflow for processing customer returns"
3
3
  version: "1.0"
4
4
 
5
+ # LLM-powered humanization for outcome messages
6
+ humanization_agent:
7
+ instructions: |
8
+ You are a friendly customer service representative. Rewrite the message to be warm,
9
+ empathetic, and conversational while keeping all the important details.
10
+ Match the tone of the conversation history.
11
+
5
12
  data:
6
13
  - name: order_id
7
14
  type: text
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "soprano-sdk"
7
- version = "0.2.20"
7
+ version = "0.2.22"
8
8
  description = "YAML-driven workflow engine with AI agent integration for building conversational SOPs"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.12"
@@ -17,6 +17,8 @@ class WorkflowKeys:
17
17
  NODE_FIELD_MAP = '_node_field_map'
18
18
  COMPUTED_FIELDS = '_computed_fields'
19
19
  ERROR = 'error'
20
+ TARGET_LANGUAGE = '_target_language'
21
+ TARGET_SCRIPT = '_target_script'
20
22
 
21
23
 
22
24
  class ActionType(Enum):
@@ -70,6 +72,47 @@ DEFAULT_TIMEOUT = 300
70
72
  MAX_ATTEMPTS_MESSAGE = "I'm having trouble understanding your {field}. Please contact customer service for assistance."
71
73
  WORKFLOW_COMPLETE_MESSAGE = "Workflow completed."
72
74
 
75
+ # Humanization defaults
76
+ DEFAULT_HUMANIZATION_ENABLED = True
77
+ DEFAULT_HUMANIZATION_SYSTEM_PROMPT = """You are a helpful assistant that transforms template-based messages into natural, conversational responses.
78
+
79
+ Your task:
80
+ 1. Take the reference message provided and rewrite it naturally
81
+ 2. Maintain ALL factual information and important details from the reference
82
+ 3. Use the conversation history for context and tone matching
83
+ 4. Be warm, professional, and helpful
84
+ 5. Keep the response concise but complete
85
+
86
+ Reference message to humanize:
87
+ {reference_message}
88
+
89
+ Respond with ONLY the humanized message. Do not add explanations or meta-commentary."""
90
+
91
+
92
+ class HumanizationKeys:
93
+ """Keys for humanization agent configuration"""
94
+ ENABLED = 'enabled'
95
+ MODEL = 'model'
96
+ BASE_URL = 'base_url'
97
+ INSTRUCTIONS = 'instructions'
98
+
99
+
100
+ # Localization defaults
101
+ DEFAULT_LOCALIZATION_INSTRUCTIONS = """LANGUAGE REQUIREMENT:
102
+ You MUST respond in {language} using {script} script.
103
+ - All your responses must be in {language}
104
+ - Use the {script} writing system
105
+ - Maintain the same meaning and tone as you would in English
106
+ - Do not mix languages unless quoting the user
107
+ """
108
+
109
+
110
+ class LocalizationKeys:
111
+ """Keys for localization configuration"""
112
+ LANGUAGE = 'language'
113
+ SCRIPT = 'script'
114
+ INSTRUCTIONS = 'instructions'
115
+
73
116
 
74
117
  class MFAConfig(BaseSettings):
75
118
  """
@@ -1,4 +1,4 @@
1
- from typing import Optional, Dict, Any, Tuple
1
+ from typing import Optional, Dict, Any, Tuple, List
2
2
 
3
3
  import yaml
4
4
  from jinja2 import Environment
@@ -7,7 +7,16 @@ from langgraph.constants import START
7
7
  from langgraph.graph import StateGraph
8
8
  from langgraph.graph.state import CompiledStateGraph
9
9
 
10
- from .constants import WorkflowKeys, MFAConfig
10
+ from .constants import (
11
+ WorkflowKeys,
12
+ MFAConfig,
13
+ DEFAULT_HUMANIZATION_ENABLED,
14
+ DEFAULT_HUMANIZATION_SYSTEM_PROMPT,
15
+ HumanizationKeys,
16
+ DEFAULT_LOCALIZATION_INSTRUCTIONS,
17
+ LocalizationKeys
18
+ )
19
+ from ..agents.factory import AgentFactory
11
20
  from .state import create_state_model
12
21
  from ..nodes.factory import NodeFactory
13
22
  from ..routing.router import WorkflowRouter
@@ -148,6 +157,167 @@ class WorkflowEngine:
148
157
  except Exception as e:
149
158
  raise RuntimeError(f"Failed to build workflow graph: {e}")
150
159
 
160
+ def _aggregate_conversation_history(self, state: Dict[str, Any]) -> List[Dict[str, str]]:
161
+ """Aggregate all conversations from collector nodes in execution order."""
162
+ conversations = state.get(WorkflowKeys.CONVERSATIONS, {})
163
+ node_order = state.get(WorkflowKeys.NODE_EXECUTION_ORDER, [])
164
+ node_field_map = state.get(WorkflowKeys.NODE_FIELD_MAP, {})
165
+
166
+ aggregated = []
167
+ for node_id in node_order:
168
+ field = node_field_map.get(node_id)
169
+ if field:
170
+ conv_key = f"{field}_conversation"
171
+ if conv_messages := conversations.get(conv_key):
172
+ aggregated.extend(conv_messages)
173
+
174
+ return aggregated
175
+
176
+ def _get_humanization_config(self) -> Dict[str, Any]:
177
+ """Get humanization agent configuration from workflow config."""
178
+ return self.config.get('humanization_agent', {})
179
+
180
+ def _should_humanize_outcome(self, outcome: Dict[str, Any]) -> bool:
181
+ """Determine if an outcome should be humanized."""
182
+ # Check workflow-level setting (default: enabled)
183
+ workflow_humanization = self._get_humanization_config()
184
+ workflow_enabled = workflow_humanization.get(
185
+ HumanizationKeys.ENABLED,
186
+ DEFAULT_HUMANIZATION_ENABLED
187
+ )
188
+
189
+ if not workflow_enabled:
190
+ return False
191
+
192
+ # Check per-outcome setting (default: True, inherit from workflow)
193
+ return outcome.get('humanize', True)
194
+
195
+ def _get_humanization_model_config(self) -> Optional[Dict[str, Any]]:
196
+ """Get model config for humanization, with overrides applied."""
197
+ model_config = self.get_config_value('model_config')
198
+ if not model_config:
199
+ return None
200
+
201
+ humanization_config = self._get_humanization_config()
202
+ model_config = model_config.copy() # Don't mutate original
203
+
204
+ # Apply overrides from humanization_agent config
205
+ if model := humanization_config.get(HumanizationKeys.MODEL):
206
+ model_config['model_name'] = model
207
+ if base_url := humanization_config.get(HumanizationKeys.BASE_URL):
208
+ model_config['base_url'] = base_url
209
+
210
+ return model_config
211
+
212
+ def _get_localization_config(self) -> Dict[str, Any]:
213
+ """Get localization configuration from workflow config."""
214
+ return self.config.get('localization', {})
215
+
216
+ def get_localization_instructions(self, state: Dict[str, Any]) -> str:
217
+ """Get localization instructions based on state (per-turn) or YAML defaults.
218
+
219
+ Args:
220
+ state: Current workflow state containing per-turn language/script values
221
+
222
+ Returns:
223
+ Localization instructions string to prepend to agent prompts, or empty string if no localization
224
+ """
225
+ # First check state for per-turn values
226
+ language = state.get(WorkflowKeys.TARGET_LANGUAGE)
227
+ script = state.get(WorkflowKeys.TARGET_SCRIPT)
228
+
229
+ # Fall back to YAML defaults if not in state
230
+ yaml_config = self._get_localization_config()
231
+ if not language:
232
+ language = yaml_config.get(LocalizationKeys.LANGUAGE)
233
+ if not script:
234
+ script = yaml_config.get(LocalizationKeys.SCRIPT)
235
+
236
+ # No localization if neither specified
237
+ if not language and not script:
238
+ return ""
239
+
240
+ # Use custom instructions if provided in YAML
241
+ custom_instructions = yaml_config.get(LocalizationKeys.INSTRUCTIONS)
242
+ if custom_instructions:
243
+ return custom_instructions.format(
244
+ language=language or "the target language",
245
+ script=script or "the appropriate script"
246
+ )
247
+
248
+ return DEFAULT_LOCALIZATION_INSTRUCTIONS.format(
249
+ language=language or "the target language",
250
+ script=script or "the appropriate script"
251
+ )
252
+
253
+ def _humanize_message(self, reference_message: str, state: Dict[str, Any]) -> str:
254
+ """Use LLM to humanize the reference message using conversation context."""
255
+ try:
256
+ model_config = self._get_humanization_model_config()
257
+ if not model_config:
258
+ logger.warning("No model_config found, skipping humanization")
259
+ return reference_message
260
+
261
+ humanization_config = self._get_humanization_config()
262
+
263
+ # Build system prompt
264
+ custom_instructions = humanization_config.get(HumanizationKeys.INSTRUCTIONS)
265
+ if custom_instructions:
266
+ system_prompt = f"{custom_instructions}\n\nReference message to humanize:\n{reference_message}"
267
+ else:
268
+ system_prompt = DEFAULT_HUMANIZATION_SYSTEM_PROMPT.format(
269
+ reference_message=reference_message
270
+ )
271
+
272
+ # Inject localization instructions if specified
273
+ localization_instructions = self.get_localization_instructions(state)
274
+ if localization_instructions:
275
+ system_prompt = f"{localization_instructions}\n\n{system_prompt}"
276
+
277
+ # Aggregate conversation history
278
+ conversation_history = self._aggregate_conversation_history(state)
279
+
280
+ # Create agent for humanization
281
+ framework = self.get_config_value('agent_framework', 'langgraph')
282
+ agent = AgentFactory.create_agent(
283
+ framework=framework,
284
+ name="HumanizationAgent",
285
+ model_config=model_config,
286
+ tools=[],
287
+ system_prompt=system_prompt,
288
+ structured_output_model=None
289
+ )
290
+
291
+ # Invoke agent with conversation history
292
+ if conversation_history:
293
+ messages = conversation_history + [
294
+ {"role": "user", "content": "Please humanize the reference message based on our conversation."}
295
+ ]
296
+ else:
297
+ messages = [
298
+ {"role": "user", "content": "Please humanize the reference message."}
299
+ ]
300
+
301
+ humanized_response = agent.invoke(messages)
302
+
303
+ # Handle different response types
304
+ if isinstance(humanized_response, dict):
305
+ humanized_message = humanized_response.get('content', str(humanized_response))
306
+ else:
307
+ humanized_message = str(humanized_response)
308
+
309
+ # Validate we got a meaningful response
310
+ if not humanized_message or humanized_message.strip() == '':
311
+ logger.warning("Humanization returned empty response, using original message")
312
+ return reference_message
313
+
314
+ logger.info(f"Message humanized successfully: {humanized_message[:100]}...")
315
+ return humanized_message
316
+
317
+ except Exception as e:
318
+ logger.warning(f"Humanization failed, using original message: {e}")
319
+ return reference_message
320
+
151
321
  def get_outcome_message(self, state: Dict[str, Any]) -> str:
152
322
  outcome_id = state.get(WorkflowKeys.OUTCOME_ID)
153
323
  step_id = state.get(WorkflowKeys.STEP_ID)
@@ -156,9 +326,16 @@ class WorkflowEngine:
156
326
  if outcome and 'message' in outcome:
157
327
  message = outcome['message']
158
328
  template_loader = self.get_config_value("template_loader", Environment())
159
- message = template_loader.from_string(message).render(state)
160
- logger.info(f"Outcome message generated in step {step_id}: {message}")
161
- return message
329
+ rendered_message = template_loader.from_string(message).render(state)
330
+
331
+ # Apply humanization if enabled for this outcome
332
+ if self._should_humanize_outcome(outcome):
333
+ final_message = self._humanize_message(rendered_message, state)
334
+ else:
335
+ final_message = rendered_message
336
+
337
+ logger.info(f"Outcome message generated in step {step_id}: {final_message}")
338
+ return final_message
162
339
 
163
340
  if error := state.get("error"):
164
341
  logger.info(f"Outcome error found in step {step_id}: {error}")
@@ -122,8 +122,8 @@ class CollectInputStrategy(ActionStrategy):
122
122
  self.next_step = self.step_config.get("next", None)
123
123
  self.is_structured_output = self.agent_config.get("structured_output", {}).get("enabled", False)
124
124
 
125
- # Out-of-scope detection configuration (enabled by default)
126
- self.enable_out_of_scope = self.agent_config.get("detect_out_of_scope", True)
125
+ # Out-of-scope detection configuration (disabled by default)
126
+ self.enable_out_of_scope = self.agent_config.get("detect_out_of_scope", False)
127
127
  self.scope_description = self.agent_config.get(
128
128
  "scope_description",
129
129
  self.agent_config.get("description", f"collecting {self.field}")
@@ -348,6 +348,11 @@ class CollectInputStrategy(ActionStrategy):
348
348
 
349
349
  instructions = self._render_template_string(instructions, state)
350
350
 
351
+ # Inject localization instructions at the start (per-turn)
352
+ localization_instructions = self.engine_context.get_localization_instructions(state)
353
+ if localization_instructions:
354
+ instructions = f"{localization_instructions}\n\n{instructions}"
355
+
351
356
  if collector_nodes:
352
357
  collector_nodes_for_intent_change = {
353
358
  node_id: node_desc for node_id, node_desc in collector_nodes.items()
@@ -103,7 +103,7 @@ class FollowUpStrategy(ActionStrategy):
103
103
  'closure_patterns',
104
104
  DEFAULT_CLOSURE_PATTERNS
105
105
  )
106
- self.enable_out_of_scope = self.agent_config.get('detect_out_of_scope', True)
106
+ self.enable_out_of_scope = self.agent_config.get('detect_out_of_scope', False)
107
107
  self.scope_description = self.agent_config.get(
108
108
  'scope_description',
109
109
  self.agent_config.get('description', 'answering follow-up questions')
@@ -318,6 +318,11 @@ class FollowUpStrategy(ActionStrategy):
318
318
  scope_description=self.scope_description
319
319
  )
320
320
 
321
+ # Inject localization instructions at the start (per-turn)
322
+ localization_instructions = self.engine_context.get_localization_instructions(state)
323
+ if localization_instructions:
324
+ instructions = f"{localization_instructions}\n\n{instructions}"
325
+
321
326
  framework = self.engine_context.get_config_value('agent_framework', 'langgraph')
322
327
 
323
328
  return AgentFactory.create_agent(