soprano-sdk 0.2.11__tar.gz → 0.2.13__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/.gitignore +4 -1
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/PKG-INFO +2 -1
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/payment_async_functions.py +18 -18
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/test_payment_async.py +47 -22
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/pyproject.toml +2 -1
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/agents/adaptor.py +3 -3
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/core/engine.py +11 -1
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/nodes/collect_input.py +4 -1
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/tools.py +63 -4
- soprano_sdk-0.2.13/tests/test_adaptor_logging.py +173 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_collect_input_refactor.py +36 -4
- soprano_sdk-0.2.13/tests/test_workflow_tool_context_update.py +304 -0
- soprano_sdk-0.2.11/examples/ASYNC_FUNCTIONS_README.md +0 -414
- soprano_sdk-0.2.11/uv.lock +0 -5163
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/.github/workflows/test_build_and_publish.yaml +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/.python-version +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/CLAUDE.md +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/LICENSE +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/README.md +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/concert_booking/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/concert_booking/booking_helpers.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/concert_booking/concert_ticket_booking.yaml +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/framework_example.yaml +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/greeting_functions.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/greeting_workflow.yaml +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/main.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/payment_async_workflow.yaml +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/persistence/README.md +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/persistence/conversation_based.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/persistence/entity_based.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/persistence/mongodb_demo.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/return_functions.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/return_workflow.yaml +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/structured_output_example.yaml +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/supervisors/README.md +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/supervisors/crewai_supervisor_ui.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/supervisors/langgraph_supervisor_ui.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/supervisors/tools/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/supervisors/tools/crewai_tools.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/supervisors/tools/langgraph_tools.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/supervisors/workflow_tools.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/tools/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/tools/address.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/examples/validator.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/legacy/langgraph_demo.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/legacy/langgraph_selfloop_demo.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/legacy/langgraph_v.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/legacy/main.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/legacy/return_fsm.excalidraw +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/legacy/return_state_machine.png +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/legacy/ui.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/scripts/visualize_workflow.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/scripts/workflow_demo.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/scripts/workflow_demo_ui.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/agents/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/agents/factory.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/agents/structured_output.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/authenticators/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/authenticators/mfa.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/core/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/core/constants.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/core/rollback_strategies.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/core/state.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/engine.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/nodes/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/nodes/async_function.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/nodes/base.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/nodes/call_function.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/nodes/factory.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/routing/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/routing/router.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/utils/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/utils/function.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/utils/logger.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/utils/template.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/utils/tool.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/utils/tracing.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/validation/__init__.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/validation/schema.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/soprano_sdk/validation/validator.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/debug_jinja2.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_agent_factory.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_async_function.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_external_values.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_inputs_validation.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_jinja2_path.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_jinja2_standalone.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_mfa_scenarios.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_persistence.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_structured_output.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/tests/test_transition_routing.py +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/todo.md +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/.eslintrc.cjs +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/.gitignore +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/README.md +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/index.html +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/package-lock.json +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/package.json +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/src/App.jsx +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/src/CustomNode.jsx +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/src/StepDetailsModal.jsx +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/src/WorkflowGraph.jsx +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/src/WorkflowInfoPanel.jsx +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/src/assets/react.svg +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/src/main.jsx +0 -0
- {soprano_sdk-0.2.11 → soprano_sdk-0.2.13}/workflow-visualizer/vite.config.js +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: soprano-sdk
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.13
|
|
4
4
|
Summary: YAML-driven workflow engine with AI agent integration for building conversational SOPs
|
|
5
5
|
Author: Arvind Thangamani
|
|
6
6
|
License: MIT
|
|
@@ -29,6 +29,7 @@ Requires-Dist: pyyaml>=6.0
|
|
|
29
29
|
Provides-Extra: dev
|
|
30
30
|
Requires-Dist: gradio>=5.46.0; extra == 'dev'
|
|
31
31
|
Requires-Dist: pytest>=7.0.0; extra == 'dev'
|
|
32
|
+
Requires-Dist: ruff==0.14.13; extra == 'dev'
|
|
32
33
|
Provides-Extra: persistence
|
|
33
34
|
Requires-Dist: langgraph-checkpoint-mongodb>=0.2.0; extra == 'persistence'
|
|
34
35
|
Requires-Dist: pymongo>=4.0.0; extra == 'persistence'
|
|
@@ -67,13 +67,13 @@ def verify_payment(state: Dict[str, Any]) -> Dict[str, Any]:
|
|
|
67
67
|
job_id = f"pay_verify_{uuid.uuid4().hex[:8]}"
|
|
68
68
|
|
|
69
69
|
print(f"\n{'='*60}")
|
|
70
|
-
print(
|
|
70
|
+
print("PAYMENT VERIFICATION - PHASE 1 (Initial Call)")
|
|
71
71
|
print(f"{'='*60}")
|
|
72
72
|
print(f"Job ID: {job_id}")
|
|
73
73
|
print(f"Amount: ${payment_amount}")
|
|
74
74
|
print(f"Method: {payment_method}")
|
|
75
|
-
print(
|
|
76
|
-
print(
|
|
75
|
+
print("Action: Initiating async verification...")
|
|
76
|
+
print("\nReturning PENDING status - workflow will interrupt")
|
|
77
77
|
print(f"{'='*60}\n")
|
|
78
78
|
|
|
79
79
|
# In a real system, you would:
|
|
@@ -204,18 +204,18 @@ def check_fraud(state: Dict[str, Any]) -> Dict[str, Any]:
|
|
|
204
204
|
detection_id = f"fraud_check_{uuid.uuid4().hex[:8]}"
|
|
205
205
|
|
|
206
206
|
print(f"\n{'='*60}")
|
|
207
|
-
print(
|
|
207
|
+
print("FRAUD DETECTION - PHASE 1 (Initial Call)")
|
|
208
208
|
print(f"{'='*60}")
|
|
209
209
|
print(f"Detection ID: {detection_id}")
|
|
210
210
|
print(f"Amount: ${payment_amount}")
|
|
211
211
|
print(f"Verification ID: {verification_result.get('verification_id', 'N/A')}")
|
|
212
|
-
print(
|
|
213
|
-
print(
|
|
214
|
-
print(
|
|
215
|
-
print(
|
|
216
|
-
print(
|
|
217
|
-
print(
|
|
218
|
-
print(
|
|
212
|
+
print("Action: Initiating async fraud detection...")
|
|
213
|
+
print("\nChecks to run:")
|
|
214
|
+
print(" - IP Geolocation analysis")
|
|
215
|
+
print(" - Velocity checking")
|
|
216
|
+
print(" - Device fingerprinting")
|
|
217
|
+
print(" - Behavioral analysis")
|
|
218
|
+
print("\nReturning PENDING status - workflow will interrupt")
|
|
219
219
|
print(f"{'='*60}\n")
|
|
220
220
|
|
|
221
221
|
# In a real system:
|
|
@@ -305,14 +305,14 @@ def print_resume_instructions(pending_result: Dict[str, Any], function_name: str
|
|
|
305
305
|
print(f"\n{'='*60}")
|
|
306
306
|
print(f"WORKFLOW INTERRUPTED - Waiting for {function_name}")
|
|
307
307
|
print(f"{'='*60}")
|
|
308
|
-
print(
|
|
309
|
-
print(
|
|
308
|
+
print("\nTo resume this workflow, the external system should:")
|
|
309
|
+
print("\n1. Complete the async operation")
|
|
310
310
|
print(f"2. Call the webhook: {pending_result.get('callback_url')}")
|
|
311
|
-
print(
|
|
312
|
-
print(
|
|
313
|
-
print(
|
|
314
|
-
print(
|
|
315
|
-
print(
|
|
311
|
+
print("3. The webhook handler should call:")
|
|
312
|
+
print("\n graph.update_state(config, Command(resume=result))")
|
|
313
|
+
print("\nwhere 'result' is a dict with:")
|
|
314
|
+
print(" - All data needed for routing (check transitions in YAML)")
|
|
315
|
+
print(" - Additional metadata for the workflow")
|
|
316
316
|
print(f"\n{'='*60}\n")
|
|
317
317
|
|
|
318
318
|
|
|
@@ -54,7 +54,7 @@ def test_check_pending_status():
|
|
|
54
54
|
print(f"✓ Has webhook_url? {has_webhook}")
|
|
55
55
|
|
|
56
56
|
# CHECK 3: Access the pending metadata
|
|
57
|
-
print(
|
|
57
|
+
print("\nPending Metadata:")
|
|
58
58
|
print(f" Job ID: {result.get('job_id')}")
|
|
59
59
|
print(f" Webhook: {result.get('webhook_url')}")
|
|
60
60
|
print(f" Estimated time: {result.get('estimated_time')}")
|
|
@@ -98,15 +98,24 @@ def test_workflow_pending_state():
|
|
|
98
98
|
"payment_method": "credit_card"
|
|
99
99
|
}
|
|
100
100
|
|
|
101
|
-
# Mock interrupt to capture the call
|
|
101
|
+
# Mock interrupt to capture the call and simulate interrupt behavior
|
|
102
102
|
captured_interrupt_data = None
|
|
103
|
+
class WorkflowInterrupt(Exception):
|
|
104
|
+
"""Simulates workflow interruption"""
|
|
105
|
+
pass
|
|
106
|
+
|
|
103
107
|
def capture_interrupt(value):
|
|
104
108
|
nonlocal captured_interrupt_data
|
|
105
109
|
captured_interrupt_data = value
|
|
106
|
-
|
|
110
|
+
# Simulate actual interrupt by raising an exception
|
|
111
|
+
raise WorkflowInterrupt()
|
|
107
112
|
|
|
108
|
-
|
|
109
|
-
|
|
113
|
+
try:
|
|
114
|
+
with patch('soprano_sdk.nodes.async_function.interrupt', side_effect=capture_interrupt):
|
|
115
|
+
strategy.execute(state)
|
|
116
|
+
except WorkflowInterrupt:
|
|
117
|
+
# Expected - workflow was interrupted
|
|
118
|
+
pass
|
|
110
119
|
|
|
111
120
|
# CHECK 1: Workflow status is set to pending
|
|
112
121
|
workflow_status = state.get(WorkflowKeys.STATUS)
|
|
@@ -125,13 +134,13 @@ def test_workflow_pending_state():
|
|
|
125
134
|
|
|
126
135
|
# CHECK 4: Access stored pending metadata
|
|
127
136
|
pending_metadata = state[pending_key]
|
|
128
|
-
print(
|
|
137
|
+
print("\nStored Pending Metadata:")
|
|
129
138
|
print(f" Status: {pending_metadata.get('status')}")
|
|
130
139
|
print(f" Job ID: {pending_metadata.get('job_id')}")
|
|
131
140
|
print(f" Amount: ${pending_metadata.get('payment_amount')}")
|
|
132
141
|
|
|
133
142
|
# CHECK 5: Verify interrupt was called correctly
|
|
134
|
-
print(
|
|
143
|
+
print("\nInterrupt Call Data:")
|
|
135
144
|
print(f" Type: {captured_interrupt_data['type']}")
|
|
136
145
|
print(f" Step ID: {captured_interrupt_data['step_id']}")
|
|
137
146
|
print(f" Pending data: {captured_interrupt_data['pending'].get('job_id')}")
|
|
@@ -141,7 +150,7 @@ def test_workflow_pending_state():
|
|
|
141
150
|
|
|
142
151
|
# CHECK 6: Output field NOT set yet (waiting for resume)
|
|
143
152
|
assert "verification_result" not in state
|
|
144
|
-
print(
|
|
153
|
+
print("✓ Output field 'verification_result' not set (waiting for resume)")
|
|
145
154
|
|
|
146
155
|
print("\n✅ Test passed: Workflow is in pending state")
|
|
147
156
|
|
|
@@ -184,15 +193,23 @@ def test_full_execution_two_phase():
|
|
|
184
193
|
}
|
|
185
194
|
|
|
186
195
|
pending_result = None
|
|
196
|
+
class WorkflowInterrupt(Exception):
|
|
197
|
+
"""Simulates workflow interruption"""
|
|
198
|
+
pass
|
|
199
|
+
|
|
187
200
|
def capture_pending(value):
|
|
188
201
|
nonlocal pending_result
|
|
189
202
|
pending_result = value
|
|
190
|
-
|
|
203
|
+
raise WorkflowInterrupt()
|
|
191
204
|
|
|
192
|
-
|
|
193
|
-
|
|
205
|
+
try:
|
|
206
|
+
with patch('soprano_sdk.nodes.async_function.interrupt', side_effect=capture_pending):
|
|
207
|
+
strategy.execute(state_phase1)
|
|
208
|
+
except WorkflowInterrupt:
|
|
209
|
+
# Expected - workflow was interrupted
|
|
210
|
+
pass
|
|
194
211
|
|
|
195
|
-
print(
|
|
212
|
+
print("✓ Workflow interrupted with pending status")
|
|
196
213
|
print(f"✓ Job ID: {pending_result['pending']['job_id']}")
|
|
197
214
|
print(f"✓ Workflow status: {state_phase1[WorkflowKeys.STATUS]}")
|
|
198
215
|
|
|
@@ -208,7 +225,7 @@ def test_full_execution_two_phase():
|
|
|
208
225
|
job_id = pending_result['pending']['job_id']
|
|
209
226
|
async_result = simulate_payment_verification_callback(job_id, success=True)
|
|
210
227
|
|
|
211
|
-
print(
|
|
228
|
+
print("✓ Payment gateway completed verification")
|
|
212
229
|
print(f"✓ Verification ID: {async_result['verification_id']}")
|
|
213
230
|
print(f"✓ Status: {async_result['status']}")
|
|
214
231
|
|
|
@@ -226,7 +243,7 @@ def test_full_execution_two_phase():
|
|
|
226
243
|
with patch('soprano_sdk.nodes.async_function.interrupt', return_value=async_result):
|
|
227
244
|
final_state = strategy.execute(state_phase2)
|
|
228
245
|
|
|
229
|
-
print(
|
|
246
|
+
print("✓ Workflow resumed successfully")
|
|
230
247
|
print(f"✓ Final status: {final_state[WorkflowKeys.STATUS]}")
|
|
231
248
|
|
|
232
249
|
# Verify Phase 2 completion
|
|
@@ -235,7 +252,7 @@ def test_full_execution_two_phase():
|
|
|
235
252
|
assert final_state[WorkflowKeys.STATUS] == "verify_payment_success"
|
|
236
253
|
assert "_async_pending_verify_payment" not in state_phase2
|
|
237
254
|
|
|
238
|
-
print(
|
|
255
|
+
print("\nFinal Verification Result:")
|
|
239
256
|
print(f" Verification ID: {state_phase2['verification_result']['verification_id']}")
|
|
240
257
|
print(f" Transaction ID: {state_phase2['verification_result']['transaction_id']}")
|
|
241
258
|
print(f" Amount: ${state_phase2['verification_result']['amount_verified']}")
|
|
@@ -278,19 +295,27 @@ def test_accessing_interrupt_and_resume_data():
|
|
|
278
295
|
}
|
|
279
296
|
|
|
280
297
|
interrupt_data = None
|
|
298
|
+
class WorkflowInterrupt(Exception):
|
|
299
|
+
"""Simulates workflow interruption"""
|
|
300
|
+
pass
|
|
301
|
+
|
|
281
302
|
def capture_interrupt_data(value):
|
|
282
303
|
nonlocal interrupt_data
|
|
283
304
|
interrupt_data = value
|
|
284
|
-
|
|
305
|
+
raise WorkflowInterrupt()
|
|
285
306
|
|
|
286
|
-
|
|
287
|
-
|
|
307
|
+
try:
|
|
308
|
+
with patch('soprano_sdk.nodes.async_function.interrupt', side_effect=capture_interrupt_data):
|
|
309
|
+
strategy.execute(state)
|
|
310
|
+
except WorkflowInterrupt:
|
|
311
|
+
# Expected - workflow was interrupted
|
|
312
|
+
pass
|
|
288
313
|
|
|
289
314
|
# ACCESS THE THREE INTERRUPT VALUES
|
|
290
315
|
print("\n--- Accessing Interrupt Call Data ---")
|
|
291
316
|
print(f"✓ Type: {interrupt_data['type']}")
|
|
292
317
|
print(f"✓ Step ID: {interrupt_data['step_id']}")
|
|
293
|
-
print(
|
|
318
|
+
print("✓ Pending metadata:")
|
|
294
319
|
for key, value in interrupt_data['pending'].items():
|
|
295
320
|
print(f" {key}: {value}")
|
|
296
321
|
|
|
@@ -380,15 +405,15 @@ def test_sync_completion_no_pending():
|
|
|
380
405
|
assert not strategy._is_async_pending(state)
|
|
381
406
|
|
|
382
407
|
# CHECK: Result immediately available
|
|
383
|
-
print(
|
|
408
|
+
print("✓ Result available immediately")
|
|
384
409
|
assert "verification_result" in state
|
|
385
410
|
assert state["verification_result"]["status"] == "verified"
|
|
386
411
|
|
|
387
412
|
# CHECK: No pending metadata
|
|
388
413
|
assert "_async_pending_verify_sync" not in state
|
|
389
|
-
print(
|
|
414
|
+
print("✓ No pending metadata stored")
|
|
390
415
|
|
|
391
|
-
print(
|
|
416
|
+
print("\nImmediate Result:")
|
|
392
417
|
print(f" Status: {state['verification_result']['status']}")
|
|
393
418
|
print(f" Verification ID: {state['verification_result']['verification_id']}")
|
|
394
419
|
print(f" Amount: ${state['verification_result']['amount_verified']}")
|
|
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "soprano-sdk"
|
|
7
|
-
version = "0.2.
|
|
7
|
+
version = "0.2.13"
|
|
8
8
|
description = "YAML-driven workflow engine with AI agent integration for building conversational SOPs"
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
requires-python = ">=3.12"
|
|
@@ -41,6 +41,7 @@ dependencies = [
|
|
|
41
41
|
dev = [
|
|
42
42
|
"gradio>=5.46.0",
|
|
43
43
|
"pytest>=7.0.0",
|
|
44
|
+
"ruff==0.14.13"
|
|
44
45
|
]
|
|
45
46
|
persistence = [
|
|
46
47
|
"langgraph-checkpoint-mongodb>=0.2.0",
|
|
@@ -20,7 +20,7 @@ class LangGraphAgentAdapter(AgentAdapter):
|
|
|
20
20
|
self.agent = agent
|
|
21
21
|
|
|
22
22
|
def invoke(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
|
|
23
|
-
logger.info("Invoking
|
|
23
|
+
logger.info("Invoking LangGraphAgentAdapter agent with messages")
|
|
24
24
|
response = self.agent.invoke({"messages": messages})
|
|
25
25
|
|
|
26
26
|
if structured_response := response.get('structured_response'):
|
|
@@ -44,7 +44,7 @@ class CrewAIAgentAdapter(AgentAdapter):
|
|
|
44
44
|
|
|
45
45
|
def invoke(self, messages: List[Dict[str, str]]) -> Any:
|
|
46
46
|
try:
|
|
47
|
-
logger.info("Invoking
|
|
47
|
+
logger.info("Invoking CrewAIAgentAdapter agent with messages")
|
|
48
48
|
result = self.agent.kickoff(messages, response_format=self.output_schema)
|
|
49
49
|
|
|
50
50
|
if structured_response := getattr(result, 'pydantic', None) :
|
|
@@ -65,7 +65,7 @@ class AgnoAgentAdapter(AgentAdapter):
|
|
|
65
65
|
|
|
66
66
|
def invoke(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
|
|
67
67
|
try:
|
|
68
|
-
logger.info("Invoking
|
|
68
|
+
logger.info("Invoking AgnoAgentAdapter agent with messages")
|
|
69
69
|
response = self.agent.run(messages)
|
|
70
70
|
agent_response = response.content if hasattr(response, 'content') else str(response)
|
|
71
71
|
|
|
@@ -56,7 +56,8 @@ class WorkflowEngine:
|
|
|
56
56
|
|
|
57
57
|
logger.info(
|
|
58
58
|
f"Workflow loaded: {self.workflow_name} v{self.workflow_version} "
|
|
59
|
-
f"({len(self.steps)} steps, {len(self.outcomes)} outcomes
|
|
59
|
+
f"({len(self.steps)} steps, {len(self.outcomes)} outcomes, "
|
|
60
|
+
f"{len(self.collector_node_field_map)} collector nodes)"
|
|
60
61
|
)
|
|
61
62
|
|
|
62
63
|
except Exception as e:
|
|
@@ -202,6 +203,7 @@ class WorkflowEngine:
|
|
|
202
203
|
def load_steps(self):
|
|
203
204
|
prepared_steps: list = []
|
|
204
205
|
mfa_redirects: Dict[str, str] = {}
|
|
206
|
+
self.collector_node_field_map: Dict[str, str] = {} # Map of node_id -> field
|
|
205
207
|
|
|
206
208
|
for step in self.config['steps']:
|
|
207
209
|
step_id = step['id']
|
|
@@ -228,6 +230,14 @@ class WorkflowEngine:
|
|
|
228
230
|
|
|
229
231
|
prepared_steps.append(step)
|
|
230
232
|
|
|
233
|
+
# Build collector node -> field map
|
|
234
|
+
for step in prepared_steps:
|
|
235
|
+
if step.get('action') == 'collect_input':
|
|
236
|
+
node_id = step.get('id')
|
|
237
|
+
field = step.get('field')
|
|
238
|
+
if node_id and field:
|
|
239
|
+
self.collector_node_field_map[node_id] = field
|
|
240
|
+
|
|
231
241
|
for step in prepared_steps:
|
|
232
242
|
if step['id'] in self.mfa_validator_steps: # MFA Validator
|
|
233
243
|
continue
|
|
@@ -195,6 +195,7 @@ class CollectInputStrategy(ActionStrategy):
|
|
|
195
195
|
context_value = self.engine_context.get_context_value(self.field)
|
|
196
196
|
if context_value is None:
|
|
197
197
|
return
|
|
198
|
+
|
|
198
199
|
logger.info(f"Using context value for '{self.field}': {context_value}")
|
|
199
200
|
state[self.field] = context_value
|
|
200
201
|
span.add_event("context.value_used", {"field": self.field, "value": str(context_value)})
|
|
@@ -375,7 +376,9 @@ class CollectInputStrategy(ActionStrategy):
|
|
|
375
376
|
if self.is_structured_output:
|
|
376
377
|
try:
|
|
377
378
|
response_dict = json.loads(agent_response) if isinstance(agent_response, str) else agent_response
|
|
378
|
-
|
|
379
|
+
bot_response = response_dict.get("bot_response", None)
|
|
380
|
+
# Treat empty or whitespace-only bot_response as None
|
|
381
|
+
prompt = bot_response if (bot_response and bot_response.strip()) else None
|
|
379
382
|
except (json.JSONDecodeError, TypeError, ValueError) as e:
|
|
380
383
|
logger.error(f"Error When Converting Structured Output {agent_response} to JSON {e}")
|
|
381
384
|
prompt = agent_response
|
|
@@ -83,21 +83,35 @@ class WorkflowTool:
|
|
|
83
83
|
callback_handler = CallbackHandler()
|
|
84
84
|
config = {"configurable": {"thread_id": thread_id}, "callbacks": [callback_handler]}
|
|
85
85
|
|
|
86
|
-
self.engine.update_context(initial_context)
|
|
87
|
-
span.add_event("context.updated", {"fields": list(initial_context.keys())})
|
|
88
|
-
|
|
89
86
|
state = self.graph.get_state(config)
|
|
90
87
|
|
|
88
|
+
# Intelligently update context based on workflow state
|
|
91
89
|
if state.next:
|
|
90
|
+
# Workflow is resuming - only update fields that haven't been collected yet
|
|
92
91
|
span.set_attribute("workflow.resumed", True)
|
|
93
92
|
logger.info(f"[WorkflowTool] Resuming interrupted workflow {self.name} (thread: {thread_id})")
|
|
93
|
+
|
|
94
|
+
filtered_context = self._filter_already_collected_fields(state.values, initial_context)
|
|
95
|
+
self.engine.update_context(filtered_context)
|
|
96
|
+
|
|
97
|
+
span.add_event("context.updated", {
|
|
98
|
+
"fields": list(filtered_context.keys()),
|
|
99
|
+
"filtered_out": list(set(initial_context.keys()) - set(filtered_context.keys()))
|
|
100
|
+
})
|
|
101
|
+
|
|
94
102
|
result = self.graph.invoke(
|
|
95
|
-
Command(resume=user_message or "", update=
|
|
103
|
+
Command(resume=user_message or "", update=filtered_context),
|
|
96
104
|
config=config
|
|
97
105
|
)
|
|
106
|
+
|
|
98
107
|
else:
|
|
108
|
+
# Fresh start - update all fields from initial_context
|
|
99
109
|
span.set_attribute("workflow.resumed", False)
|
|
100
110
|
logger.info(f"[WorkflowTool] Starting fresh workflow {self.name} (thread: {thread_id})")
|
|
111
|
+
|
|
112
|
+
self.engine.update_context(initial_context)
|
|
113
|
+
span.add_event("context.updated", {"fields": list(initial_context.keys())})
|
|
114
|
+
|
|
101
115
|
result = self.graph.invoke(initial_context, config=config)
|
|
102
116
|
|
|
103
117
|
final_state = self.graph.get_state(config)
|
|
@@ -124,6 +138,51 @@ class WorkflowTool:
|
|
|
124
138
|
span.set_attribute("workflow.status", "completed")
|
|
125
139
|
return self.engine.get_outcome_message(result)
|
|
126
140
|
|
|
141
|
+
def _filter_already_collected_fields(
|
|
142
|
+
self,
|
|
143
|
+
current_state: Dict[str, Any],
|
|
144
|
+
initial_context: Optional[Dict[str, Any]]
|
|
145
|
+
) -> Dict[str, Any]:
|
|
146
|
+
"""
|
|
147
|
+
Filter initial_context to exclude fields that have already been collected.
|
|
148
|
+
|
|
149
|
+
Args:
|
|
150
|
+
current_state: Current workflow state
|
|
151
|
+
initial_context: Context to filter
|
|
152
|
+
|
|
153
|
+
Returns:
|
|
154
|
+
Filtered context with only uncollected fields
|
|
155
|
+
"""
|
|
156
|
+
if not initial_context:
|
|
157
|
+
return {}
|
|
158
|
+
|
|
159
|
+
from .core.constants import WorkflowKeys
|
|
160
|
+
|
|
161
|
+
execution_order = current_state.get(WorkflowKeys.NODE_EXECUTION_ORDER, [])
|
|
162
|
+
|
|
163
|
+
node_to_field_map = self.engine.collector_node_field_map
|
|
164
|
+
|
|
165
|
+
# Determine which fields have already been collected
|
|
166
|
+
collected_fields = set()
|
|
167
|
+
for executed_node_id in execution_order:
|
|
168
|
+
if executed_node_id in node_to_field_map:
|
|
169
|
+
collected_fields.add(node_to_field_map[executed_node_id])
|
|
170
|
+
|
|
171
|
+
# Filter initial_context to exclude already-collected fields
|
|
172
|
+
filtered_context = {
|
|
173
|
+
field: value
|
|
174
|
+
for field, value in initial_context.items()
|
|
175
|
+
if field not in collected_fields
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
if collected_fields:
|
|
179
|
+
logger.info(
|
|
180
|
+
f"[WorkflowTool] Filtered out already-collected fields: {collected_fields}. "
|
|
181
|
+
f"Updating context with: {list(filtered_context.keys())}"
|
|
182
|
+
)
|
|
183
|
+
|
|
184
|
+
return filtered_context
|
|
185
|
+
|
|
127
186
|
def resume(
|
|
128
187
|
self,
|
|
129
188
|
thread_id: str,
|
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
"""Tests for agent adapter logging fixes."""
|
|
2
|
+
|
|
3
|
+
import pytest
|
|
4
|
+
from unittest.mock import MagicMock, patch
|
|
5
|
+
from soprano_sdk.agents.adaptor import (
|
|
6
|
+
LangGraphAgentAdapter,
|
|
7
|
+
CrewAIAgentAdapter,
|
|
8
|
+
AgnoAgentAdapter,
|
|
9
|
+
PydanticAIAgentAdapter,
|
|
10
|
+
)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class TestLangGraphAdapterLogging:
|
|
14
|
+
"""Test suite for LangGraphAgentAdapter logging."""
|
|
15
|
+
|
|
16
|
+
@patch('soprano_sdk.agents.adaptor.logger')
|
|
17
|
+
def test_langgraph_adapter_logs_correct_name(self, mock_logger):
|
|
18
|
+
"""Test that LangGraphAgentAdapter logs its correct name."""
|
|
19
|
+
# Create a mock agent
|
|
20
|
+
mock_agent = MagicMock()
|
|
21
|
+
mock_message = MagicMock()
|
|
22
|
+
mock_message.content = "Test response"
|
|
23
|
+
mock_agent.invoke.return_value = {"messages": [mock_message]}
|
|
24
|
+
|
|
25
|
+
# Create adapter and invoke
|
|
26
|
+
adapter = LangGraphAgentAdapter(mock_agent)
|
|
27
|
+
adapter.invoke([{"role": "user", "content": "Hello"}])
|
|
28
|
+
|
|
29
|
+
# Verify correct logging
|
|
30
|
+
mock_logger.info.assert_called_once_with(
|
|
31
|
+
"Invoking LangGraphAgentAdapter agent with messages"
|
|
32
|
+
)
|
|
33
|
+
|
|
34
|
+
@patch('soprano_sdk.agents.adaptor.logger')
|
|
35
|
+
def test_langgraph_adapter_does_not_log_wrong_name(self, mock_logger):
|
|
36
|
+
"""Test that LangGraphAgentAdapter does not log 'LangGraph agent'."""
|
|
37
|
+
mock_agent = MagicMock()
|
|
38
|
+
mock_message = MagicMock()
|
|
39
|
+
mock_message.content = "Test response"
|
|
40
|
+
mock_agent.invoke.return_value = {"messages": [mock_message]}
|
|
41
|
+
|
|
42
|
+
adapter = LangGraphAgentAdapter(mock_agent)
|
|
43
|
+
adapter.invoke([{"role": "user", "content": "Hello"}])
|
|
44
|
+
|
|
45
|
+
# Verify it doesn't log the old incorrect message
|
|
46
|
+
for call in mock_logger.info.call_args_list:
|
|
47
|
+
assert "Invoking LangGraph agent with messages" not in str(call)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class TestCrewAIAdapterLogging:
|
|
51
|
+
"""Test suite for CrewAIAgentAdapter logging."""
|
|
52
|
+
|
|
53
|
+
@patch('soprano_sdk.agents.adaptor.logger')
|
|
54
|
+
def test_crewai_adapter_logs_correct_name(self, mock_logger):
|
|
55
|
+
"""Test that CrewAIAgentAdapter logs its correct name."""
|
|
56
|
+
# Create a mock agent
|
|
57
|
+
mock_agent = MagicMock()
|
|
58
|
+
mock_result = MagicMock()
|
|
59
|
+
mock_result.raw = "Test response"
|
|
60
|
+
mock_agent.kickoff.return_value = mock_result
|
|
61
|
+
|
|
62
|
+
mock_output_schema = MagicMock()
|
|
63
|
+
|
|
64
|
+
# Create adapter and invoke
|
|
65
|
+
adapter = CrewAIAgentAdapter(mock_agent, mock_output_schema)
|
|
66
|
+
adapter.invoke([{"role": "user", "content": "Hello"}])
|
|
67
|
+
|
|
68
|
+
# Verify correct logging
|
|
69
|
+
mock_logger.info.assert_called_once_with(
|
|
70
|
+
"Invoking CrewAIAgentAdapter agent with messages"
|
|
71
|
+
)
|
|
72
|
+
|
|
73
|
+
@patch('soprano_sdk.agents.adaptor.logger')
|
|
74
|
+
def test_crewai_adapter_does_not_log_wrong_name(self, mock_logger):
|
|
75
|
+
"""Test that CrewAIAgentAdapter does not log 'LangGraph agent'."""
|
|
76
|
+
mock_agent = MagicMock()
|
|
77
|
+
mock_result = MagicMock()
|
|
78
|
+
mock_result.raw = "Test response"
|
|
79
|
+
mock_agent.kickoff.return_value = mock_result
|
|
80
|
+
|
|
81
|
+
mock_output_schema = MagicMock()
|
|
82
|
+
|
|
83
|
+
adapter = CrewAIAgentAdapter(mock_agent, mock_output_schema)
|
|
84
|
+
adapter.invoke([{"role": "user", "content": "Hello"}])
|
|
85
|
+
|
|
86
|
+
# Verify it doesn't log the old incorrect message
|
|
87
|
+
for call in mock_logger.info.call_args_list:
|
|
88
|
+
assert "Invoking LangGraph agent with messages" not in str(call)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
class TestAgnoAdapterLogging:
|
|
92
|
+
"""Test suite for AgnoAgentAdapter logging."""
|
|
93
|
+
|
|
94
|
+
@patch('soprano_sdk.agents.adaptor.logger')
|
|
95
|
+
def test_agno_adapter_logs_correct_name(self, mock_logger):
|
|
96
|
+
"""Test that AgnoAgentAdapter logs its correct name."""
|
|
97
|
+
# Create a mock agent
|
|
98
|
+
mock_agent = MagicMock()
|
|
99
|
+
mock_response = MagicMock()
|
|
100
|
+
mock_response.content = "Test response"
|
|
101
|
+
mock_agent.run.return_value = mock_response
|
|
102
|
+
|
|
103
|
+
# Create adapter and invoke
|
|
104
|
+
adapter = AgnoAgentAdapter(mock_agent)
|
|
105
|
+
adapter.invoke([{"role": "user", "content": "Hello"}])
|
|
106
|
+
|
|
107
|
+
# Verify correct logging
|
|
108
|
+
mock_logger.info.assert_called_once_with(
|
|
109
|
+
"Invoking AgnoAgentAdapter agent with messages"
|
|
110
|
+
)
|
|
111
|
+
|
|
112
|
+
@patch('soprano_sdk.agents.adaptor.logger')
|
|
113
|
+
def test_agno_adapter_does_not_log_wrong_name(self, mock_logger):
|
|
114
|
+
"""Test that AgnoAgentAdapter does not log 'LangGraph agent'."""
|
|
115
|
+
mock_agent = MagicMock()
|
|
116
|
+
mock_response = MagicMock()
|
|
117
|
+
mock_response.content = "Test response"
|
|
118
|
+
mock_agent.run.return_value = mock_response
|
|
119
|
+
|
|
120
|
+
adapter = AgnoAgentAdapter(mock_agent)
|
|
121
|
+
adapter.invoke([{"role": "user", "content": "Hello"}])
|
|
122
|
+
|
|
123
|
+
# Verify it doesn't log the old incorrect message
|
|
124
|
+
for call in mock_logger.info.call_args_list:
|
|
125
|
+
assert "Invoking LangGraph agent with messages" not in str(call)
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
class TestPydanticAIAdapterLogging:
|
|
129
|
+
"""Test suite for PydanticAIAgentAdapter logging.
|
|
130
|
+
|
|
131
|
+
Note: This adapter still has the bug where it logs 'LangGraph agent'.
|
|
132
|
+
These tests document the current (incorrect) behavior.
|
|
133
|
+
"""
|
|
134
|
+
|
|
135
|
+
@patch('soprano_sdk.agents.adaptor.logger')
|
|
136
|
+
def test_pydantic_ai_adapter_current_logging_behavior(self, mock_logger):
|
|
137
|
+
"""Test current (incorrect) logging behavior of PydanticAIAgentAdapter.
|
|
138
|
+
|
|
139
|
+
This test documents that PydanticAIAgentAdapter still logs 'LangGraph agent'.
|
|
140
|
+
This should be fixed in a future update.
|
|
141
|
+
"""
|
|
142
|
+
mock_agent = MagicMock()
|
|
143
|
+
mock_result = MagicMock()
|
|
144
|
+
mock_result.output = "Test response"
|
|
145
|
+
mock_agent.run_sync.return_value = mock_result
|
|
146
|
+
|
|
147
|
+
adapter = PydanticAIAgentAdapter(mock_agent)
|
|
148
|
+
adapter.invoke([{"role": "user", "content": "Hello"}])
|
|
149
|
+
|
|
150
|
+
# Currently logs incorrect message (bug)
|
|
151
|
+
mock_logger.info.assert_called_once_with(
|
|
152
|
+
"Invoking LangGraph agent with messages"
|
|
153
|
+
)
|
|
154
|
+
|
|
155
|
+
@patch('soprano_sdk.agents.adaptor.logger')
|
|
156
|
+
def test_pydantic_ai_adapter_should_log_correct_name(self, mock_logger):
|
|
157
|
+
"""Test what PydanticAIAgentAdapter SHOULD log (expected future behavior).
|
|
158
|
+
|
|
159
|
+
This test will fail until the bug is fixed. It documents the expected behavior.
|
|
160
|
+
"""
|
|
161
|
+
mock_agent = MagicMock()
|
|
162
|
+
mock_result = MagicMock()
|
|
163
|
+
mock_result.output = "Test response"
|
|
164
|
+
mock_agent.run_sync.return_value = mock_result
|
|
165
|
+
|
|
166
|
+
adapter = PydanticAIAgentAdapter(mock_agent)
|
|
167
|
+
|
|
168
|
+
# This test documents expected behavior (currently fails)
|
|
169
|
+
with pytest.raises(AssertionError):
|
|
170
|
+
adapter.invoke([{"role": "user", "content": "Hello"}])
|
|
171
|
+
mock_logger.info.assert_called_once_with(
|
|
172
|
+
"Invoking PydanticAIAgentAdapter agent with messages"
|
|
173
|
+
)
|