soprano-sdk 0.2.10__py3-none-any.whl → 0.2.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- soprano_sdk/agents/adaptor.py +59 -10
- soprano_sdk/agents/factory.py +88 -19
- soprano_sdk/authenticators/mfa.py +42 -39
- soprano_sdk/core/engine.py +11 -1
- soprano_sdk/nodes/collect_input.py +8 -4
- soprano_sdk/tools.py +63 -4
- soprano_sdk/validation/schema.py +1 -1
- soprano_sdk/validation/validator.py +0 -4
- {soprano_sdk-0.2.10.dist-info → soprano_sdk-0.2.17.dist-info}/METADATA +3 -1
- {soprano_sdk-0.2.10.dist-info → soprano_sdk-0.2.17.dist-info}/RECORD +12 -12
- {soprano_sdk-0.2.10.dist-info → soprano_sdk-0.2.17.dist-info}/WHEEL +0 -0
- {soprano_sdk-0.2.10.dist-info → soprano_sdk-0.2.17.dist-info}/licenses/LICENSE +0 -0
soprano_sdk/agents/adaptor.py
CHANGED
@@ -6,6 +6,9 @@ from pydantic import BaseModel
 from pydantic_ai.agent import Agent as PydanticAIAgent
 from crewai.agent import Agent as CrewAIAgent
 from ..utils.logger import logger
+import json
+import ast
+

 class AgentAdapter(ABC):

@@ -20,7 +23,7 @@ class LangGraphAgentAdapter(AgentAdapter):
         self.agent = agent

     def invoke(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
-        logger.info("Invoking
+        logger.info("Invoking LangGraphAgentAdapter agent with messages")
         response = self.agent.invoke({"messages": messages})

         if structured_response := response.get('structured_response'):
@@ -37,23 +40,69 @@ class LangGraphAgentAdapter(AgentAdapter):


 class CrewAIAgentAdapter(AgentAdapter):
-
+
     def __init__(self, agent: CrewAIAgent, output_schema: BaseModel):
         self.agent = agent
-        self.output_schema=output_schema
-
+        self.output_schema = output_schema
+
+    def _convert_to_dict(self, response: Any) -> Dict[str, Any]:
+        """
+        Convert response to dict using 3 strategies:
+        1. Check if already a dict
+        2. Try json.loads()
+        3. Try ast.literal_eval()
+        """
+
+        # Strategy 1: Already a dict
+        if isinstance(response, dict):
+            logger.info("Response is already a dict")
+            return response
+
+        # Convert to string for parsing
+        response_str = str(response)
+
+        # Strategy 2: Try json.loads()
+        try:
+            parsed = json.loads(response_str)
+            logger.info("Successfully parsed with json.loads()")
+            return parsed
+        except (json.JSONDecodeError, ValueError, TypeError) as e:
+            logger.error(f"json.loads() failed: {e}")
+
+        # Strategy 3: Try ast.literal_eval()
+        try:
+            parsed = ast.literal_eval(response_str)
+            if isinstance(parsed, dict):
+                logger.info("Successfully parsed with ast.literal_eval()")
+                return parsed
+        except (ValueError, SyntaxError, TypeError) as e:
+            logger.error(f"ast.literal_eval() failed: {e}")
+
+        # No schema and all parsing failed - return as string
+        logger.error("No schema provided and parsing failed, returning raw response")
+        return response_str
+
     def invoke(self, messages: List[Dict[str, str]]) -> Any:
         try:
-            logger.info("Invoking
+            logger.info("Invoking CrewAIAgentAdapter agent with messages")
             result = self.agent.kickoff(messages, response_format=self.output_schema)

-            if structured_response := getattr(result, 'pydantic', None)
+            if structured_response := getattr(result, 'pydantic', None):
+                logger.info("Got pydantic structured response")
                 return structured_response.model_dump()

-
+            agent_response = getattr(result, 'raw', None)
+            if agent_response is None:
+                agent_response = str(result)
+
+            logger.info(f"Processing raw response type: {type(agent_response)}")
+
+            if not self.output_schema:
+                logger.info("No output schema provided, returning raw response")
                 return agent_response
-
-            return
+
+            return self._convert_to_dict(agent_response)
+
         except Exception as e:
             raise RuntimeError(f"CrewAI agent invocation failed: {e}")

@@ -65,7 +114,7 @@ class AgnoAgentAdapter(AgentAdapter):

     def invoke(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
         try:
-            logger.info("Invoking
+            logger.info("Invoking AgnoAgentAdapter agent with messages")
             response = self.agent.run(messages)
             agent_response = response.content if hasattr(response, 'content') else str(response)

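The new `_convert_to_dict` helper gives the CrewAI adapter a graceful fallback when the agent returns a string rather than a structured object. A minimal standalone sketch of that parsing order (dict passthrough, then `json.loads()`, then `ast.literal_eval()`), written independently of the SDK classes:

import ast
import json
from typing import Any, Dict, Union

def response_to_dict(response: Any) -> Union[Dict[str, Any], str]:
    # Strategy 1: already a dict - return unchanged
    if isinstance(response, dict):
        return response
    text = str(response)
    # Strategy 2: strict JSON (double quotes, true/false/null)
    try:
        parsed = json.loads(text)
        if isinstance(parsed, dict):
            return parsed
    except (json.JSONDecodeError, TypeError, ValueError):
        pass
    # Strategy 3: Python literal syntax (single quotes, True/False/None)
    try:
        parsed = ast.literal_eval(text)
        if isinstance(parsed, dict):
            return parsed
    except (ValueError, SyntaxError, TypeError):
        pass
    # Nothing worked - hand back the raw string
    return text

# Single-quoted "JSON" fails json.loads() but parses as a Python literal:
print(response_to_dict("{'status': 'ok', 'code': 200}"))  # {'status': 'ok', 'code': 200}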
soprano_sdk/agents/factory.py
CHANGED
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Literal, Tuple, Callable
+from typing import Any, Dict, List, Literal, Tuple, Callable, get_origin, get_args

 from agno.models.openai import OpenAIChat
 from crewai import LLM
@@ -18,36 +18,41 @@ from .adaptor import (

 def get_model(config: Dict[str, Any], framework: Literal['langgraph', 'crewai', 'agno', 'pydantic-ai'] = "langgraph", output_schema: Optional[BaseModel] = None, tools: Optional[List] = None):
     errors = []
-
+
     model_name: str = config.get("model_name", "")
     if not model_name:
         errors.append("Model name is required in model_config")
-
+
+    # Determine provider (openai, ollama, etc.)
+    provider = config.get("provider", "openai").lower()
+
+    # Handle base_url - required for Ollama, optional for OpenAI
     base_url = config.get("base_url")
-    if not base_url:
-        errors.append("Base url for model is required in model_config")
-
     api_key = config.get("api_key", "")
     if not api_key:
         if auth_callback := config.get("auth_callback"):
             api_key = auth_callback()
         if not api_key:
-
-
+            # Ollama doesn't require a real API key
+            if provider == "ollama":
+                api_key = "ollama"
+            else:
+                errors.append("API key/Auth callback for model is required in model_config")
+
     if errors:
         raise ValueError("; ".join(errors))
-
-    if framework == "agno"
+
+    if framework == "agno":
         return OpenAIChat(
             id=model_name,
             api_key=api_key,
             base_url=base_url
         )

-    if framework == "crewai"
+    if framework == "crewai":
         return LLM(
             api_key=api_key,
-            model=f"
+            model=f"{provider}/{model_name}",
             base_url=base_url,
             temperature=0.1,
             top_p=0.7
@@ -58,7 +63,6 @@ def get_model(config: Dict[str, Any], framework: Literal['langgraph', 'crewai',
         api_key=SecretStr(api_key),
         base_url=base_url,
     )
-
     if output_schema:
         return llm.with_structured_output(output_schema)

@@ -120,12 +124,77 @@ class CrewAIAgentCreator(AgentCreator):
         structured_output_model: Any = None
     ) -> CrewAIAgentAdapter:
         from crewai.agent import Agent
-        from crewai.tools import
+        from crewai.tools import BaseTool
+        from pydantic import Field, create_model
+        from typing import Type
+        import inspect
+
+        def create_crewai_tool(tool_name: str, tool_description: str, tool_callable: Callable) -> Any:
+            """Create a CrewAI tool with proper Pydantic schema for type hints"""
+
+            # Get function signature to build schema
+            sig = inspect.signature(tool_callable)
+
+            # Build Pydantic fields from function parameters
+            schema_fields = {}
+            for param_name, param in sig.parameters.items():
+                # Skip **kwargs and **state parameters - they shouldn't be in the schema
+                # These are used for validators and internal context
+                if param.kind == inspect.Parameter.VAR_KEYWORD:
+                    continue
+
+                # Get parameter annotation, default to str if not specified
+                param_type = param.annotation if param.annotation != inspect.Parameter.empty else str
+
+                # Default description
+                description = f"Parameter: {param_name}"
+
+                # Check if using Annotated type hint for description
+                if get_origin(param_type) is not None:
+                    # Handle Annotated[type, "description", ...]
+                    try:
+                        from typing import Annotated
+                        if get_origin(param_type) is Annotated:
+                            args = get_args(param_type)
+                            param_type = args[0]  # First arg is the actual type
+                            # Look for string metadata as description
+                            for metadata in args[1:]:
+                                if isinstance(metadata, str):
+                                    description = metadata
+                                    break
+                    except ImportError:
+                        # Python < 3.9 doesn't have Annotated in typing
+                        pass
+
+                # Get default value if exists
+                if param.default != inspect.Parameter.empty:
+                    schema_fields[param_name] = (
+                        param_type,
+                        Field(default=param.default, description=description)
+                    )
+                else:
+                    schema_fields[param_name] = (
+                        param_type,
+                        Field(..., description=description)
+                    )
+
+            # Create dynamic Pydantic model for tool input schema
+            ToolInputSchema = create_model(
+                f"{tool_name.title().replace('_', '')}Input",
+                **schema_fields
+            )
+
+            # Create custom BaseTool class with proper schema
+            class CustomCrewAITool(BaseTool):
+                name: str = tool_name
+                description: str = tool_description
+                args_schema: Type[BaseModel] = ToolInputSchema
+
+                def _run(self, **kwargs) -> str:
+                    return tool_callable(**kwargs)
+
+            return CustomCrewAITool()

-        def create_crewai_tool(tool_name: str, description: str, tool_callable: Callable) -> Any:
-            tool_callable.__doc__ = description
-            return tool(tool_name)(tool_callable)
-
         tools = [create_crewai_tool(tn, desc, tc) for tn, desc, tc in tools]

         agent = Agent(
@@ -136,7 +205,7 @@ class CrewAIAgentCreator(AgentCreator):
             llm=get_model(model_config, 'crewai'),
             max_retry_limit=2
         )
-
+
         return CrewAIAgentAdapter(agent, output_schema=structured_output_model)

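The rewritten `create_crewai_tool` derives a Pydantic `args_schema` from the wrapped callable's signature, reading an `Annotated[...]` string as the parameter description and skipping `**kwargs`-style parameters. A rough sketch of just that schema-building step; `lookup_order` is a hypothetical callable used only for illustration:

import inspect
from typing import Annotated, get_args, get_origin
from pydantic import Field, create_model

def lookup_order(order_id: Annotated[str, "The order identifier"], limit: int = 5, **state):
    """Hypothetical tool callable."""
    return f"{order_id}:{limit}"

schema_fields = {}
for name, param in inspect.signature(lookup_order).parameters.items():
    if param.kind == inspect.Parameter.VAR_KEYWORD:  # drop **state / **kwargs
        continue
    param_type = param.annotation if param.annotation is not inspect.Parameter.empty else str
    description = f"Parameter: {name}"
    if get_origin(param_type) is Annotated:
        args = get_args(param_type)
        param_type = args[0]
        description = next((m for m in args[1:] if isinstance(m, str)), description)
    default = param.default if param.default is not inspect.Parameter.empty else ...
    schema_fields[name] = (param_type, Field(default, description=description))

LookupOrderInput = create_model("LookupOrderInput", **schema_fields)
print(LookupOrderInput.model_json_schema()["required"])  # ['order_id'] - limit has a default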
soprano_sdk/authenticators/mfa.py
CHANGED
@@ -137,7 +137,47 @@ class MFANodeConfig:

     @classmethod
     def get_validate_user_input(cls, source_node: str, next_node: str, mfa_config: dict):
-
+        agent_config = dict(
+            name="MFA Input Data Collector",
+            initial_message="{{_mfa.message}}",
+            instructions="""
+            You are an authentication value extractor. Your job is to identify and extract MFA codes from user input, or detect if the user wants to cancel the authentication flow.
+
+            **Task:**
+            - Read the user's message carefully
+            - First, check if the user wants to cancel, stop, or exit the authentication process
+            - If they want to cancel, output: MFA_CANCELLED:
+            - Otherwise, extract ONLY the OTP/MFA code value and output in the format shown below
+
+            **Cancellation Detection:**
+            If the user expresses any intent to cancel, stop, exit, abort, or quit the authentication process, respond with: MFA_CANCELLED
+
+            Examples of cancellation phrases:
+            * "cancel" → MFA_CANCELLED:
+            * "I want to stop" → MFA_CANCELLED:
+            * "exit" → MFA_CANCELLED:
+            * "nevermind" → MFA_CANCELLED:
+            * "I don't want to continue" → MFA_CANCELLED:
+            * "stop this" → MFA_CANCELLED:
+            * "forget it" → MFA_CANCELLED:
+            * "abort" → MFA_CANCELLED:
+            * "quit" → MFA_CANCELLED:
+
+            **OTP Capture Examples:**
+            * "1234" → MFA_CAPTURED:1234
+            * "2345e" → MFA_CAPTURED:2345e
+            * "the code is 567890" → MFA_CAPTURED:567890
+            * "my otp is 123456" → MFA_CAPTURED:123456
+
+            **Output Format:**
+            - For OTP/MFA codes: MFA_CAPTURED:<otp_value>
+            - For cancellation: MFA_CANCELLED:
+
+            """)
+
+        if mfa_model := mfa_config.get('model'):
+            agent_config.update(model_name=mfa_model)
+
         max_attempts = mfa_config.get('max_attempts', 3)
         on_max_attempts_reached = mfa_config.get('on_max_attempts_reached')

@@ -149,44 +189,7 @@ class MFANodeConfig:
             field=input_field_name,
             max_attempts=max_attempts,
             validator="soprano_sdk.authenticators.mfa.mfa_validate_user_input",
-            agent=
-                name="MFA Input Data Collector",
-                model=model_name,
-                initial_message="{{_mfa.message}}",
-                instructions="""
-                You are an authentication value extractor. Your job is to identify and extract MFA codes from user input, or detect if the user wants to cancel the authentication flow.
-
-                **Task:**
-                - Read the user's message carefully
-                - First, check if the user wants to cancel, stop, or exit the authentication process
-                - If they want to cancel, output: MFA_CANCELLED:
-                - Otherwise, extract ONLY the OTP/MFA code value and output in the format shown below
-
-                **Cancellation Detection:**
-                If the user expresses any intent to cancel, stop, exit, abort, or quit the authentication process, respond with: MFA_CANCELLED
-
-                Examples of cancellation phrases:
-                * "cancel" → MFA_CANCELLED:
-                * "I want to stop" → MFA_CANCELLED:
-                * "exit" → MFA_CANCELLED:
-                * "nevermind" → MFA_CANCELLED:
-                * "I don't want to continue" → MFA_CANCELLED:
-                * "stop this" → MFA_CANCELLED:
-                * "forget it" → MFA_CANCELLED:
-                * "abort" → MFA_CANCELLED:
-                * "quit" → MFA_CANCELLED:
-
-                **OTP Capture Examples:**
-                * "1234" → MFA_CAPTURED:1234
-                * "2345e" → MFA_CAPTURED:2345e
-                * "the code is 567890" → MFA_CAPTURED:567890
-                * "my otp is 123456" → MFA_CAPTURED:123456
-
-                **Output Format:**
-                - For OTP/MFA codes: MFA_CAPTURED:<otp_value>
-                - For cancellation: MFA_CANCELLED:
-
-                """),
+            agent=agent_config,
             transitions=[
                 dict(
                     pattern="MFA_CAPTURED:",
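The extractor agent's contract is a plain text prefix: `MFA_CAPTURED:<otp_value>` when a code is found, `MFA_CANCELLED:` when the user backs out; the node's transitions then pattern-match on those prefixes. A small illustrative parser for that contract (not part of the SDK):

from typing import Optional, Tuple

def parse_extractor_reply(reply: str) -> Tuple[str, Optional[str]]:
    # Returns ('captured', otp), ('cancelled', None), or ('unknown', None)
    reply = reply.strip()
    if reply.startswith("MFA_CANCELLED"):
        return "cancelled", None
    if reply.startswith("MFA_CAPTURED:"):
        otp = reply.split(":", 1)[1].strip()
        return ("captured", otp) if otp else ("unknown", None)
    return "unknown", None

print(parse_extractor_reply("MFA_CAPTURED:567890"))  # ('captured', '567890')
print(parse_extractor_reply("MFA_CANCELLED:"))       # ('cancelled', None)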
soprano_sdk/core/engine.py
CHANGED
@@ -56,7 +56,8 @@ class WorkflowEngine:

             logger.info(
                 f"Workflow loaded: {self.workflow_name} v{self.workflow_version} "
-                f"({len(self.steps)} steps, {len(self.outcomes)} outcomes
+                f"({len(self.steps)} steps, {len(self.outcomes)} outcomes, "
+                f"{len(self.collector_node_field_map)} collector nodes)"
             )

         except Exception as e:
@@ -202,6 +203,7 @@ class WorkflowEngine:
     def load_steps(self):
         prepared_steps: list = []
         mfa_redirects: Dict[str, str] = {}
+        self.collector_node_field_map: Dict[str, str] = {}  # Map of node_id -> field

         for step in self.config['steps']:
             step_id = step['id']
@@ -228,6 +230,14 @@ class WorkflowEngine:

             prepared_steps.append(step)

+        # Build collector node -> field map
+        for step in prepared_steps:
+            if step.get('action') == 'collect_input':
+                node_id = step.get('id')
+                field = step.get('field')
+                if node_id and field:
+                    self.collector_node_field_map[node_id] = field
+
         for step in prepared_steps:
             if step['id'] in self.mfa_validator_steps:  # MFA Validator
                 continue
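`collector_node_field_map` is a simple index from each `collect_input` step's id to the field it collects; the resume path in tools.py uses it to tell which fields a running thread has already gathered. A quick sketch of what the map looks like for a hypothetical steps list (step ids and fields below are made up):

# Hypothetical steps in the shape the engine reads from the workflow YAML
steps = [
    {"id": "ask_name", "action": "collect_input", "field": "customer_name"},
    {"id": "ask_order", "action": "collect_input", "field": "order_id"},
    {"id": "lookup", "action": "call_function", "function": "orders.lookup"},
]

collector_node_field_map = {
    step["id"]: step["field"]
    for step in steps
    if step.get("action") == "collect_input" and step.get("id") and step.get("field")
}
print(collector_node_field_map)  # {'ask_name': 'customer_name', 'ask_order': 'order_id'}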
soprano_sdk/nodes/collect_input.py
CHANGED
@@ -195,6 +195,7 @@ class CollectInputStrategy(ActionStrategy):
         context_value = self.engine_context.get_context_value(self.field)
         if context_value is None:
             return
+
         logger.info(f"Using context value for '{self.field}': {context_value}")
         state[self.field] = context_value
         span.add_event("context.value_used", {"field": self.field, "value": str(context_value)})
@@ -375,15 +376,18 @@ class CollectInputStrategy(ActionStrategy):
         if self.is_structured_output:
             try:
                 response_dict = json.loads(agent_response) if isinstance(agent_response, str) else agent_response
-
+                bot_response = response_dict.get("bot_response", None)
+                # Treat empty or whitespace-only bot_response as None
+                prompt = bot_response if (bot_response and bot_response.strip()) else None
             except (json.JSONDecodeError, TypeError, ValueError) as e:
                 logger.error(f"Error When Converting Structured Output {agent_response} to JSON {e}")
                 prompt = agent_response
         else:
             prompt = agent_response

-        prompt
-
+        if prompt is not None:
+            prompt = self._render_template_string(prompt, state)
+            conversation.append({"role": "assistant", "content": prompt})

         return prompt

@@ -490,7 +494,7 @@ class CollectInputStrategy(ActionStrategy):
     def _handle_structured_output_transition(self, state: Dict[str, Any], conversation: List, agent_response: Any) -> Dict[str, Any]:

         try:
-            agent_response = json.loads(agent_response)
+            agent_response = json.loads(agent_response) if isinstance(agent_response, str) else agent_response
         except (json.JSONDecodeError, TypeError, ValueError):
             pass

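The prompt-handling change treats an empty or whitespace-only `bot_response` as "nothing to say" and only renders and appends a prompt when one is actually present. A tiny standalone sketch of that guard (the function name is illustrative):

import json
from typing import Any, Optional

def extract_prompt(agent_response: Any, is_structured_output: bool) -> Optional[str]:
    # Pull bot_response out of a structured reply; blank strings count as no prompt
    if not is_structured_output:
        return agent_response
    try:
        response_dict = json.loads(agent_response) if isinstance(agent_response, str) else agent_response
        bot_response = response_dict.get("bot_response")
        return bot_response if (bot_response and bot_response.strip()) else None
    except (json.JSONDecodeError, TypeError, ValueError):
        return agent_response

print(extract_prompt('{"bot_response": "   "}', True))                      # None
print(extract_prompt('{"bot_response": "What is your order id?"}', True))   # 'What is your order id?'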
soprano_sdk/tools.py
CHANGED
@@ -83,21 +83,35 @@ class WorkflowTool:
             callback_handler = CallbackHandler()
             config = {"configurable": {"thread_id": thread_id}, "callbacks": [callback_handler]}

-            self.engine.update_context(initial_context)
-            span.add_event("context.updated", {"fields": list(initial_context.keys())})
-
             state = self.graph.get_state(config)

+            # Intelligently update context based on workflow state
             if state.next:
+                # Workflow is resuming - only update fields that haven't been collected yet
                 span.set_attribute("workflow.resumed", True)
                 logger.info(f"[WorkflowTool] Resuming interrupted workflow {self.name} (thread: {thread_id})")
+
+                filtered_context = self._filter_already_collected_fields(state.values, initial_context)
+                self.engine.update_context(filtered_context)
+
+                span.add_event("context.updated", {
+                    "fields": list(filtered_context.keys()),
+                    "filtered_out": list(set(initial_context.keys()) - set(filtered_context.keys()))
+                })
+
                 result = self.graph.invoke(
-                    Command(resume=user_message or "", update=
+                    Command(resume=user_message or "", update=filtered_context),
                     config=config
                 )
+
             else:
+                # Fresh start - update all fields from initial_context
                 span.set_attribute("workflow.resumed", False)
                 logger.info(f"[WorkflowTool] Starting fresh workflow {self.name} (thread: {thread_id})")
+
+                self.engine.update_context(initial_context)
+                span.add_event("context.updated", {"fields": list(initial_context.keys())})
+
                 result = self.graph.invoke(initial_context, config=config)

             final_state = self.graph.get_state(config)
@@ -124,6 +138,51 @@ class WorkflowTool:
                 span.set_attribute("workflow.status", "completed")
                 return self.engine.get_outcome_message(result)

+    def _filter_already_collected_fields(
+        self,
+        current_state: Dict[str, Any],
+        initial_context: Optional[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        """
+        Filter initial_context to exclude fields that have already been collected.
+
+        Args:
+            current_state: Current workflow state
+            initial_context: Context to filter
+
+        Returns:
+            Filtered context with only uncollected fields
+        """
+        if not initial_context:
+            return {}
+
+        from .core.constants import WorkflowKeys
+
+        execution_order = current_state.get(WorkflowKeys.NODE_EXECUTION_ORDER, [])
+
+        node_to_field_map = self.engine.collector_node_field_map
+
+        # Determine which fields have already been collected
+        collected_fields = set()
+        for executed_node_id in execution_order:
+            if executed_node_id in node_to_field_map:
+                collected_fields.add(node_to_field_map[executed_node_id])
+
+        # Filter initial_context to exclude already-collected fields
+        filtered_context = {
+            field: value
+            for field, value in initial_context.items()
+            if field not in collected_fields
+        }
+
+        if collected_fields:
+            logger.info(
+                f"[WorkflowTool] Filtered out already-collected fields: {collected_fields}. "
+                f"Updating context with: {list(filtered_context.keys())}"
+            )
+
+        return filtered_context
+
     def resume(
         self,
         thread_id: str,
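The filtering step keeps a resumed run from overwriting values the user already supplied: only fields whose collector node has not yet executed are pushed back into the context. A standalone sketch of the same set logic (names are illustrative; the real method reads the execution order out of the checkpointed state):

from typing import Any, Dict, List

def filter_already_collected(
    initial_context: Dict[str, Any],
    executed_node_ids: List[str],
    node_to_field: Dict[str, str],
) -> Dict[str, Any]:
    # Drop context fields whose collect_input node already ran
    collected = {node_to_field[n] for n in executed_node_ids if n in node_to_field}
    return {k: v for k, v in initial_context.items() if k not in collected}

node_to_field = {"ask_name": "customer_name", "ask_order": "order_id"}
context = {"customer_name": "Ada", "order_id": "A-42", "priority": "high"}
print(filter_already_collected(context, ["ask_name"], node_to_field))
# {'order_id': 'A-42', 'priority': 'high'} - customer_name was already collected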
soprano_sdk/validation/schema.py
CHANGED
@@ -234,7 +234,7 @@ WORKFLOW_SCHEMA = {
         "mfa": {
             "type": "object",
             "description": "Multi-factor authentication configuration",
-            "required": ["type", "
+            "required": ["type", "payload"],
             "properties": {
                 "model": {
                     "type": "string",
soprano_sdk/validation/validator.py
CHANGED
@@ -122,10 +122,6 @@ class WorkflowValidator:
                     f"MFA is enabled in step({step['id']}). MFA is supported only for `call_function` nodes"
                 )

-            model = mfa_authorizer.get('model')
-            if not model:
-                self.errors.append(f"step({step['id']}) -> mfa -> model is missing")
-
             mfa_type = mfa_authorizer.get('type')
             if mfa_type and mfa_type != 'REST':
                 self.errors.append(f"step({step['id']}) -> mfa -> type '{mfa_type}' is unsupported. Only 'REST' is supported.")
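Taken together, the schema and validator changes above make `model` optional on an `mfa` block, while `type` and `payload` are required and `type` must be 'REST'. A hedged sketch of a step configuration that should now pass validation, shown as a Python dict (workflows are authored in YAML; the payload contents and function name are illustrative):

mfa_step = {
    "id": "issue_refund",
    "action": "call_function",
    "function": "payments.refund",           # illustrative function reference
    "mfa": {
        "type": "REST",                       # only 'REST' is accepted by the validator
        "payload": {"channel": "sms"},        # required alongside 'type' in the updated schema
        # "model": "gpt-4o-mini",             # now optional - the MFA collector falls back to its default agent config
        "max_attempts": 3,
    },
}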
{soprano_sdk-0.2.10.dist-info → soprano_sdk-0.2.17.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: soprano-sdk
-Version: 0.2.10
+Version: 0.2.17
 Summary: YAML-driven workflow engine with AI agent integration for building conversational SOPs
 Author: Arvind Thangamani
 License: MIT
@@ -21,6 +21,7 @@ Requires-Dist: langchain-openai>=1.0.3
 Requires-Dist: langchain>=1.0.7
 Requires-Dist: langfuse>=3.10.1
 Requires-Dist: langgraph==1.0.2
+Requires-Dist: litellm>=1.74.9
 Requires-Dist: openai>=1.92.1
 Requires-Dist: pydantic-ai>=1.22.0
 Requires-Dist: pydantic>=2.0.0
@@ -29,6 +30,7 @@ Requires-Dist: pyyaml>=6.0
 Provides-Extra: dev
 Requires-Dist: gradio>=5.46.0; extra == 'dev'
 Requires-Dist: pytest>=7.0.0; extra == 'dev'
+Requires-Dist: ruff==0.14.13; extra == 'dev'
 Provides-Extra: persistence
 Requires-Dist: langgraph-checkpoint-mongodb>=0.2.0; extra == 'persistence'
 Requires-Dist: pymongo>=4.0.0; extra == 'persistence'
{soprano_sdk-0.2.10.dist-info → soprano_sdk-0.2.17.dist-info}/RECORD
CHANGED
@@ -1,22 +1,22 @@
 soprano_sdk/__init__.py,sha256=YZVl_SwQ0C-E_5_f1AwUe_hPcbgCt8k7k4_WAHM8vjE,243
 soprano_sdk/engine.py,sha256=EFK91iTHjp72otLN6Kg-yeLye2J3CAKN0QH4FI2taL8,14838
-soprano_sdk/tools.py,sha256=
+soprano_sdk/tools.py,sha256=dmJ0OZ7Bj3rvjBQvLzgWlYRFVtNJOyMO2jLqaS13cAc,10971
 soprano_sdk/agents/__init__.py,sha256=Yzbtv6iP_ABRgZo0IUjy9vDofEvLFbOjuABw758176A,636
-soprano_sdk/agents/adaptor.py,sha256=
-soprano_sdk/agents/factory.py,sha256=
+soprano_sdk/agents/adaptor.py,sha256=xJ3MBTU91fQxA9O7V5Xds6m8-n_NxkF0YiKGaWKLmrQ,4959
+soprano_sdk/agents/factory.py,sha256=CvXhpvtjf_Hb4Ce8WKAa0FzyY5Jm6lssvIIfSXu7jPY,9788
 soprano_sdk/agents/structured_output.py,sha256=7DSVzfMPsZAqBwI3v6XL15qG5Gh4jJ-qddcVPaa3gdc,3326
 soprano_sdk/authenticators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-soprano_sdk/authenticators/mfa.py,sha256=
+soprano_sdk/authenticators/mfa.py,sha256=bEV9edY2PvyRnb8YoQWt8UXAETMKLFVKrTvgSpfRer4,7341
 soprano_sdk/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 soprano_sdk/core/constants.py,sha256=UPXlRbF7gsOUNOV0Lm0jvgFfgZX7JrsV6n9I5csMfns,3508
-soprano_sdk/core/engine.py,sha256=
+soprano_sdk/core/engine.py,sha256=HKYoqwDm541pWSWwEKHxLlL3PX90Ux_5l_-HqihgL-g,12245
 soprano_sdk/core/rollback_strategies.py,sha256=NjDTtBCZlqyDql5PSwI9SMDLK7_BNlTxbW_cq_5gV0g,7783
 soprano_sdk/core/state.py,sha256=k8ojLfWgjES3p9XWMeGU5s4UK-Xa5T8mS4VtZzTrcDw,2961
 soprano_sdk/nodes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 soprano_sdk/nodes/async_function.py,sha256=v6WujLKm8NXX2iAkJ7Gz_QIVCtWFrpC6nnPyyfuCxXs,9354
 soprano_sdk/nodes/base.py,sha256=idFyOGGPnjsASYnrOF_NIh7eFcSuJqw61EoVN_WCTaU,2360
 soprano_sdk/nodes/call_function.py,sha256=afYBmj5Aditbkvb_7gD3CsXBEEUohcsC1_cdHfcOunE,5847
-soprano_sdk/nodes/collect_input.py,sha256=
+soprano_sdk/nodes/collect_input.py,sha256=PySlghXOWDl6AYKgimY_7BnVFN7odYG656aBK_z4ACE,24617
 soprano_sdk/nodes/factory.py,sha256=IbBzT4FKBnYw5PuSo7uDONV3HSFtoyqjBQQtXtUY2IY,1756
 soprano_sdk/routing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 soprano_sdk/routing/router.py,sha256=Z218r4BMbmlL9282ombutAoKsIs1WHZ2d5YHnbCeet8,3698
@@ -27,9 +27,9 @@ soprano_sdk/utils/template.py,sha256=MG_B9TMx1ShpnSGo7s7TO-VfQzuFByuRNhJTvZ668kM
 soprano_sdk/utils/tool.py,sha256=hWN826HIKmLdswLCTURLH8hWlb2WU0MB8nIUErbpB-8,1877
 soprano_sdk/utils/tracing.py,sha256=gSHeBDLe-MbAZ9rkzpCoGFveeMdR9KLaA6tteB0IWjk,1991
 soprano_sdk/validation/__init__.py,sha256=ImChmO86jYHU90xzTttto2-LmOUOmvY_ibOQaLRz5BA,262
-soprano_sdk/validation/schema.py,sha256=
-soprano_sdk/validation/validator.py,sha256=
-soprano_sdk-0.2.
-soprano_sdk-0.2.
-soprano_sdk-0.2.
-soprano_sdk-0.2.
+soprano_sdk/validation/schema.py,sha256=eRGXXMDlJyaH0XYMOXY1azvJlJwOdbHFrHjB1yuGROg,15434
+soprano_sdk/validation/validator.py,sha256=f-e2MMRL70asOIXr_0Fsd5CgGKVRiQp7AaYsHA45Km0,8792
+soprano_sdk-0.2.17.dist-info/METADATA,sha256=SEWpX1K-uAsRJQg8lWXWy2MhRsDZHchSJh8UQ8uZtN8,11374
+soprano_sdk-0.2.17.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+soprano_sdk-0.2.17.dist-info/licenses/LICENSE,sha256=A1aBauSjPNtVehOXJe3WuvdU2xvM9H8XmigFMm6665s,1073
+soprano_sdk-0.2.17.dist-info/RECORD,,
{soprano_sdk-0.2.10.dist-info → soprano_sdk-0.2.17.dist-info}/WHEEL
File without changes
{soprano_sdk-0.2.10.dist-info → soprano_sdk-0.2.17.dist-info}/licenses/LICENSE
File without changes