quraite-0.0.1-py3-none-any.whl → quraite-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quraite/__init__.py +3 -3
- quraite/adapters/__init__.py +134 -134
- quraite/adapters/agno_adapter.py +159 -159
- quraite/adapters/base.py +123 -123
- quraite/adapters/bedrock_agents_adapter.py +343 -343
- quraite/adapters/flowise_adapter.py +275 -275
- quraite/adapters/google_adk_adapter.py +209 -209
- quraite/adapters/http_adapter.py +239 -239
- quraite/adapters/langflow_adapter.py +192 -192
- quraite/adapters/langgraph_adapter.py +304 -304
- quraite/adapters/langgraph_server_adapter.py +252 -252
- quraite/adapters/n8n_adapter.py +220 -220
- quraite/adapters/openai_agents_adapter.py +269 -269
- quraite/adapters/pydantic_ai_adapter.py +312 -312
- quraite/adapters/smolagents_adapter.py +152 -152
- quraite/logger.py +61 -62
- quraite/schema/message.py +91 -54
- quraite/schema/response.py +16 -16
- quraite/serve/__init__.py +1 -1
- quraite/serve/cloudflared.py +210 -210
- quraite/serve/local_agent.py +360 -360
- quraite/tracing/__init__.py +24 -24
- quraite/tracing/constants.py +16 -16
- quraite/tracing/span_exporter.py +115 -115
- quraite/tracing/span_processor.py +49 -49
- quraite/tracing/tool_extractors.py +290 -290
- quraite/tracing/trace.py +564 -494
- quraite/tracing/types.py +179 -179
- quraite/tracing/utils.py +170 -170
- quraite/utils/json_utils.py +269 -269
- {quraite-0.0.1.dist-info → quraite-0.1.0.dist-info}/METADATA +9 -9
- quraite-0.1.0.dist-info/RECORD +35 -0
- {quraite-0.0.1.dist-info → quraite-0.1.0.dist-info}/WHEEL +1 -1
- quraite/traces/traces_adk_openinference.json +0 -379
- quraite/traces/traces_agno_multi_agent.json +0 -669
- quraite/traces/traces_agno_openinference.json +0 -321
- quraite/traces/traces_crewai_openinference.json +0 -155
- quraite/traces/traces_langgraph_openinference.json +0 -349
- quraite/traces/traces_langgraph_openinference_multi_agent.json +0 -2705
- quraite/traces/traces_langgraph_traceloop.json +0 -510
- quraite/traces/traces_openai_agents_multi_agent_1.json +0 -402
- quraite/traces/traces_openai_agents_openinference.json +0 -341
- quraite/traces/traces_pydantic_openinference.json +0 -286
- quraite/traces/traces_pydantic_openinference_multi_agent_1.json +0 -399
- quraite/traces/traces_pydantic_openinference_multi_agent_2.json +0 -398
- quraite/traces/traces_smol_agents_openinference.json +0 -397
- quraite/traces/traces_smol_agents_tool_calling_openinference.json +0 -704
- quraite-0.0.1.dist-info/RECORD +0 -49
quraite/adapters/n8n_adapter.py
CHANGED
@@ -1,220 +1,220 @@

Both sides of this hunk are identical line for line: all 220 lines of the module are removed and re-added with the same content, so the file is shown once below.

```python
import asyncio
import json
import uuid
from typing import Any, Dict, List, Optional, Union

import aiohttp

from quraite.adapters.base import BaseAdapter
from quraite.logger import get_logger
from quraite.schema.message import (
    AgentMessage,
    AssistantMessage,
    MessageContentText,
    ToolCall,
    ToolMessage,
)
from quraite.schema.response import AgentInvocationResponse

logger = get_logger(__name__)


class N8nAdapter(BaseAdapter):
    def __init__(
        self, api_url: str, headers: Optional[Dict[str, str]] = None, timeout: int = 60
    ):
        self.api_url = api_url
        self.headers = headers or {}
        self.timeout = timeout

        if "Content-Type" not in self.headers:
            self.headers["Content-Type"] = "application/json"
        logger.info(
            "N8nAdapter initialized (api_url=%s, timeout=%s)", self.api_url, timeout
        )

    def _convert_api_output_to_messages(
        self,
        response: Dict[str, Any],
    ) -> List[AgentMessage]:
        logger.debug(
            "Converting n8n response (steps=%d)",
            len(response[0].get("intermediateSteps", [])),
        )
        messages: List[AgentMessage] = []
        output = response[0]["output"]
        intermediateSteps = response[0]["intermediateSteps"]

        if not intermediateSteps:
            return [
                AssistantMessage(
                    content=[MessageContentText(type="text", text=output)],
                )
            ]

        def flush_messages(tool_calls_dict: Dict[str, Any]):
            nonlocal messages
            tool_calls_list: List[ToolCall] = []
            tool_results: List[ToolMessage] = []

            for tool_call_id, tool_call_dict in tool_calls_dict.items():
                tool_name = tool_call_dict.get("name", "")
                tool_args = tool_call_dict.get("arguments", {})
                if not isinstance(tool_args, dict):
                    tool_args = {}

                tool_calls_list.append(
                    ToolCall(
                        id=tool_call_id,
                        name=tool_name,
                        arguments=tool_args,
                    )
                )

                tool_result = tool_call_dict.get("result", "")
                tool_results.append(
                    ToolMessage(
                        tool_name=tool_name,
                        tool_call_id=tool_call_id,
                        content=[
                            MessageContentText(type="text", text=str(tool_result))
                        ],
                    )
                )

            if tool_calls_list:
                messages.append(AssistantMessage(tool_calls=tool_calls_list))

            messages.extend(tool_results)

        current_step_tool_calls_dict: Dict[str, Any] = {}
        for step in intermediateSteps:
            message_log = step.get("action", {}).get("messageLog", {})
            if message_log:
                tool_calls = message_log[0].get("kwargs", {}).get("tool_calls", [])

                if tool_calls:
                    # this condition means that we are at the start of a new step,
                    # so we need to flush the previous step's tool calls and tool results
                    if current_step_tool_calls_dict:
                        flush_messages(current_step_tool_calls_dict)
                        current_step_tool_calls_dict = {}

                    for tool_call in tool_calls:
                        current_step_tool_calls_dict[tool_call.get("id")] = {
                            "name": tool_call.get("name"),
                            "arguments": tool_call.get("args"),
                        }

            tool_id = step.get("action", {}).get("toolCallId")
            if tool_id not in current_step_tool_calls_dict:
                continue

            current_step_tool_calls_dict[tool_id]["result"] = step.get("observation")

        # flush the last step's tool calls and tool results
        flush_messages(current_step_tool_calls_dict)
        messages.append(
            AssistantMessage(
                content=[MessageContentText(type="text", text=output)],
            )
        )

        logger.info(
            "n8n conversion produced %d messages (final_output_length=%d)",
            len(messages),
            len(str(output)),
        )
        return messages

    def _prepare_input(self, input: List[AgentMessage]) -> str:
        logger.debug("Preparing n8n input from %d messages", len(input))
        if not input or input[-1].role != "user":
            logger.error("n8n input missing user message")
            raise ValueError("No user message found in the input")

        last_user_message = input[-1]
        if not last_user_message.content:
            logger.error("n8n user message missing content")
            raise ValueError("User message has no content")

        text_content = None
        for content_item in last_user_message.content:
            if content_item.type == "text" and content_item.text:
                text_content = content_item.text
                break

        if not text_content:
            logger.error("n8n user message missing text content")
            raise ValueError("No text content found in user message")

        logger.debug("Prepared n8n input (text_length=%d)", len(text_content))
        return text_content

    async def _aapi_call(
        self,
        query: str,
        sessionId: str,
    ) -> Dict[str, Any]:
        payload = {
            "query": query,
            "sessionId": sessionId,
        }
        logger.debug(
            "Calling n8n API (sessionId=%s, query_length=%d)", sessionId, len(query)
        )
        async with aiohttp.ClientSession() as session:
            try:
                async with session.post(
                    self.api_url,
                    headers=self.headers,
                    json=payload,
                    timeout=aiohttp.ClientTimeout(total=self.timeout),
                ) as response:
                    response.raise_for_status()
                    logger.info("n8n API call succeeded (status=%s)", response.status)
                    return await response.json()

            except (aiohttp.ClientError, asyncio.TimeoutError) as e:
                logger.exception("n8n API request failed")
                raise aiohttp.ClientError(f"Async API request failed: {str(e)}") from e

            except json.JSONDecodeError as e:
                logger.exception("n8n API response decoding failed")
                raise ValueError(f"Failed to decode JSON response: {e}") from e

    async def ainvoke(
        self,
        input: List[AgentMessage],
        session_id: Union[str, None],
    ) -> AgentInvocationResponse:
        logger.info(
            "n8n ainvoke called (session_id=%s, input_messages=%d)",
            session_id,
            len(input),
        )
        agent_input = self._prepare_input(input)

        try:
            agent_output = await self._aapi_call(
                query=agent_input,
                sessionId=session_id if session_id else uuid.uuid4(),
            )
            logger.debug(
                "n8n API returned payload keys: %s", list(agent_output[0].keys())
            )
        except Exception as e:
            logger.exception("Error calling n8n endpoint")
            raise RuntimeError(f"Error calling n8n endpoint: {e}") from e

        try:
            agent_trajectory = self._convert_api_output_to_messages(agent_output)
            logger.info(
                "n8n conversion produced %d trajectory messages", len(agent_trajectory)
            )
            return AgentInvocationResponse(
                agent_trajectory=agent_trajectory,
            )
        except Exception as e:
            logger.exception("Error processing n8n response")
            raise RuntimeError(f"Error processing n8n response: {e}") from e
```