prela 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prela/__init__.py +394 -0
- prela/_version.py +3 -0
- prela/contrib/CLI.md +431 -0
- prela/contrib/README.md +118 -0
- prela/contrib/__init__.py +5 -0
- prela/contrib/cli.py +1063 -0
- prela/contrib/explorer.py +571 -0
- prela/core/__init__.py +64 -0
- prela/core/clock.py +98 -0
- prela/core/context.py +228 -0
- prela/core/replay.py +403 -0
- prela/core/sampler.py +178 -0
- prela/core/span.py +295 -0
- prela/core/tracer.py +498 -0
- prela/evals/__init__.py +94 -0
- prela/evals/assertions/README.md +484 -0
- prela/evals/assertions/__init__.py +78 -0
- prela/evals/assertions/base.py +90 -0
- prela/evals/assertions/multi_agent.py +625 -0
- prela/evals/assertions/semantic.py +223 -0
- prela/evals/assertions/structural.py +443 -0
- prela/evals/assertions/tool.py +380 -0
- prela/evals/case.py +370 -0
- prela/evals/n8n/__init__.py +69 -0
- prela/evals/n8n/assertions.py +450 -0
- prela/evals/n8n/runner.py +497 -0
- prela/evals/reporters/README.md +184 -0
- prela/evals/reporters/__init__.py +32 -0
- prela/evals/reporters/console.py +251 -0
- prela/evals/reporters/json.py +176 -0
- prela/evals/reporters/junit.py +278 -0
- prela/evals/runner.py +525 -0
- prela/evals/suite.py +316 -0
- prela/exporters/__init__.py +27 -0
- prela/exporters/base.py +189 -0
- prela/exporters/console.py +443 -0
- prela/exporters/file.py +322 -0
- prela/exporters/http.py +394 -0
- prela/exporters/multi.py +154 -0
- prela/exporters/otlp.py +388 -0
- prela/instrumentation/ANTHROPIC.md +297 -0
- prela/instrumentation/LANGCHAIN.md +480 -0
- prela/instrumentation/OPENAI.md +59 -0
- prela/instrumentation/__init__.py +49 -0
- prela/instrumentation/anthropic.py +1436 -0
- prela/instrumentation/auto.py +129 -0
- prela/instrumentation/base.py +436 -0
- prela/instrumentation/langchain.py +959 -0
- prela/instrumentation/llamaindex.py +719 -0
- prela/instrumentation/multi_agent/__init__.py +48 -0
- prela/instrumentation/multi_agent/autogen.py +357 -0
- prela/instrumentation/multi_agent/crewai.py +404 -0
- prela/instrumentation/multi_agent/langgraph.py +299 -0
- prela/instrumentation/multi_agent/models.py +203 -0
- prela/instrumentation/multi_agent/swarm.py +231 -0
- prela/instrumentation/n8n/__init__.py +68 -0
- prela/instrumentation/n8n/code_node.py +534 -0
- prela/instrumentation/n8n/models.py +336 -0
- prela/instrumentation/n8n/webhook.py +489 -0
- prela/instrumentation/openai.py +1198 -0
- prela/license.py +245 -0
- prela/replay/__init__.py +31 -0
- prela/replay/comparison.py +390 -0
- prela/replay/engine.py +1227 -0
- prela/replay/loader.py +231 -0
- prela/replay/result.py +196 -0
- prela-0.1.0.dist-info/METADATA +399 -0
- prela-0.1.0.dist-info/RECORD +71 -0
- prela-0.1.0.dist-info/WHEEL +4 -0
- prela-0.1.0.dist-info/entry_points.txt +2 -0
- prela-0.1.0.dist-info/licenses/LICENSE +190 -0
|
@@ -0,0 +1,450 @@
|
|
|
1
|
+
"""
|
|
2
|
+
n8n-specific assertions for workflow evaluation.
|
|
3
|
+
|
|
4
|
+
This module provides specialized assertions designed for testing n8n workflows,
|
|
5
|
+
including node completion checks, output validation, performance assertions,
|
|
6
|
+
and AI-specific validations.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
from typing import Any, Optional
|
|
12
|
+
|
|
13
|
+
from prela.core.span import Span
|
|
14
|
+
from prela.evals.assertions.base import AssertionResult, BaseAssertion
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class N8nNodeCompleted(BaseAssertion):
    """Check that a named node in an n8n execution finished with "success".

    Example:
        >>> assertion = N8nNodeCompleted(node_name="Data Processor")
        >>> result = assertion.evaluate(execution_result, None, None)
        >>> assert result.passed
    """

    def __init__(self, node_name: str):
        """Remember which node's completion is being asserted.

        Args:
            node_name: Name of the node to check
        """
        self.node_name = node_name

    def evaluate(
        self, output: Any, expected: Any | None, trace: list[Span] | None
    ) -> AssertionResult:
        """Look up the node in the execution result and check its status.

        Args:
            output: Execution result from n8n
            expected: Not used
            trace: Not used

        Returns:
            AssertionResult with pass/fail status
        """
        data = output if isinstance(output, dict) else {}

        # Locate the first node entry whose name matches.
        entry = next(
            (n for n in data.get("nodes", []) if n.get("name") == self.node_name),
            None,
        )
        if entry is None:
            return AssertionResult(
                passed=False,
                assertion_type="n8n_node_completed",
                message=f"Node '{self.node_name}' not found in execution",
                expected="node present",
                actual="node not found",
            )

        status = entry.get("status")
        ok = status == "success"
        if ok:
            detail = "completed successfully"
        else:
            detail = f"failed with status: {status}"
        return AssertionResult(
            passed=ok,
            assertion_type="n8n_node_completed",
            message=f"Node '{self.node_name}' {detail}",
            expected="success",
            actual=status,
        )

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> N8nNodeCompleted:
        """Build the assertion from a config dict.

        Args:
            config: Dictionary with 'node_name' key

        Returns:
            N8nNodeCompleted instance
        """
        return cls(node_name=config["node_name"])
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class N8nNodeOutput(BaseAssertion):
    """Assert on the output of a specific node using path notation.

    Example:
        >>> assertion = N8nNodeOutput(
        ...     node_name="API Call",
        ...     path="response.status",
        ...     expected_value=200
        ... )
        >>> result = assertion.evaluate(execution_result, None, None)
    """

    def __init__(self, node_name: str, path: str, expected_value: Any):
        """Initialize node output assertion.

        Args:
            node_name: Name of the node to check
            path: Dot-separated path to value (e.g., "response.data.id")
            expected_value: Expected value at the path
        """
        self.node_name = node_name
        self.path = path
        self.expected_value = expected_value

    def evaluate(
        self, output: Any, expected: Any | None, trace: list[Span] | None
    ) -> AssertionResult:
        """Evaluate if node output at path matches expected value.

        Args:
            output: Execution result from n8n
            expected: Not used (expected_value from __init__ is used)
            trace: Not used

        Returns:
            AssertionResult with pass/fail status
        """
        execution_result = output if isinstance(output, dict) else {}

        node_data = self._get_node(execution_result)
        if not node_data:
            return AssertionResult(
                passed=False,
                assertion_type="n8n_node_output",
                message=f"Node '{self.node_name}' not found",
                expected=f"{self.path} = {self.expected_value}",
                actual="node not found",
            )

        # NOTE(review): a missing path resolves to None, which compares equal
        # to an expected_value of None — callers asserting on None cannot
        # distinguish "value is None" from "path absent".
        actual = self._get_path(node_data.get("output", {}), self.path)
        passed = actual == self.expected_value

        return AssertionResult(
            passed=passed,
            assertion_type="n8n_node_output",
            message=f"Node '{self.node_name}' output at '{self.path}' {'matches' if passed else 'does not match'}",
            expected=self.expected_value,
            actual=actual,
        )

    def _get_node(self, result: dict) -> dict | None:
        """Find node by name in execution result; None if absent."""
        # `dict | None` rather than Optional[dict] for consistency with the
        # rest of this module's annotation style.
        for node in result.get("nodes", []):
            if node.get("name") == self.node_name:
                return node
        return None

    def _get_path(self, data: Any, path: str) -> Any:
        """Extract value from nested dict using dot notation; None on miss."""
        for part in path.split("."):
            if not isinstance(data, dict):
                # Path descends into a non-dict (or a previous miss): give up.
                return None
            data = data.get(part)
        return data

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> N8nNodeOutput:
        """Create assertion from configuration dict.

        Args:
            config: Dictionary with 'node_name', 'path', and 'expected_value' keys

        Returns:
            N8nNodeOutput instance
        """
        return cls(
            node_name=config["node_name"],
            path=config["path"],
            expected_value=config["expected_value"],
        )
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
class N8nWorkflowDuration(BaseAssertion):
    """Check that a workflow execution finished within a time budget.

    Example:
        >>> assertion = N8nWorkflowDuration(max_seconds=5.0)
        >>> result = assertion.evaluate(execution_result, None, None)
    """

    def __init__(self, max_seconds: float):
        """Store the duration budget.

        Args:
            max_seconds: Maximum allowed execution time in seconds
        """
        self.max_seconds = max_seconds
        # Pre-computed millisecond budget; the execution result reports
        # its duration in milliseconds.
        self.max_ms = max_seconds * 1000

    def evaluate(
        self, output: Any, expected: Any | None, trace: list[Span] | None
    ) -> AssertionResult:
        """Compare the reported duration against the budget.

        Args:
            output: Execution result from n8n
            expected: Not used
            trace: Not used

        Returns:
            AssertionResult with pass/fail status
        """
        data = output if isinstance(output, dict) else {}

        # A missing duration is treated as infinite, i.e. automatic failure.
        elapsed_ms = data.get("duration_ms", float("inf"))
        within_budget = elapsed_ms <= self.max_ms
        verdict = "within" if within_budget else "exceeds"

        return AssertionResult(
            passed=within_budget,
            assertion_type="n8n_workflow_duration",
            message=f"Workflow duration: {elapsed_ms:.1f}ms {verdict} limit of {self.max_ms}ms",
            expected=f"<= {self.max_ms}ms",
            actual=f"{elapsed_ms:.1f}ms",
        )

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> N8nWorkflowDuration:
        """Build the assertion from a config dict.

        Args:
            config: Dictionary with 'max_seconds' key

        Returns:
            N8nWorkflowDuration instance
        """
        return cls(max_seconds=config["max_seconds"])
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
class N8nAINodeTokens(BaseAssertion):
    """Check that an AI node stayed inside its token budget.

    Example:
        >>> assertion = N8nAINodeTokens(node_name="GPT-4", max_tokens=1000)
        >>> result = assertion.evaluate(execution_result, None, None)
    """

    def __init__(self, node_name: str, max_tokens: int):
        """Store the target node and its token budget.

        Args:
            node_name: Name of the AI node to check
            max_tokens: Maximum allowed token count
        """
        self.node_name = node_name
        self.max_tokens = max_tokens

    def evaluate(
        self, output: Any, expected: Any | None, trace: list[Span] | None
    ) -> AssertionResult:
        """Find the AI node and compare its token usage to the budget.

        Args:
            output: Execution result from n8n
            expected: Not used
            trace: Not used

        Returns:
            AssertionResult with pass/fail status
        """
        data = output if isinstance(output, dict) else {}

        for entry in data.get("nodes", []):
            if entry.get("name") != self.node_name:
                continue
            used = entry.get("total_tokens", 0)
            within = used <= self.max_tokens
            verdict = "within" if within else "exceeds"
            return AssertionResult(
                passed=within,
                assertion_type="n8n_ai_node_tokens",
                message=f"Node '{self.node_name}' used {used} tokens {verdict} budget of {self.max_tokens}",
                expected=f"<= {self.max_tokens}",
                actual=str(used),
            )

        # The node never appeared in the execution at all.
        return AssertionResult(
            passed=False,
            assertion_type="n8n_ai_node_tokens",
            message=f"AI node '{self.node_name}' not found in execution",
            expected=f"node with <= {self.max_tokens} tokens",
            actual="node not found",
        )

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> N8nAINodeTokens:
        """Build the assertion from a config dict.

        Args:
            config: Dictionary with 'node_name' and 'max_tokens' keys

        Returns:
            N8nAINodeTokens instance
        """
        return cls(node_name=config["node_name"], max_tokens=config["max_tokens"])
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
class N8nWorkflowStatus(BaseAssertion):
    """Check the overall status reported by a workflow execution.

    Example:
        >>> assertion = N8nWorkflowStatus(expected_status="success")
        >>> result = assertion.evaluate(execution_result, None, None)
    """

    def __init__(self, expected_status: str = "success"):
        """Store the status the workflow is expected to end with.

        Args:
            expected_status: Expected workflow status (default: "success")
        """
        self.expected_status = expected_status

    def evaluate(
        self, output: Any, expected: Any | None, trace: list[Span] | None
    ) -> AssertionResult:
        """Compare the execution's status field against the expectation.

        Args:
            output: Execution result from n8n
            expected: Not used (expected_status from __init__ is used)
            trace: Not used

        Returns:
            AssertionResult with pass/fail status
        """
        data = output if isinstance(output, dict) else {}

        # Absent status reads as "unknown", which fails any real expectation.
        actual_status = data.get("status", "unknown")
        matches = actual_status == self.expected_status
        verdict = "matches" if matches else "does not match"

        return AssertionResult(
            passed=matches,
            assertion_type="n8n_workflow_status",
            message=f"Workflow status: {actual_status} ({verdict} expected: {self.expected_status})",
            expected=self.expected_status,
            actual=actual_status,
        )

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> N8nWorkflowStatus:
        """Build the assertion from a config dict.

        Args:
            config: Dictionary with 'expected_status' key

        Returns:
            N8nWorkflowStatus instance
        """
        return cls(expected_status=config.get("expected_status", "success"))
|
|
352
|
+
|
|
353
|
+
|
|
354
|
+
# Convenience factory functions
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
def node_completed(node_name: str) -> N8nNodeCompleted:
    """Build an assertion that the named node completed successfully.

    Args:
        node_name: Name of the node to check

    Returns:
        N8nNodeCompleted assertion

    Example:
        >>> from prela.evals.n8n.assertions import node_completed
        >>> assertion = node_completed("Data Processor")
    """
    return N8nNodeCompleted(node_name=node_name)
|
|
371
|
+
|
|
372
|
+
|
|
373
|
+
def node_output(node_name: str, path: str, expected_value: Any) -> N8nNodeOutput:
    """Build an assertion on a node's output value at a dotted path.

    Args:
        node_name: Name of the node to check
        path: Dot-separated path to value
        expected_value: Expected value at the path

    Returns:
        N8nNodeOutput assertion

    Example:
        >>> from prela.evals.n8n.assertions import node_output
        >>> assertion = node_output("API Call", "response.status", 200)
    """
    return N8nNodeOutput(node_name=node_name, path=path, expected_value=expected_value)
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
def duration_under(seconds: float) -> N8nWorkflowDuration:
    """Build an assertion that the workflow finished within *seconds*.

    Args:
        seconds: Maximum allowed duration in seconds

    Returns:
        N8nWorkflowDuration assertion

    Example:
        >>> from prela.evals.n8n.assertions import duration_under
        >>> assertion = duration_under(5.0)
    """
    return N8nWorkflowDuration(max_seconds=seconds)
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
def tokens_under(node_name: str, max_tokens: int) -> N8nAINodeTokens:
    """Build an assertion on an AI node's token budget.

    Args:
        node_name: Name of the AI node
        max_tokens: Maximum allowed tokens

    Returns:
        N8nAINodeTokens assertion

    Example:
        >>> from prela.evals.n8n.assertions import tokens_under
        >>> assertion = tokens_under("GPT-4", 1000)
    """
    return N8nAINodeTokens(node_name=node_name, max_tokens=max_tokens)
|
|
422
|
+
|
|
423
|
+
|
|
424
|
+
def workflow_completed() -> N8nWorkflowStatus:
    """Build an assertion that the workflow finished successfully.

    Returns:
        N8nWorkflowStatus assertion with expected_status="success"

    Example:
        >>> from prela.evals.n8n.assertions import workflow_completed
        >>> assertion = workflow_completed()
    """
    # "success" is the constructor default, so no argument is needed.
    return N8nWorkflowStatus()
|
|
435
|
+
|
|
436
|
+
|
|
437
|
+
def workflow_status(expected_status: str) -> N8nWorkflowStatus:
    """Build an assertion for a specific workflow status.

    Args:
        expected_status: Expected workflow status

    Returns:
        N8nWorkflowStatus assertion

    Example:
        >>> from prela.evals.n8n.assertions import workflow_status
        >>> assertion = workflow_status("error")
    """
    return N8nWorkflowStatus(expected_status)
|