prela 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71) hide show
  1. prela/__init__.py +394 -0
  2. prela/_version.py +3 -0
  3. prela/contrib/CLI.md +431 -0
  4. prela/contrib/README.md +118 -0
  5. prela/contrib/__init__.py +5 -0
  6. prela/contrib/cli.py +1063 -0
  7. prela/contrib/explorer.py +571 -0
  8. prela/core/__init__.py +64 -0
  9. prela/core/clock.py +98 -0
  10. prela/core/context.py +228 -0
  11. prela/core/replay.py +403 -0
  12. prela/core/sampler.py +178 -0
  13. prela/core/span.py +295 -0
  14. prela/core/tracer.py +498 -0
  15. prela/evals/__init__.py +94 -0
  16. prela/evals/assertions/README.md +484 -0
  17. prela/evals/assertions/__init__.py +78 -0
  18. prela/evals/assertions/base.py +90 -0
  19. prela/evals/assertions/multi_agent.py +625 -0
  20. prela/evals/assertions/semantic.py +223 -0
  21. prela/evals/assertions/structural.py +443 -0
  22. prela/evals/assertions/tool.py +380 -0
  23. prela/evals/case.py +370 -0
  24. prela/evals/n8n/__init__.py +69 -0
  25. prela/evals/n8n/assertions.py +450 -0
  26. prela/evals/n8n/runner.py +497 -0
  27. prela/evals/reporters/README.md +184 -0
  28. prela/evals/reporters/__init__.py +32 -0
  29. prela/evals/reporters/console.py +251 -0
  30. prela/evals/reporters/json.py +176 -0
  31. prela/evals/reporters/junit.py +278 -0
  32. prela/evals/runner.py +525 -0
  33. prela/evals/suite.py +316 -0
  34. prela/exporters/__init__.py +27 -0
  35. prela/exporters/base.py +189 -0
  36. prela/exporters/console.py +443 -0
  37. prela/exporters/file.py +322 -0
  38. prela/exporters/http.py +394 -0
  39. prela/exporters/multi.py +154 -0
  40. prela/exporters/otlp.py +388 -0
  41. prela/instrumentation/ANTHROPIC.md +297 -0
  42. prela/instrumentation/LANGCHAIN.md +480 -0
  43. prela/instrumentation/OPENAI.md +59 -0
  44. prela/instrumentation/__init__.py +49 -0
  45. prela/instrumentation/anthropic.py +1436 -0
  46. prela/instrumentation/auto.py +129 -0
  47. prela/instrumentation/base.py +436 -0
  48. prela/instrumentation/langchain.py +959 -0
  49. prela/instrumentation/llamaindex.py +719 -0
  50. prela/instrumentation/multi_agent/__init__.py +48 -0
  51. prela/instrumentation/multi_agent/autogen.py +357 -0
  52. prela/instrumentation/multi_agent/crewai.py +404 -0
  53. prela/instrumentation/multi_agent/langgraph.py +299 -0
  54. prela/instrumentation/multi_agent/models.py +203 -0
  55. prela/instrumentation/multi_agent/swarm.py +231 -0
  56. prela/instrumentation/n8n/__init__.py +68 -0
  57. prela/instrumentation/n8n/code_node.py +534 -0
  58. prela/instrumentation/n8n/models.py +336 -0
  59. prela/instrumentation/n8n/webhook.py +489 -0
  60. prela/instrumentation/openai.py +1198 -0
  61. prela/license.py +245 -0
  62. prela/replay/__init__.py +31 -0
  63. prela/replay/comparison.py +390 -0
  64. prela/replay/engine.py +1227 -0
  65. prela/replay/loader.py +231 -0
  66. prela/replay/result.py +196 -0
  67. prela-0.1.0.dist-info/METADATA +399 -0
  68. prela-0.1.0.dist-info/RECORD +71 -0
  69. prela-0.1.0.dist-info/WHEEL +4 -0
  70. prela-0.1.0.dist-info/entry_points.txt +2 -0
  71. prela-0.1.0.dist-info/licenses/LICENSE +190 -0
@@ -0,0 +1,489 @@
1
+ """
2
+ n8n webhook handler for receiving workflow execution traces via HTTP.
3
+
4
+ This module handles traces sent from n8n workflows via HTTP webhook nodes,
5
+ parsing the payload and converting it into Prela spans.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import logging
11
+ from datetime import datetime
12
+ from typing import Any, Optional
13
+
14
+ from pydantic import BaseModel, Field
15
+
16
+ from prela.core.clock import now
17
+ from prela.core.span import Span, SpanStatus, SpanType
18
+ from prela.core.tracer import Tracer
19
+ from prela.instrumentation.n8n.models import (
20
+ N8nAINodeExecution,
21
+ N8nNodeExecution,
22
+ N8nSpanType,
23
+ N8nWorkflowExecution,
24
+ )
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+
29
# Maps an n8n node `type` string to a coarse AI category used when building
# spans: "ai_agent", "llm_chain", "llm", "retrieval", "memory", or "tool".
# Both the scoped ("@n8n/...") and unscoped forms of a node type are listed
# where both appear in the wild.  Node types absent from this mapping are
# treated as non-AI nodes.
N8N_AI_NODE_TYPES = {
    # LangChain Agent nodes
    "n8n-nodes-langchain.agent": "ai_agent",
    "n8n-nodes-langchain.agentExecutor": "ai_agent",
    # LangChain Chain nodes
    "n8n-nodes-langchain.chainLlm": "llm_chain",
    "n8n-nodes-langchain.chainSummarization": "llm_chain",
    "n8n-nodes-langchain.chainRetrievalQa": "llm_chain",
    # LLM Chat nodes (OpenAI)
    "@n8n/n8n-nodes-langchain.lmChatOpenAi": "llm",
    "n8n-nodes-langchain.lmChatOpenAi": "llm",
    # LLM Chat nodes (Anthropic)
    "@n8n/n8n-nodes-langchain.lmChatAnthropic": "llm",
    "n8n-nodes-langchain.lmChatAnthropic": "llm",
    # LLM Chat nodes (Ollama)
    "@n8n/n8n-nodes-langchain.lmChatOllama": "llm",
    "n8n-nodes-langchain.lmChatOllama": "llm",
    # LLM Chat nodes (Other providers)
    "n8n-nodes-langchain.lmChatAzureOpenAi": "llm",
    "n8n-nodes-langchain.lmChatMistralCloud": "llm",
    "n8n-nodes-langchain.lmChatGoogleVertex": "llm",
    # Vector Store nodes
    "n8n-nodes-langchain.vectorStoreQdrant": "retrieval",
    "n8n-nodes-langchain.vectorStorePinecone": "retrieval",
    "n8n-nodes-langchain.vectorStoreSupabase": "retrieval",
    "n8n-nodes-langchain.vectorStoreInMemory": "retrieval",
    # Memory nodes
    "n8n-nodes-langchain.memoryBufferWindow": "memory",
    "n8n-nodes-langchain.memoryBuffer": "memory",
    "n8n-nodes-langchain.memoryChatSummary": "memory",
    # Tool nodes
    "n8n-nodes-langchain.toolCalculator": "tool",
    "n8n-nodes-langchain.toolCode": "tool",
    "n8n-nodes-langchain.toolHttpRequest": "tool",
    "n8n-nodes-langchain.toolWorkflow": "tool",
}
66
+
67
+
68
class N8nWebhookPayload(BaseModel):
    """
    Represents the payload received from an n8n webhook.

    n8n webhooks send execution data in a specific format with workflow,
    execution, and node metadata, along with the actual data items.

    The ``workflow``/``execution``/``node`` dicts are kept untyped because
    n8n's exact key set varies by version; ``parse_n8n_webhook`` reads
    individual keys defensively with ``.get()``.
    """

    # Workflow-level metadata; expected keys include "id" and "name".
    workflow: dict = Field(..., description="Workflow metadata (id, name, active)")
    # Execution-level metadata; expected keys include "id", "mode", "startedAt".
    execution: dict = Field(
        ..., description="Execution metadata (id, mode, startedAt)"
    )
    # Metadata of the node that sent the webhook ("name", "type", "parameters").
    node: dict = Field(..., description="Node metadata (name, type, parameters)")
    # n8n's standard item array: each element is a {"json": {...}} object.
    data: list[dict] = Field(
        default_factory=list,
        description="n8n item array (list of {json: {...}} objects)",
    )
    # Free-form extra metadata; optional and currently unused downstream.
    metadata: Optional[dict] = Field(
        None, description="Additional metadata from n8n"
    )
88
+
89
+
90
def is_ai_node(node_type: str) -> bool:
    """Return True when *node_type* is a known AI-related n8n node type."""
    # All mapped categories are non-empty strings, so a None lookup result
    # means the node type is not in the AI mapping.
    return N8N_AI_NODE_TYPES.get(node_type) is not None
93
+
94
+
95
def get_ai_node_category(node_type: str) -> Optional[str]:
    """Return the AI category for *node_type*, or None for non-AI nodes."""
    try:
        return N8N_AI_NODE_TYPES[node_type]
    except KeyError:
        return None
98
+
99
+
100
def map_n8n_span_type_to_prela(n8n_type: N8nSpanType) -> SpanType:
    """Translate an ``N8nSpanType`` into the closest Prela ``SpanType``.

    WORKFLOW and AI_AGENT both map to AGENT; NODE, MEMORY, and any
    unrecognised value fall back to CUSTOM.
    """
    if n8n_type in (N8nSpanType.WORKFLOW, N8nSpanType.AI_AGENT):
        return SpanType.AGENT
    if n8n_type == N8nSpanType.LLM:
        return SpanType.LLM
    if n8n_type == N8nSpanType.TOOL:
        return SpanType.TOOL
    if n8n_type == N8nSpanType.RETRIEVAL:
        return SpanType.RETRIEVAL
    # NODE, MEMORY, and anything unknown.
    return SpanType.CUSTOM
112
+
113
+
114
def extract_ai_attributes(
    node_type: str, node_params: dict, items: list[dict]
) -> dict[str, Any]:
    """
    Extract AI-specific attributes from node parameters and output items.

    Extraction is best-effort: a malformed individual field (for example a
    non-numeric ``temperature`` holding an unresolved n8n expression string)
    is skipped rather than aborting the remaining attributes.  When several
    output items carry the same key, the last item wins.

    Args:
        node_type: The n8n node type
        node_params: Node parameters/configuration
        items: Output items from the node

    Returns:
        Dictionary of AI-specific attributes (model, tokens, prompts, etc.)
    """
    attrs: dict[str, Any] = {}

    try:
        # Extract model information
        if "model" in node_params:
            attrs["model"] = node_params["model"]
        elif "modelName" in node_params:
            attrs["model"] = node_params["modelName"]

        # Extract temperature.  Guarded separately so a non-numeric value
        # does not raise out of the outer try and lose every attribute
        # extracted below (provider, tokens, ...).
        if "temperature" in node_params:
            try:
                attrs["temperature"] = float(node_params["temperature"])
            except (TypeError, ValueError):
                # e.g. an unresolved "={{ ... }}" n8n expression; skip it.
                pass

        # Extract system prompt (truncated to keep span payloads small)
        if "systemMessage" in node_params:
            attrs["system_prompt"] = str(node_params["systemMessage"])[:500]

        # Determine provider from node type
        node_lower = node_type.lower()
        if "openai" in node_lower:
            attrs["provider"] = "openai"
        elif "anthropic" in node_lower:
            attrs["provider"] = "anthropic"
        elif "ollama" in node_lower:
            attrs["provider"] = "ollama"
        elif "mistral" in node_lower:
            attrs["provider"] = "mistral"
        elif "vertex" in node_lower or "google" in node_lower:
            attrs["provider"] = "google"

        # Extract token usage and content from output items if available
        for item in items:
            json_data = item.get("json", {})

            # OpenAI/Anthropic response format
            if "usage" in json_data:
                usage = json_data["usage"]
                if "prompt_tokens" in usage:
                    attrs["prompt_tokens"] = usage["prompt_tokens"]
                if "completion_tokens" in usage:
                    attrs["completion_tokens"] = usage["completion_tokens"]
                if "total_tokens" in usage:
                    attrs["total_tokens"] = usage["total_tokens"]

            # Extract response content (truncated)
            if "response" in json_data:
                response = json_data["response"]
                if isinstance(response, str):
                    attrs["response_content"] = response[:500]
                elif isinstance(response, dict):
                    if "text" in response:
                        attrs["response_content"] = str(response["text"])[:500]
                    elif "content" in response:
                        attrs["response_content"] = str(response["content"])[:500]

            # Extract tool calls
            if "tool_calls" in json_data:
                attrs["tool_calls"] = json_data["tool_calls"]
            elif "function_call" in json_data:
                attrs["tool_calls"] = [json_data["function_call"]]

            # Extract retrieval query (for vector store nodes)
            if "query" in json_data:
                attrs["retrieval_query"] = str(json_data["query"])[:200]

            # Extract retrieved documents
            if "documents" in json_data:
                docs = json_data["documents"]
                if isinstance(docs, list):
                    attrs["retrieved_documents"] = docs[:5]  # Limit to 5 docs

    except Exception as e:
        # Best-effort contract: never let attribute extraction break
        # webhook/trace handling.
        logger.debug(f"Error extracting AI attributes: {e}")

    return attrs
203
+
204
+
205
def parse_n8n_webhook(payload: dict) -> list[Span]:
    """
    Convert n8n webhook payload into Prela spans.

    Builds a two-level hierarchy — a workflow-level parent span plus one
    node-level child span — and ends both, since the webhook fires only
    after the execution has completed.

    Args:
        payload: Raw webhook payload from n8n

    Returns:
        List of Span objects representing the execution; empty when the
        payload fails validation.
    """
    try:
        webhook_data = N8nWebhookPayload(**payload)
    except Exception as e:
        logger.error(f"Failed to parse n8n webhook payload: {e}")
        return []

    workflow_meta = webhook_data.workflow
    execution_meta = webhook_data.execution
    node_meta = webhook_data.node

    workflow_id = workflow_meta.get("id", "unknown")
    workflow_name = workflow_meta.get("name", "Unknown Workflow")
    execution_id = execution_meta.get("id", "unknown")
    execution_mode = execution_meta.get("mode", "manual")

    # n8n sends ISO-8601 timestamps with a trailing "Z", which
    # datetime.fromisoformat does not accept on older Pythons; normalise it.
    started_at_str = execution_meta.get("startedAt")
    if started_at_str:
        started_at = datetime.fromisoformat(started_at_str.replace("Z", "+00:00"))
    else:
        started_at = now()

    # Derive a stable trace id from the n8n execution id.
    trace_id = f"n8n-{execution_id}"

    # Parent span: the workflow execution as a whole.
    workflow_span = Span(
        trace_id=trace_id,
        parent_span_id=None,
        name=f"n8n.workflow.{workflow_name}",
        span_type=SpanType.AGENT,
        started_at=started_at,
        attributes={
            "n8n.workflow_id": workflow_id,
            "n8n.workflow_name": workflow_name,
            "n8n.execution_id": execution_id,
            "n8n.execution_mode": execution_mode,
            "service.name": "n8n",
        },
    )

    node_name = node_meta.get("name", "Unknown Node")
    node_type = node_meta.get("type", "unknown")
    node_params = node_meta.get("parameters", {})

    node_attributes = {
        "n8n.node_name": node_name,
        "n8n.node_type": node_type,
        "service.name": "n8n",
    }

    # AI nodes get extra attributes and a more specific span type.
    span_type = SpanType.CUSTOM
    if is_ai_node(node_type):
        ai_category = get_ai_node_category(node_type)
        node_attributes["n8n.ai_category"] = ai_category
        node_attributes.update(
            extract_ai_attributes(node_type, node_params, webhook_data.data)
        )
        span_type = {
            "ai_agent": SpanType.AGENT,
            "llm": SpanType.LLM,
            "llm_chain": SpanType.LLM,
            "tool": SpanType.TOOL,
            "retrieval": SpanType.RETRIEVAL,
        }.get(ai_category, SpanType.CUSTOM)

    # Child span: the node that posted the webhook.
    node_span = Span(
        trace_id=trace_id,
        parent_span_id=workflow_span.span_id,
        name=f"n8n.node.{node_name}",
        span_type=span_type,
        started_at=started_at,
        attributes=node_attributes,
    )

    # Record the node's output items as a single event.
    if webhook_data.data:
        node_span.add_event(
            name="n8n.node.output",
            attributes={
                "item_count": len(webhook_data.data),
                "items": str(webhook_data.data)[:1000],  # Truncate
            },
        )

    # The webhook arrives after execution, so both spans are already done.
    # Span.end() sets status to SUCCESS if still PENDING.
    node_span.end()
    workflow_span.end()

    return [workflow_span, node_span]
327
+
328
+
329
class N8nWebhookHandler:
    """
    HTTP server for receiving n8n webhook traces locally.

    This handler runs a lightweight HTTP server that receives webhook
    POST requests from n8n workflows and automatically converts them
    into Prela spans.

    Example:
        ```python
        from prela import init
        from prela.instrumentation.n8n.webhook import N8nWebhookHandler

        tracer = init(service_name="n8n-workflows")
        handler = N8nWebhookHandler(tracer, port=8787)
        handler.start()

        # Configure n8n webhook node to POST to http://localhost:8787/webhook
        # Handler will automatically trace all workflow executions
        ```
    """

    def __init__(self, tracer: Tracer, port: int = 8787, host: str = "0.0.0.0"):
        """
        Initialize the webhook handler.

        Args:
            tracer: Prela tracer instance for creating spans
            port: Port to listen on (default: 8787)
            host: Host to bind to (default: 0.0.0.0)
        """
        self.tracer = tracer
        self.port = port
        self.host = host
        self.app = None  # aiohttp.web.Application, set once the server starts
        self.runner = None  # aiohttp.web.AppRunner, set once the server starts
        self._loop = None  # event loop that owns the runner; used by stop()

    async def handle_webhook(self, request) -> Any:
        """
        Handle incoming webhook POST request.

        Args:
            request: aiohttp request object

        Returns:
            JSON-serializable dict with status
        """
        try:
            # Parse JSON payload
            payload = await request.json()

            # Convert to spans
            spans = parse_n8n_webhook(payload)

            # Export spans via tracer's exporter
            if spans and self.tracer.exporter:
                for span in spans:
                    self.tracer.exporter.export([span])

            logger.info(f"Received n8n webhook, created {len(spans)} spans")

            return {
                "status": "success",
                "message": f"Created {len(spans)} spans",
                "trace_id": spans[0].trace_id if spans else None,
            }

        except Exception as e:
            logger.error(f"Error handling n8n webhook: {e}", exc_info=True)
            return {"status": "error", "message": str(e)}

    @staticmethod
    def _import_web():
        """Import and return aiohttp.web, with a helpful error if missing."""
        try:
            from aiohttp import web
        except ImportError:
            raise ImportError(
                "aiohttp is required for N8nWebhookHandler. "
                "Install with: pip install aiohttp"
            )
        return web

    async def _start_server(self, web) -> None:
        """Build the aiohttp app, register routes, and start listening."""

        async def _handle_webhook(request):
            result = await self.handle_webhook(request)
            return web.json_response(result)

        self.app = web.Application()
        self.app.router.add_post("/webhook", _handle_webhook)
        self.app.router.add_post("/", _handle_webhook)  # Root endpoint

        self.runner = web.AppRunner(self.app)
        await self.runner.setup()
        site = web.TCPSite(self.runner, self.host, self.port)
        await site.start()

        logger.info(
            f"n8n webhook handler listening on http://{self.host}:{self.port}"
        )

    def _run(self, loop) -> None:
        """Start the server on *loop*, remember the loop, and block forever."""
        web = self._import_web()
        self._loop = loop
        loop.run_until_complete(self._start_server(web))
        loop.run_forever()

    def start(self) -> None:
        """
        Start the HTTP server.

        Blocks the calling thread: the server is started on the current
        thread's event loop and that loop then runs forever.

        Raises:
            ImportError: if aiohttp is not installed.
        """
        import asyncio

        self._run(asyncio.get_event_loop())

    def start_background(self) -> None:
        """
        Start the HTTP server in a background thread.

        Creates a fresh event loop bound to the calling thread and serves
        on it forever.  Designed to be the target of threading.Thread.

        Raises:
            ImportError: if aiohttp is not installed.
        """
        import asyncio

        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self._run(loop)

    def stop(self) -> None:
        """Stop the HTTP server.

        Cleanup runs on the event loop that owns the AppRunner — aiohttp
        resources must be released on their own loop, and the previous
        implementation's ``asyncio.get_event_loop()`` picked the wrong
        loop when called from another thread.
        """
        if not self.runner:
            return

        import asyncio

        loop = self._loop or asyncio.get_event_loop()
        if loop.is_running():
            # Called from a different thread while the server loop is live:
            # schedule cleanup on that loop, wait for it, then stop the loop
            # so run_forever() in the server thread returns.
            asyncio.run_coroutine_threadsafe(self.runner.cleanup(), loop).result()
            loop.call_soon_threadsafe(loop.stop)
        else:
            loop.run_until_complete(self.runner.cleanup())
        logger.info("n8n webhook handler stopped")