mermaid-trace 0.4.1__py3-none-any.whl → 0.6.0.post0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,14 +1,21 @@
 """
-FastAPI Integration Module
+FastAPI Integration Module for MermaidTrace.
 
-This module provides middleware for integrating MermaidTrace with FastAPI applications.
-It automatically traces HTTP requests and responses, converting them into Mermaid
-sequence diagram events.
+This module provides the middleware necessary to integrate MermaidTrace with
+FastAPI applications. It serves as the bridge between HTTP requests and the
+sequence diagram generation logic.
+
+Key functionalities include:
+- Middleware for intercepting all incoming HTTP requests.
+- Automatic extraction of tracing headers (X-Source, X-Trace-ID).
+- Initialization of logging context for request lifecycles.
+- Automatic logging of request start and response completion (success or error).
 """
 
 from typing import Any, TYPE_CHECKING
 import time
 import uuid
+import traceback
 
 from ..core.events import FlowEvent
 from ..core.context import LogContext
@@ -16,21 +23,21 @@ from ..core.decorators import get_flow_logger
 
 # Conditional imports to support optional FastAPI dependency
 if TYPE_CHECKING:
-    # For type checking only, import the actual FastAPI/Starlette types
+    # For static type checkers (mypy, pyright), import the actual types.
     from fastapi import Request, Response
     from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
 else:
     try:
-        # Try to import FastAPI/Starlette at runtime
+        # Runtime import attempt for FastAPI and Starlette.
         from fastapi import Request, Response
         from starlette.middleware.base import (
             BaseHTTPMiddleware,
             RequestResponseEndpoint,
         )
     except ImportError:
-        # Handle the case where FastAPI/Starlette are not installed
-        # Define dummy types to prevent NameErrors at import time
-        # Instantiation will fail explicitly in __init__
+        # Fallback for when FastAPI is not installed in the environment.
+        # This prevents ImportErrors when importing this module without FastAPI.
+        # However, instantiating the middleware will still fail.
        BaseHTTPMiddleware = object  # type: ignore[misc,assignment]
        Request = Any  # type: ignore[assignment]
        Response = Any  # type: ignore[assignment]
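
The conditional-import pattern above deserves a standalone illustration: type checkers always see the real Starlette types, the runtime import degrades to placeholder objects when the dependency is absent, and the failure is deferred until the middleware is actually instantiated. A minimal sketch of the same pattern, using a hypothetical `somelib` package:

    from typing import Any, TYPE_CHECKING

    if TYPE_CHECKING:
        from somelib import Client  # real type, seen only by mypy/pyright
    else:
        try:
            from somelib import Client  # real import when installed
        except ImportError:
            Client = Any  # placeholder so this module still imports

    class Wrapper:
        def __init__(self) -> None:
            # Fail loudly at use time rather than at import time.
            if Client is Any:
                raise ImportError("somelib is required to use Wrapper")

This keeps importing the package cheap for users who never touch the optional integration.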
@@ -41,12 +48,22 @@ class MermaidTraceMiddleware(BaseHTTPMiddleware):
     """
     FastAPI Middleware to trace HTTP requests as interactions in the sequence diagram.
 
-    This middleware acts as the entry point for tracing web requests, handling:
-    1. Identification of the client (Source participant)
-    2. Logging of incoming requests
-    3. Initialization of the `LogContext` for the request lifecycle
-    4. Logging of responses or errors
-    5. Cleanup of context after request completion
+    This middleware wraps the entire request processing pipeline. It is responsible for
+    recording the initial interaction between an external client (Source) and this
+    service (Target).
+
+    Middleware Logic:
+    1. **Request Interception**: Captures the request before it reaches any route handler.
+    2. **Context Initialization**: Sets up the `LogContext` with the current service name
+       and trace ID, ensuring all internal function calls are correctly associated.
+    3. **Event Logging**: Logs the "Request" event (Client -> API) and the corresponding
+       "Response" event (API -> Client).
+    4. **Error Handling**: Captures exceptions, logs error events (API --x Client),
+       and re-raises them to standard error handlers.
+
+    Attributes:
+        app_name (str): The name of the current service/application. This name will
+            appear as a participant in the generated Mermaid sequence diagram.
     """
 
     def __init__(self, app: Any, app_name: str = "FastAPI"):
@@ -54,19 +71,20 @@ class MermaidTraceMiddleware(BaseHTTPMiddleware):
         Initialize the middleware.
 
         Args:
-            app: The FastAPI application instance
-            app_name: The name of this service to appear in the diagram (e.g., "UserAPI")
+            app (Any): The FastAPI application instance.
+            app_name (str): The name of this service to appear in the diagram (e.g., "UserAPI").
+                Defaults to "FastAPI".
 
         Raises:
-            ImportError: If FastAPI/Starlette are not installed
+            ImportError: If FastAPI or Starlette is not installed in the current environment.
         """
-        # Check if FastAPI is installed by verifying BaseHTTPMiddleware is not our dummy object
+        # Validate that the necessary dependencies are present.
        if BaseHTTPMiddleware is object:  # type: ignore[comparison-overlap]
            raise ImportError(
                "FastAPI/Starlette is required to use MermaidTraceMiddleware"
            )
 
-        # Initialize the parent BaseHTTPMiddleware
+        # Initialize the base class.
        super().__init__(app)
        self.app_name = app_name
 
@@ -74,62 +92,88 @@ class MermaidTraceMiddleware(BaseHTTPMiddleware):
         self, request: Request, call_next: RequestResponseEndpoint
     ) -> Response:
         """
-        Intercepts and processes incoming HTTP requests.
+        Dispatch method to handle the incoming request.
+
+        This is the core logic of the middleware. It wraps the `call_next` execution
+        with tracing logic.
 
-        This method is called for each incoming request and handles the full
-        request-response cycle tracing.
+        Request Tracing & Header Handling:
+        - **X-Source**: Used to identify the caller. If present, the diagram will show
+          an arrow from `X-Source` to `app_name`. If missing, defaults to "Client".
+        - **X-Trace-ID**: Used for distributed tracing. If provided, it links this
+          request to an existing trace. If missing, a new UUID is generated.
 
         Args:
-            request (Request): The incoming HTTP request object
-            call_next (RequestResponseEndpoint): Function to call the next middleware or endpoint
+            request (Request): The incoming HTTP request object.
+            call_next (RequestResponseEndpoint): A callable that invokes the next
+                middleware or the route handler.
 
         Returns:
-            Response: The HTTP response object
+            Response: The HTTP response generated by the application.
         """
-        # 1. Determine Source (Client participant)
-        # Try to get a specific ID from X-Source header (useful for distributed tracing),
-        # otherwise fallback to "Client"
+        # ----------------------------------------------------------------------
+        # 1. Header Handling and Metadata Extraction
+        # ----------------------------------------------------------------------
+
+        # Determine the source participant (Who is calling us?).
+        # If the request comes from another service traced by MermaidTrace,
+        # it might include the 'X-Source' header.
        source = request.headers.get("X-Source", "Client")
 
-        # 2. Determine Trace ID
-        # Check for X-Trace-ID header (for distributed tracing) or generate new UUID
+        # Determine the unique Trace ID.
+        # This ID is critical for grouping all logs related to a single request flow.
        trace_id = request.headers.get("X-Trace-ID") or str(uuid.uuid4())
 
-        # 3. Determine Action name
-        # Format: "METHOD /path" (e.g., "GET /users", "POST /items")
+        # Define the action name for the diagram arrow.
+        # Format: "METHOD /path" (e.g., "GET /api/v1/users")
        action = f"{request.method} {request.url.path}"
 
+        # Get the configured logger for flow events.
        logger = get_flow_logger()
 
-        # 4. Log Request (Source -> App)
-        # Create and log the initial request event
+        # ----------------------------------------------------------------------
+        # 2. Log Request Start (Source -> App)
+        # ----------------------------------------------------------------------
+
+        # Create the 'Request' event representing the call coming into this service.
        req_event = FlowEvent(
            source=source,
            target=self.app_name,
            action=action,
            message=action,
+            # Include query parameters in the note if they exist.
            params=f"query={request.query_params}" if request.query_params else None,
            trace_id=trace_id,
        )
+
+        # Log the event. This writes the JSON entry that the visualizer will parse.
        logger.info(
            f"{source}->{self.app_name}: {action}", extra={"flow_event": req_event}
        )
 
-        # 5. Set Context and Process Request
-        # Use async context manager to set the current participant to the app name
-        # This context will be inherited by all code called within call_next()
+        # ----------------------------------------------------------------------
+        # 3. Context Setup and Request Processing
+        # ----------------------------------------------------------------------
+
+        # Initialize the LogContext for this async task.
+        # Any 'traced' function called within this block will inherit 'trace_id'
+        # and see 'participant' as self.app_name.
        async with LogContext.ascope(
            {"participant": self.app_name, "trace_id": trace_id}
        ):
            start_time = time.time()
            try:
-                # Pass control to the next middleware or endpoint
-                # This executes the actual route logic and returns the response
+                # Process the request by calling the next item in the middleware chain.
                response = await call_next(request)
 
-                # 6. Log Success Response (App -> Source)
-                # Calculate execution duration in milliseconds
+                # ------------------------------------------------------------------
+                # 4. Log Success Response (App -> Source)
+                # ------------------------------------------------------------------
+
+                # Calculate execution time for performance insights.
                duration = (time.time() - start_time) * 1000
+
+                # Create the 'Return' event (dashed line back to caller).
                resp_event = FlowEvent(
                    source=self.app_name,
                    target=source,
@@ -146,9 +190,17 @@ class MermaidTraceMiddleware(BaseHTTPMiddleware):
                return response
 
            except Exception as e:
-                # 7. Log Error Response (App --x Source)
-                # This captures unhandled exceptions that bubble up to the middleware
-                # Note: FastAPI's ExceptionHandlers might catch some exceptions before they reach here
+                # ------------------------------------------------------------------
+                # 5. Log Error Response (App --x Source)
+                # ------------------------------------------------------------------
+
+                # Capture full stack trace for the error.
+                stack_trace = "".join(
+                    traceback.format_exception(type(e), e, e.__traceback__)
+                )
+
+                # If an unhandled exception occurs, log it as an error event.
+                # This will render as a cross (X) on the sequence diagram return arrow.
                err_event = FlowEvent(
                    source=self.app_name,
                    target=source,
@@ -157,10 +209,13 @@ class MermaidTraceMiddleware(BaseHTTPMiddleware):
                    is_return=True,
                    is_error=True,
                    error_message=str(e),
+                    stack_trace=stack_trace,
                    trace_id=trace_id,
                )
                logger.error(
                    f"{self.app_name}-x{source}: Error", extra={"flow_event": err_event}
                )
-                # Re-raise the exception to maintain normal error handling flow
+
+                # Re-raise the exception so FastAPI's exception handlers can take over.
+                # The middleware only observes the flow; it never swallows errors.
                raise
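
Seen from the consumer side, the new middleware behavior comes down to registration plus header propagation. A hedged usage sketch (the import path is inferred from the module docstring, not confirmed by this diff):

    from fastapi import FastAPI
    from mermaid_trace.integrations.fastapi import MermaidTraceMiddleware  # assumed path

    app = FastAPI()
    app.add_middleware(MermaidTraceMiddleware, app_name="UserAPI")

    @app.get("/users")
    async def list_users() -> dict:
        return {"users": []}

    # An upstream service that is itself traced would forward both headers so this
    # request joins its trace instead of starting a new one, e.g. with httpx:
    #
    #   await client.get(
    #       "http://user-api/users",
    #       headers={"X-Source": "OrderAPI", "X-Trace-ID": trace_id},
    #   )

With those headers set, the docstrings above suggest arrows roughly like `OrderAPI->>UserAPI: GET /users` on request, a dashed return arrow on success, and `UserAPI--xOrderAPI` on an unhandled exception.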
@@ -0,0 +1,312 @@
+"""
+LangChain Integration Module for MermaidTrace.
+
+This module provides a LangChain Callback Handler that allows you to automatically
+generate Mermaid sequence diagrams for your LangChain chains, LLM calls, and tool usage.
+"""
+
+from typing import Any, Dict, List, Optional, TYPE_CHECKING
+import uuid
+
+from ..core.events import FlowEvent
+from ..core.context import LogContext
+from ..core.decorators import get_flow_logger
+
+if TYPE_CHECKING:
+    from langchain_core.callbacks import BaseCallbackHandler
+    from langchain_core.outputs import LLMResult
+else:
+    try:
+        from langchain_core.callbacks import BaseCallbackHandler
+        from langchain_core.outputs import LLMResult
+    except ImportError:
+        BaseCallbackHandler = object
+        LLMResult = Any
+
+
+class MermaidTraceCallbackHandler(BaseCallbackHandler):
+    """
+    LangChain Callback Handler that records execution flow as Mermaid sequence diagrams.
+
+    This handler intercepts LangChain events (Chain, LLM, Tool) and logs them as
+    FlowEvents, which are then processed by MermaidTrace to generate diagrams.
+    """
+
+    def __init__(self, host_name: str = "LangChain"):
+        """
+        Initialize the callback handler.
+
+        Args:
+            host_name (str): The name of the host participant in the diagram.
+                Defaults to "LangChain".
+        """
+        if BaseCallbackHandler is object:
+            raise ImportError(
+                "langchain-core is required to use MermaidTraceCallbackHandler. "
+                "Install it with `pip install langchain-core`."
+            )
+        self.host_name = host_name
+        self.logger = get_flow_logger()
+        self._participant_stack: List[str] = []
+
+    def _get_current_source(self) -> str:
+        if self._participant_stack:
+            return self._participant_stack[-1]
+        return str(LogContext.get("current_participant", self.host_name))
+
+    def on_chain_start(
+        self,
+        serialized: Optional[Dict[str, Any]],
+        inputs: Dict[str, Any],
+        **kwargs: Any,
+    ) -> None:
+        """Run when chain starts running."""
+        target = (
+            (serialized.get("name") if serialized else None)
+            or kwargs.get("name")
+            or "Chain"
+        )
+        source = self._get_current_source()
+
+        event = FlowEvent(
+            source=source,
+            target=target,
+            action="Run Chain",
+            message=f"Start Chain: {target}",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            params=str(inputs),
+        )
+        self.logger.info(
+            f"{source} -> {target}: {event.action}", extra={"flow_event": event}
+        )
+        self._participant_stack.append(target)
+
+    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
+        """Run when chain ends running."""
+        if not self._participant_stack:
+            return
+
+        target = self._participant_stack.pop()
+        source = self._get_current_source()
+
+        event = FlowEvent(
+            source=target,
+            target=source,
+            action="Finish Chain",
+            message="Chain Complete",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            result=str(outputs),
+            is_return=True,
+        )
+        self.logger.info(
+            f"{target} -> {source}: {event.action}", extra={"flow_event": event}
+        )
+
+    def on_llm_start(
+        self, serialized: Optional[Dict[str, Any]], prompts: List[str], **kwargs: Any
+    ) -> None:
+        """Run when LLM starts running."""
+        target = (serialized.get("name") if serialized else None) or "LLM"
+        source = self._get_current_source()
+
+        event = FlowEvent(
+            source=source,
+            target=target,
+            action="Prompt",
+            message="LLM Request",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            params=str(prompts),
+        )
+        self.logger.info(
+            f"{source} -> {target}: {event.action}", extra={"flow_event": event}
+        )
+        self._participant_stack.append(target)
+
+    def on_chat_model_start(
+        self,
+        serialized: Optional[Dict[str, Any]],
+        messages: List[List[Any]],
+        **kwargs: Any,
+    ) -> None:
+        """Run when Chat Model starts running."""
+        target = (serialized.get("name") if serialized else None) or "ChatModel"
+        source = self._get_current_source()
+
+        event = FlowEvent(
+            source=source,
+            target=target,
+            action="Chat",
+            message="ChatModel Request",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            params=str(messages),
+        )
+        self.logger.info(
+            f"{source} -> {target}: {event.action}", extra={"flow_event": event}
+        )
+        self._participant_stack.append(target)
+
+    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+        """Run when LLM ends running."""
+        if not self._participant_stack:
+            return
+
+        source = self._participant_stack.pop()
+        target = self._get_current_source()
+
+        event = FlowEvent(
+            source=source,
+            target=target,
+            action="Response",
+            message="LLM/Chat Completion",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            result=str(response.generations),
+            is_return=True,
+        )
+        self.logger.info(
+            f"{source} -> {target}: {event.action}", extra={"flow_event": event}
+        )
+
+    def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
+        """Run when LLM errors."""
+        if not self._participant_stack:
+            return
+        target = self._participant_stack.pop()
+        source = self._get_current_source()
+        event = FlowEvent(
+            source=target,
+            target=source,
+            action="Error",
+            message=f"LLM Error: {type(error).__name__}",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            is_error=True,
+            error_message=str(error),
+            is_return=True,
+        )
+        self.logger.info(
+            f"{target} -> {source}: {event.action}", extra={"flow_event": event}
+        )
+
+    def on_retriever_start(
+        self,
+        serialized: Optional[Dict[str, Any]],
+        query: str,
+        **kwargs: Any,
+    ) -> None:
+        """Run when Retriever starts running."""
+        target = (serialized.get("name") if serialized else None) or "Retriever"
+        source = self._get_current_source()
+
+        event = FlowEvent(
+            source=source,
+            target=target,
+            action="Retrieve",
+            message=f"Query: {query[:50]}...",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            params=query,
+        )
+        self.logger.info(
+            f"{source} -> {target}: {event.action}", extra={"flow_event": event}
+        )
+        self._participant_stack.append(target)
+
+    def on_retriever_end(self, documents: List[Any], **kwargs: Any) -> Any:  # type: ignore[override]
+        """Run when Retriever ends running."""
+        if not self._participant_stack:
+            return
+
+        target = self._participant_stack.pop()
+        source = self._get_current_source()
+
+        event = FlowEvent(
+            source=target,
+            target=source,
+            action="Documents",
+            message=f"Retrieved {len(documents)} docs",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            result=f"Count: {len(documents)}",
+            is_return=True,
+        )
+        self.logger.info(
+            f"{target} -> {source}: {event.action}", extra={"flow_event": event}
+        )
+
+    def on_tool_start(
+        self, serialized: Optional[Dict[str, Any]], input_str: str, **kwargs: Any
+    ) -> None:
+        """Run when tool starts running."""
+        target = (serialized.get("name") if serialized else None) or "Tool"
+        source = self._get_current_source()
+
+        event = FlowEvent(
+            source=source,
+            target=target,
+            action="Call Tool",
+            message=f"Tool: {target}",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            params=input_str,
+        )
+        self.logger.info(
+            f"{source} -> {target}: {event.action}", extra={"flow_event": event}
+        )
+        self._participant_stack.append(target)
+
+    def on_tool_end(self, output: Any, **kwargs: Any) -> None:
+        """Run when tool ends running."""
+        if not self._participant_stack:
+            return
+
+        target = self._participant_stack.pop()
+        source = self._get_current_source()
+
+        event = FlowEvent(
+            source=target,
+            target=source,
+            action="Finish Tool",
+            message="Tool Complete",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            result=str(output),
+            is_return=True,
+        )
+        self.logger.info(
+            f"{target} -> {source}: {event.action}", extra={"flow_event": event}
+        )
+
+    def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
+        """Run when chain errors."""
+        if not self._participant_stack:
+            return
+        target = self._participant_stack.pop()
+        source = self._get_current_source()
+        event = FlowEvent(
+            source=target,
+            target=source,
+            action="Error",
+            message=f"Chain Error: {type(error).__name__}",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            is_error=True,
+            error_message=str(error),
+            is_return=True,
+        )
+        self.logger.info(
+            f"{target} -> {source}: {event.action}", extra={"flow_event": event}
+        )
+
+    def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
+        """Run when tool errors."""
+        if not self._participant_stack:
+            return
+        target = self._participant_stack.pop()
+        source = self._get_current_source()
+        event = FlowEvent(
+            source=target,
+            target=source,
+            action="Error",
+            message=f"Tool Error: {type(error).__name__}",
+            trace_id=LogContext.get("trace_id", str(uuid.uuid4())),
+            is_error=True,
+            error_message=str(error),
+            is_return=True,
+        )
+        self.logger.info(
+            f"{target} -> {source}: {event.action}", extra={"flow_event": event}
+        )
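
As with the FastAPI middleware, the handler is opt-in per invocation. A minimal sketch (the import path is again inferred from the module docstring; `RunnableLambda` and the `config={"callbacks": [...]}` mechanism are standard langchain-core APIs):

    from langchain_core.runnables import RunnableLambda
    from mermaid_trace.integrations.langchain import MermaidTraceCallbackHandler  # assumed path

    handler = MermaidTraceCallbackHandler(host_name="RAGService")

    # Any runnable invoked with the handler in its config emits FlowEvents for
    # chain start/end; real chains also surface LLM, tool, and retriever calls.
    chain = RunnableLambda(lambda question: f"echo: {question}")
    result = chain.invoke("What is MermaidTrace?", config={"callbacks": [handler]})

Because `_get_current_source` consults the internal participant stack, nested callbacks (a tool called from within a chain, for example) are drawn from the innermost active participant rather than always from the host.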