monocle-apptrace 0.4.1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of monocle-apptrace might be problematic. Click here for more details.

Files changed (91) hide show
  1. monocle_apptrace/__main__.py +1 -1
  2. monocle_apptrace/exporters/file_exporter.py +125 -37
  3. monocle_apptrace/instrumentation/common/__init__.py +16 -1
  4. monocle_apptrace/instrumentation/common/constants.py +14 -1
  5. monocle_apptrace/instrumentation/common/instrumentor.py +19 -152
  6. monocle_apptrace/instrumentation/common/method_wrappers.py +376 -0
  7. monocle_apptrace/instrumentation/common/span_handler.py +58 -32
  8. monocle_apptrace/instrumentation/common/utils.py +52 -15
  9. monocle_apptrace/instrumentation/common/wrapper.py +124 -18
  10. monocle_apptrace/instrumentation/common/wrapper_method.py +48 -1
  11. monocle_apptrace/instrumentation/metamodel/a2a/__init__.py +0 -0
  12. monocle_apptrace/instrumentation/metamodel/a2a/_helper.py +37 -0
  13. monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py +0 -0
  14. monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +112 -0
  15. monocle_apptrace/instrumentation/metamodel/a2a/methods.py +22 -0
  16. monocle_apptrace/instrumentation/metamodel/adk/__init__.py +0 -0
  17. monocle_apptrace/instrumentation/metamodel/adk/_helper.py +182 -0
  18. monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +50 -0
  19. monocle_apptrace/instrumentation/metamodel/adk/entities/tool.py +57 -0
  20. monocle_apptrace/instrumentation/metamodel/adk/methods.py +24 -0
  21. monocle_apptrace/instrumentation/metamodel/agents/__init__.py +0 -0
  22. monocle_apptrace/instrumentation/metamodel/agents/_helper.py +220 -0
  23. monocle_apptrace/instrumentation/metamodel/agents/agents_processor.py +152 -0
  24. monocle_apptrace/instrumentation/metamodel/agents/entities/__init__.py +0 -0
  25. monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +191 -0
  26. monocle_apptrace/instrumentation/metamodel/agents/methods.py +56 -0
  27. monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +6 -11
  28. monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +112 -18
  29. monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +18 -10
  30. monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +13 -11
  31. monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +5 -0
  32. monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +88 -8
  33. monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +22 -8
  34. monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +92 -16
  35. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +13 -8
  36. monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +1 -1
  37. monocle_apptrace/instrumentation/metamodel/fastapi/__init__.py +0 -0
  38. monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +82 -0
  39. monocle_apptrace/instrumentation/metamodel/fastapi/entities/__init__.py +0 -0
  40. monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +44 -0
  41. monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +23 -0
  42. monocle_apptrace/instrumentation/metamodel/finish_types.py +463 -0
  43. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +6 -11
  44. monocle_apptrace/instrumentation/metamodel/gemini/__init__.py +0 -0
  45. monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +120 -0
  46. monocle_apptrace/instrumentation/metamodel/gemini/entities/__init__.py +0 -0
  47. monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +86 -0
  48. monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +43 -0
  49. monocle_apptrace/instrumentation/metamodel/gemini/methods.py +31 -0
  50. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +79 -8
  51. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +15 -10
  52. monocle_apptrace/instrumentation/metamodel/haystack/methods.py +7 -0
  53. monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +78 -0
  54. monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +51 -0
  55. monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py +23 -0
  56. monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py +23 -0
  57. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +145 -19
  58. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +19 -10
  59. monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +67 -10
  60. monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +127 -20
  61. monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +46 -0
  62. monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +35 -9
  63. monocle_apptrace/instrumentation/metamodel/litellm/__init__.py +0 -0
  64. monocle_apptrace/instrumentation/metamodel/litellm/_helper.py +89 -0
  65. monocle_apptrace/instrumentation/metamodel/litellm/entities/__init__.py +0 -0
  66. monocle_apptrace/instrumentation/metamodel/litellm/entities/inference.py +108 -0
  67. monocle_apptrace/instrumentation/metamodel/litellm/methods.py +19 -0
  68. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +227 -16
  69. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +127 -10
  70. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +13 -8
  71. monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +62 -0
  72. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +68 -1
  73. monocle_apptrace/instrumentation/metamodel/mcp/__init__.py +0 -0
  74. monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +118 -0
  75. monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py +0 -0
  76. monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +48 -0
  77. monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +8 -0
  78. monocle_apptrace/instrumentation/metamodel/mcp/methods.py +21 -0
  79. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +188 -16
  80. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +148 -92
  81. monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +1 -1
  82. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +53 -23
  83. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
  84. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +15 -9
  85. monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +0 -4
  86. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/METADATA +27 -11
  87. monocle_apptrace-0.5.0.dist-info/RECORD +142 -0
  88. monocle_apptrace-0.4.1.dist-info/RECORD +0 -96
  89. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/WHEEL +0 -0
  90. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/LICENSE +0 -0
  91. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/NOTICE +0 -0
@@ -0,0 +1,463 @@
1
+ """
2
+ This module provides common finish reason mappings and finish type enums
3
+ for different AI providers (OpenAI, Anthropic, Gemini, LangChain, LlamaIndex, Azure AI Inference).
4
+ """
5
+
6
+ from enum import Enum
7
+
8
+ class FinishType(Enum):
9
+ """Enum for standardized finish types across all AI providers."""
10
+ SUCCESS = "success"
11
+ TRUNCATED = "truncated"
12
+ CONTENT_FILTER = "content_filter"
13
+ ERROR = "error"
14
+ REFUSAL = "refusal"
15
+ RATE_LIMITED = "rate_limited"
16
+
17
# OpenAI finish reason mapping
# (chat/completions `finish_reason` values)
OPENAI_FINISH_REASON_MAPPING = {
    "stop": FinishType.SUCCESS.value,
    "tool_calls": FinishType.SUCCESS.value,
    "function_call": FinishType.SUCCESS.value,  # deprecated but still possible
    "length": FinishType.TRUNCATED.value,
    "content_filter": FinishType.CONTENT_FILTER.value
}

# Anthropic finish reason mapping
# (Messages API `stop_reason` values)
ANTHROPIC_FINISH_REASON_MAPPING = {
    "end_turn": FinishType.SUCCESS.value,       # natural completion
    "max_tokens": FinishType.TRUNCATED.value,   # hit max_tokens limit
    "stop_sequence": FinishType.SUCCESS.value,  # hit user stop sequence
    "tool_use": FinishType.SUCCESS.value,       # tool use triggered
    "pause_turn": FinishType.SUCCESS.value,     # paused for tool or server action
    "refusal": FinishType.REFUSAL.value,        # refused for safety/ethics
}

# Gemini finish reason mapping
# (candidate `finish_reason` values)
GEMINI_FINISH_REASON_MAPPING = {
    "STOP": FinishType.SUCCESS.value,
    "MAX_TOKENS": FinishType.TRUNCATED.value,
    "SAFETY": FinishType.CONTENT_FILTER.value,
    "RECITATION": FinishType.CONTENT_FILTER.value,
    "OTHER": FinishType.ERROR.value,
    "FINISH_REASON_UNSPECIFIED": None  # explicitly unknown — no finish type
}
45
+
46
# LlamaIndex finish reason mapping
# LlamaIndex often wraps underlying provider responses, similar to LangChain,
# so this table mixes generic reasons with provider-specific ones that may
# pass through unchanged.
LLAMAINDEX_FINISH_REASON_MAPPING = {
    # Standard completion reasons
    "stop": FinishType.SUCCESS.value,
    "complete": FinishType.SUCCESS.value,
    "finished": FinishType.SUCCESS.value,
    "success": FinishType.SUCCESS.value,

    # Token limits
    "length": FinishType.TRUNCATED.value,
    "max_tokens": FinishType.TRUNCATED.value,
    "token_limit": FinishType.TRUNCATED.value,
    "truncated": FinishType.TRUNCATED.value,

    # Tool/function calling
    "tool_calls": FinishType.SUCCESS.value,
    "function_call": FinishType.SUCCESS.value,
    "agent_finish": FinishType.SUCCESS.value,

    # Content filtering and safety
    "content_filter": FinishType.CONTENT_FILTER.value,
    "safety": FinishType.CONTENT_FILTER.value,
    "filtered": FinishType.CONTENT_FILTER.value,

    # Errors
    "error": FinishType.ERROR.value,
    "failed": FinishType.ERROR.value,
    "exception": FinishType.ERROR.value,

    # Provider-specific reasons that might pass through LlamaIndex
    "end_turn": FinishType.SUCCESS.value,       # Anthropic
    "stop_sequence": FinishType.SUCCESS.value,  # Anthropic
    "STOP": FinishType.SUCCESS.value,           # Gemini
    "SAFETY": FinishType.CONTENT_FILTER.value,  # Gemini
    "RECITATION": FinishType.CONTENT_FILTER.value,  # Gemini
    "OTHER": FinishType.ERROR.value,            # Gemini
}
85
+
86
# Azure AI Inference finish reason mapping
AZURE_AI_INFERENCE_FINISH_REASON_MAPPING = {
    # Standard completion reasons
    "stop": FinishType.SUCCESS.value,
    "completed": FinishType.SUCCESS.value,
    "finished": FinishType.SUCCESS.value,

    # Token limits
    "length": FinishType.TRUNCATED.value,
    "max_tokens": FinishType.TRUNCATED.value,
    "token_limit": FinishType.TRUNCATED.value,
    "max_completion_tokens": FinishType.TRUNCATED.value,

    # Tool/function calling
    "tool_calls": FinishType.SUCCESS.value,
    "function_call": FinishType.SUCCESS.value,

    # Content filtering and safety
    "content_filter": FinishType.CONTENT_FILTER.value,
    "content_filtered": FinishType.CONTENT_FILTER.value,
    "safety": FinishType.CONTENT_FILTER.value,
    "responsible_ai_policy": FinishType.CONTENT_FILTER.value,

    # Errors
    "error": FinishType.ERROR.value,
    "failed": FinishType.ERROR.value,
    "exception": FinishType.ERROR.value,
    "timeout": FinishType.ERROR.value,

    # Azure-specific reasons
    "model_error": FinishType.ERROR.value,
    "service_unavailable": FinishType.ERROR.value,
    "rate_limit": FinishType.ERROR.value,
}
120
+
121
# AWS Bedrock finish reason mapping
# Based on the AWS Bedrock Converse API plus model-specific APIs (Anthropic
# Claude, AI21, Cohere, Meta Llama, Amazon Titan).
# NOTE: the original literal listed several keys twice ("end_turn", "stop",
# "stop_sequence", "tool_use", "max_tokens", "length") under per-model
# sections; Python dict literals silently keep only the last occurrence, so
# the duplicates were no-ops and have been collapsed here (same values, so
# the resulting dict is unchanged).
BEDROCK_FINISH_REASON_MAPPING = {
    # Standard completion reasons (Converse API / Claude via Bedrock)
    "end_turn": FinishType.SUCCESS.value,        # natural completion
    "stop": FinishType.SUCCESS.value,            # hit stop sequence (also Meta Llama)
    "stop_sequence": FinishType.SUCCESS.value,   # stop sequence triggered
    "completed": FinishType.SUCCESS.value,       # completion finished successfully

    # Token limits
    "max_tokens": FinishType.TRUNCATED.value,    # hit max_tokens limit
    "length": FinishType.TRUNCATED.value,        # token length limit (also AI21/Llama)
    "max_length": FinishType.TRUNCATED.value,    # maximum length reached
    "token_limit": FinishType.TRUNCATED.value,   # token limit reached

    # Tool/function calling
    "tool_use": FinishType.SUCCESS.value,        # tool use triggered
    "function_call": FinishType.SUCCESS.value,   # function call triggered

    # Content filtering and safety
    "content_filter": FinishType.CONTENT_FILTER.value,
    "content_filtered": FinishType.CONTENT_FILTER.value,
    "safety": FinishType.CONTENT_FILTER.value,
    "guardrails": FinishType.CONTENT_FILTER.value,  # Bedrock guardrails triggered
    "blocked": FinishType.CONTENT_FILTER.value,     # request blocked

    # Errors
    "error": FinishType.ERROR.value,
    "failed": FinishType.ERROR.value,
    "exception": FinishType.ERROR.value,
    "timeout": FinishType.ERROR.value,
    "model_error": FinishType.ERROR.value,
    "service_unavailable": FinishType.ERROR.value,
    "throttled": FinishType.ERROR.value,
    "rate_limit": FinishType.ERROR.value,
    "validation_error": FinishType.ERROR.value,

    # AI21 models via Bedrock
    "endoftext": FinishType.SUCCESS.value,       # AI21 end of text

    # Cohere models via Bedrock
    "COMPLETE": FinishType.SUCCESS.value,        # Cohere completion
    "MAX_TOKENS": FinishType.TRUNCATED.value,    # Cohere max tokens
    "ERROR": FinishType.ERROR.value,             # Cohere error

    # Amazon Titan models via Bedrock
    "FINISH": FinishType.SUCCESS.value,          # Titan finish
    "LENGTH": FinishType.TRUNCATED.value,        # Titan length limit
    "CONTENT_FILTERED": FinishType.CONTENT_FILTER.value,  # Titan content filter
}
183
+
184
# LangChain finish reason mapping
# LangChain often wraps underlying provider responses, so we include common
# finish reasons that might appear in LangChain response objects.
# NOTE: the original literal listed "stop" twice (once generically, once under
# an "OpenAI reasons" section); dict literals keep only the last occurrence,
# so the duplicate was a no-op and has been removed (resulting dict unchanged).
LANGCHAIN_FINISH_REASON_MAPPING = {
    # Standard completion reasons (includes OpenAI's "stop")
    "stop": FinishType.SUCCESS.value,
    "complete": FinishType.SUCCESS.value,
    "finished": FinishType.SUCCESS.value,

    # Token limits
    "length": FinishType.TRUNCATED.value,
    "max_tokens": FinishType.TRUNCATED.value,
    "token_limit": FinishType.TRUNCATED.value,

    # Tool/function calling
    "tool_calls": FinishType.SUCCESS.value,
    "function_call": FinishType.SUCCESS.value,

    # Content filtering and safety
    "content_filter": FinishType.CONTENT_FILTER.value,
    "safety": FinishType.CONTENT_FILTER.value,
    "filtered": FinishType.CONTENT_FILTER.value,

    # Errors
    "error": FinishType.ERROR.value,
    "failed": FinishType.ERROR.value,
    "exception": FinishType.ERROR.value,

    # Anthropic reasons that might pass through LangChain
    "end_turn": FinishType.SUCCESS.value,
    "stop_sequence": FinishType.SUCCESS.value,

    # Gemini reasons that might pass through LangChain
    "STOP": FinishType.SUCCESS.value,
    "SAFETY": FinishType.CONTENT_FILTER.value,
    "RECITATION": FinishType.CONTENT_FILTER.value,
    "OTHER": FinishType.ERROR.value,
}
226
+
227
# TeamsAI finish reason mapping
# (the only table here that uses FinishType.RATE_LIMITED)
TEAMSAI_FINISH_REASON_MAPPING = {
    "success": FinishType.SUCCESS.value,
    "error": FinishType.ERROR.value,
    "too_long": FinishType.TRUNCATED.value,
    "rate_limited": FinishType.RATE_LIMITED.value,
    "invalid_response": FinishType.ERROR.value,
}
234
# Haystack finish reason mapping
# Haystack wraps underlying provider responses, so this table mirrors the
# LangChain one.
# NOTE: the original literal listed "stop" twice and its trailing comment
# said "pass through LangChain" (copy-paste from the LangChain table); dict
# literals keep only the last duplicate occurrence, so removing the duplicate
# leaves the resulting dict unchanged.
HAYSTACK_FINISH_REASON_MAPPING = {
    # Standard completion reasons (includes OpenAI's "stop")
    "stop": FinishType.SUCCESS.value,
    "complete": FinishType.SUCCESS.value,
    "finished": FinishType.SUCCESS.value,

    # Token limits
    "length": FinishType.TRUNCATED.value,
    "max_tokens": FinishType.TRUNCATED.value,
    "token_limit": FinishType.TRUNCATED.value,

    # Tool/function calling
    "tool_calls": FinishType.SUCCESS.value,
    "function_call": FinishType.SUCCESS.value,

    # Content filtering and safety
    "content_filter": FinishType.CONTENT_FILTER.value,
    "safety": FinishType.CONTENT_FILTER.value,
    "filtered": FinishType.CONTENT_FILTER.value,

    # Errors
    "error": FinishType.ERROR.value,
    "failed": FinishType.ERROR.value,
    "exception": FinishType.ERROR.value,

    # Anthropic reasons that might pass through Haystack
    "end_turn": FinishType.SUCCESS.value,
    "stop_sequence": FinishType.SUCCESS.value,

    # Gemini reasons that might pass through Haystack
    "STOP": FinishType.SUCCESS.value,
    "SAFETY": FinishType.CONTENT_FILTER.value,
    "RECITATION": FinishType.CONTENT_FILTER.value,
    "OTHER": FinishType.ERROR.value,
}
274
+
275
# ADK surfaces Gemini finish reasons unchanged, so it reuses the Gemini
# mapping. NOTE: this is an alias to the same dict object, not a copy.
ADK_FINISH_REASON_MAPPING = GEMINI_FINISH_REASON_MAPPING
276
+
277
def map_openai_finish_reason_to_finish_type(finish_reason):
    """Map an OpenAI finish_reason to a standardized finish_type (None when empty/unknown)."""
    return OPENAI_FINISH_REASON_MAPPING.get(finish_reason) if finish_reason else None


def map_anthropic_finish_reason_to_finish_type(finish_reason):
    """Map an Anthropic stop_reason to a standardized finish_type (None when empty/unknown)."""
    return ANTHROPIC_FINISH_REASON_MAPPING.get(finish_reason) if finish_reason else None


def map_gemini_finish_reason_to_finish_type(finish_reason):
    """Map a Gemini finish_reason to a standardized finish_type (None when empty/unknown)."""
    return GEMINI_FINISH_REASON_MAPPING.get(finish_reason) if finish_reason else None
296
+
297
+
298
def map_langchain_finish_reason_to_finish_type(finish_reason):
    """Map a LangChain finish_reason to a standardized finish_type.

    Lookup order: exact key, then lower-cased key, then keyword heuristics so
    unmapped provider reasons are still classified. Returns None when nothing
    matches or finish_reason is empty.
    """
    if not finish_reason:
        return None

    lowered = str(finish_reason).lower()

    # Table lookup: exact first, then case-insensitive.
    for key in (finish_reason, lowered):
        if key in LANGCHAIN_FINISH_REASON_MAPPING:
            return LANGCHAIN_FINISH_REASON_MAPPING[key]

    # Heuristic fallback — first matching keyword group wins.
    keyword_groups = (
        (('stop', 'complete', 'success', 'done'), FinishType.SUCCESS.value),
        (('length', 'token', 'limit', 'truncat'), FinishType.TRUNCATED.value),
        (('filter', 'safety', 'block'), FinishType.CONTENT_FILTER.value),
        (('error', 'fail', 'exception'), FinishType.ERROR.value),
    )
    for keywords, finish_type in keyword_groups:
        if any(keyword in lowered for keyword in keywords):
            return finish_type

    return None


def map_llamaindex_finish_reason_to_finish_type(finish_reason):
    """Map a LlamaIndex finish_reason to a standardized finish_type.

    Same strategy as the LangChain mapper: exact lookup, lower-cased lookup,
    then keyword heuristics. Returns None when nothing matches.
    """
    if not finish_reason:
        return None

    lowered = str(finish_reason).lower()

    # Table lookup: exact first, then case-insensitive.
    for key in (finish_reason, lowered):
        if key in LLAMAINDEX_FINISH_REASON_MAPPING:
            return LLAMAINDEX_FINISH_REASON_MAPPING[key]

    # Heuristic fallback — first matching keyword group wins.
    keyword_groups = (
        (('stop', 'complete', 'success', 'done', 'finish'), FinishType.SUCCESS.value),
        (('length', 'token', 'limit', 'truncat'), FinishType.TRUNCATED.value),
        (('filter', 'safety', 'block'), FinishType.CONTENT_FILTER.value),
        (('error', 'fail', 'exception'), FinishType.ERROR.value),
    )
    for keywords, finish_type in keyword_groups:
        if any(keyword in lowered for keyword in keywords):
            return finish_type

    return None
354
+
355
+
356
def map_azure_ai_inference_finish_reason_to_finish_type(finish_reason):
    """Map an Azure AI Inference finish_reason to a standardized finish_type.

    Lookup order: exact key, then lower-cased key, then keyword heuristics.
    Returns None when nothing matches or finish_reason is empty.
    """
    if not finish_reason:
        return None

    lowered = str(finish_reason).lower()

    # Table lookup: exact first, then case-insensitive.
    for key in (finish_reason, lowered):
        if key in AZURE_AI_INFERENCE_FINISH_REASON_MAPPING:
            return AZURE_AI_INFERENCE_FINISH_REASON_MAPPING[key]

    # Heuristic fallback — first matching keyword group wins.
    keyword_groups = (
        (('stop', 'complete', 'success', 'done', 'finish'), FinishType.SUCCESS.value),
        (('length', 'token', 'limit', 'truncat'), FinishType.TRUNCATED.value),
        (('filter', 'safety', 'block', 'responsible_ai', 'content_filter'), FinishType.CONTENT_FILTER.value),
        (('error', 'fail', 'exception', 'timeout', 'unavailable', 'rate_limit'), FinishType.ERROR.value),
    )
    for keywords, finish_type in keyword_groups:
        if any(keyword in lowered for keyword in keywords):
            return finish_type

    return None


def map_bedrock_finish_reason_to_finish_type(finish_reason):
    """Map an AWS Bedrock finish_reason/stopReason to a standardized finish_type.

    Lookup order: exact key, then lower-cased key, then keyword heuristics
    covering Converse API and per-model (Claude/AI21/Cohere/Llama/Titan)
    vocabularies. Returns None when nothing matches.
    """
    if not finish_reason:
        return None

    lowered = str(finish_reason).lower()

    # Table lookup: exact first, then case-insensitive.
    for key in (finish_reason, lowered):
        if key in BEDROCK_FINISH_REASON_MAPPING:
            return BEDROCK_FINISH_REASON_MAPPING[key]

    # Heuristic fallback — first matching keyword group wins.
    keyword_groups = (
        (('stop', 'complete', 'success', 'done', 'finish', 'end_turn', 'endoftext'), FinishType.SUCCESS.value),
        (('length', 'token', 'limit', 'truncat', 'max_tokens'), FinishType.TRUNCATED.value),
        (('filter', 'safety', 'block', 'guardrails', 'content_filter'), FinishType.CONTENT_FILTER.value),
        (('error', 'fail', 'exception', 'timeout', 'unavailable', 'rate_limit', 'throttled', 'validation'), FinishType.ERROR.value),
    )
    for keywords, finish_type in keyword_groups:
        if any(keyword in lowered for keyword in keywords):
            return finish_type

    return None
412
+
413
def map_haystack_finish_reason_to_finish_type(finish_reason):
    """Map a Haystack finish_reason to a standardized finish_type.

    Lookup order: exact key, then lower-cased key, then keyword heuristics.
    Returns None when nothing matches or finish_reason is empty.
    """
    if not finish_reason:
        return None

    lowered = str(finish_reason).lower()

    # Table lookup: exact first, then case-insensitive.
    for key in (finish_reason, lowered):
        if key in HAYSTACK_FINISH_REASON_MAPPING:
            return HAYSTACK_FINISH_REASON_MAPPING[key]

    # Heuristic fallback — first matching keyword group wins.
    keyword_groups = (
        (('stop', 'complete', 'success', 'done'), FinishType.SUCCESS.value),
        (('length', 'token', 'limit', 'truncat'), FinishType.TRUNCATED.value),
        (('filter', 'safety', 'block'), FinishType.CONTENT_FILTER.value),
        (('error', 'fail', 'exception'), FinishType.ERROR.value),
    )
    for keywords, finish_type in keyword_groups:
        if any(keyword in lowered for keyword in keywords):
            return finish_type

    return None


def map_teamsai_finish_reason_to_finish_type(finish_reason):
    """Map a TeamsAI finish_reason to a standardized finish_type.

    Exact lookup followed by a case-insensitive lookup; unlike the other
    wrapper mappers there is no keyword fallback. Returns None when unmapped.
    """
    if not finish_reason:
        return None

    lowered = str(finish_reason).lower()
    for key in (finish_reason, lowered):
        if key in TEAMSAI_FINISH_REASON_MAPPING:
            return TEAMSAI_FINISH_REASON_MAPPING[key]

    return None


def map_adk_finish_reason_to_finish_type(finish_reason):
    """Map an ADK (Gemini-backed) finish_reason to a standardized finish_type."""
    return ADK_FINISH_REASON_MAPPING.get(finish_reason) if finish_reason else None
@@ -11,8 +11,6 @@ from opentelemetry.trace.propagation import _SPAN_KEY
11
11
 
12
12
  logger = logging.getLogger(__name__)
13
13
  MAX_DATA_LENGTH = 1000
14
- token_data = local()
15
- token_data.current_token = None
16
14
 
17
15
  def get_route(args) -> str:
18
16
  return args[0]['PATH_INFO'] if 'PATH_INFO' in args[0] else ""
@@ -47,21 +45,18 @@ def flask_pre_tracing(args):
47
45
  if key.startswith("HTTP_"):
48
46
  new_key = key[5:].lower().replace("_", "-")
49
47
  headers[new_key] = value
50
- token_data.current_token = extract_http_headers(headers)
48
+ return extract_http_headers(headers)
51
49
 
52
- def flask_post_tracing():
53
- clear_http_scopes(token_data.current_token)
54
- token_data.current_token = None
50
def flask_post_tracing(token):
    """Clear the HTTP scopes registered for this request, identified by *token*."""
    clear_http_scopes(token)
55
52
 
56
53
class FlaskSpanHandler(SpanHandler):
    """Span handler for Flask requests.

    pre_tracing extracts HTTP headers into Monocle scopes and returns a token;
    post_tracing receives that token back and clears the scopes.
    """

    def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
        # The returned token is handed back to post_tracing for cleanup.
        return flask_pre_tracing(args)

    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token):
        flask_post_tracing(token)
65
60
 
66
61
  class FlaskResponseSpanHandler(SpanHandler):
67
62
  def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value):
@@ -0,0 +1,120 @@
1
+ import logging
2
+ from monocle_apptrace.instrumentation.common.utils import (
3
+ get_exception_message,
4
+ get_json_dumps,
5
+ get_status_code,
6
+ )
7
+ from monocle_apptrace.instrumentation.metamodel.finish_types import (
8
+ map_gemini_finish_reason_to_finish_type,
9
+ GEMINI_FINISH_REASON_MAPPING
10
+ )
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
def resolve_from_alias(my_map, alias):
    """Return the value of the first alias key present in my_map, or None if none match."""
    return next((my_map[key] for key in alias if key in my_map), None)
21
+
22
def extract_messages(kwargs):
    """Extract system and user messages from Gemini generate_content kwargs.

    Returns a list of JSON-encoded single-entry {role: text} dicts; returns []
    on any failure.
    """
    try:
        collected = []

        # The system instruction, when present, lives on the request config.
        config = kwargs.get('config')
        if config and hasattr(config, 'system_instruction'):
            system_instruction = getattr(config, 'system_instruction', None)
            if system_instruction:
                collected.append({'system': system_instruction})

        # 'contents' may be a bare string or a list of Content-like objects.
        contents = kwargs.get('contents')
        if isinstance(contents, str):
            collected.append({'user': contents})
        elif isinstance(contents, list):
            for entry in contents:
                parts = getattr(entry, 'parts', None)
                if parts:
                    # Only the first part of each content entry is captured.
                    first_part = parts[0]
                    if hasattr(first_part, 'text'):
                        collected.append({getattr(entry, 'role', 'user'): first_part.text})

        return [get_json_dumps(item) for item in collected]
    except Exception as e:
        logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
        return []
46
+
47
def extract_assistant_message(arguments):
    """Return the assistant reply from a Gemini call as a JSON string.

    On success, returns the JSON-encoded {role: text} (or "" when the
    response has no text). On failure, returns the exception message or the
    response's `error` attribute. Returns None when the response structure is
    unexpected (IndexError/AttributeError while probing it).
    """
    try:
        status = get_status_code(arguments)
        result = arguments['result']

        # Prefer the role reported by the first candidate; default "assistant".
        role = "assistant"
        if (hasattr(result, "candidates") and len(result.candidates) > 0
                and hasattr(result.candidates[0], "content")
                and hasattr(result.candidates[0].content, "role")):
            role = result.candidates[0].content.role

        if status == 'success':
            messages = []
            if hasattr(result, "text") and len(result.text):
                messages.append({role: result.text})
            return get_json_dumps(messages[0]) if messages else ""

        # Non-success path: surface the exception or the response error.
        if arguments["exception"] is not None:
            return get_exception_message(arguments)
        if hasattr(result, "error"):
            return result.error
        return ""
    except (IndexError, AttributeError) as e:
        logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
        return None
66
+
67
def update_input_span_events(kwargs):
    """Return the first entry of kwargs['contents'] when it is a non-empty list, else None."""
    contents = kwargs.get('contents')
    if isinstance(contents, list) and contents:
        return contents[0]
    return None
71
+
72
def update_output_span_events(results):
    """Return a truncated string preview of the first embedding's values.

    Only produces output when the first embedding has more than 100 values
    (first 100 values stringified plus "..."); otherwise returns None.
    """
    embeddings = getattr(results, 'embeddings', None)
    if isinstance(embeddings, list) and embeddings:
        values = getattr(embeddings[0], 'values', None)
        if isinstance(values, list) and len(values) > 100:
            return str(values[:100]) + "..."
    return None
78
+
79
def extract_inference_endpoint(instance):
    """Best-effort extraction of the Gemini client's base URL.

    Walks instance._api_client._http_options.base_url. NOTE: falls back to an
    empty list (not None) when unavailable, matching existing callers.
    """
    try:
        api_client = getattr(instance, '_api_client', None)
        http_options = getattr(api_client, '_http_options', None)
        if http_options is not None and hasattr(http_options, 'base_url'):
            return http_options.base_url
    except Exception as e:
        logger.warning("Warning: Error occurred in inference endpoint: %s", str(e))
    return []
87
+
88
def update_span_from_llm_response(response, instance):
    """Collect token-usage counters from a Gemini response's usage_metadata.

    Returns {"completion_tokens", "prompt_tokens", "total_tokens"} when usage
    metadata is present, otherwise an empty dict.
    """
    usage = getattr(response, "usage_metadata", None) if response is not None else None
    if usage is None:
        return {}
    return {
        "completion_tokens": usage.candidates_token_count,
        "prompt_tokens": usage.prompt_token_count,
        "total_tokens": usage.total_token_count,
    }
97
+
98
def extract_finish_reason(arguments):
    """Extract candidates[0].finish_reason from a Gemini response.

    Returns None when the call raised an exception, when the response has no
    candidates, or when the structure is otherwise unexpected.
    """
    try:
        if arguments["exception"] is not None:
            return None

        response = arguments["result"]
        if response is None:
            return None

        candidates = getattr(response, "candidates", None)
        if candidates is not None and len(candidates) > 0:
            first = candidates[0]
            if hasattr(first, "finish_reason"):
                return first.finish_reason
    except (IndexError, AttributeError) as e:
        logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
    return None
117
+
118
def map_finish_reason_to_finish_type(finish_reason):
    """Delegate to the shared Gemini finish-reason → finish-type mapping."""
    return map_gemini_finish_reason_to_finish_type(finish_reason)