monocle-apptrace 0.4.1__py3-none-any.whl → 0.5.0b1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of monocle-apptrace has been flagged as possibly problematic.
- monocle_apptrace/__main__.py +1 -1
- monocle_apptrace/exporters/file_exporter.py +123 -36
- monocle_apptrace/instrumentation/common/__init__.py +16 -1
- monocle_apptrace/instrumentation/common/constants.py +6 -1
- monocle_apptrace/instrumentation/common/instrumentor.py +19 -152
- monocle_apptrace/instrumentation/common/method_wrappers.py +380 -0
- monocle_apptrace/instrumentation/common/span_handler.py +39 -24
- monocle_apptrace/instrumentation/common/utils.py +20 -14
- monocle_apptrace/instrumentation/common/wrapper.py +10 -9
- monocle_apptrace/instrumentation/common/wrapper_method.py +40 -1
- monocle_apptrace/instrumentation/metamodel/a2a/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/_helper.py +37 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +112 -0
- monocle_apptrace/instrumentation/metamodel/a2a/methods.py +22 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +35 -18
- monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +14 -10
- monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +13 -11
- monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +5 -0
- monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +88 -8
- monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +22 -8
- monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +92 -16
- monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +1 -1
- monocle_apptrace/instrumentation/metamodel/fastapi/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +82 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +44 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/finish_types.py +387 -0
- monocle_apptrace/instrumentation/metamodel/flask/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/gemini/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +120 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +83 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +43 -0
- monocle_apptrace/instrumentation/metamodel/gemini/methods.py +24 -0
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +15 -8
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +5 -10
- monocle_apptrace/instrumentation/metamodel/haystack/methods.py +7 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +78 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +51 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py +23 -0
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +127 -19
- monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +15 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +67 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +127 -20
- monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +43 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +29 -5
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +227 -16
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +127 -10
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +51 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +68 -1
- monocle_apptrace/instrumentation/metamodel/mcp/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +118 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +48 -0
- monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +13 -0
- monocle_apptrace/instrumentation/metamodel/mcp/methods.py +21 -0
- monocle_apptrace/instrumentation/metamodel/openai/_helper.py +83 -16
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +103 -92
- monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +41 -22
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +5 -9
- monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +0 -4
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/METADATA +14 -3
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/RECORD +74 -44
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/licenses/NOTICE +0 -0

monocle_apptrace/instrumentation/metamodel/openai/_helper.py
CHANGED

@@ -6,30 +6,50 @@ and assistant messages from various input formats.
 import logging
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
+    get_json_dumps,
     try_option,
     get_exception_message,
     get_parent_span,
     get_status_code,
 )
 from monocle_apptrace.instrumentation.common.span_handler import NonFrameworkSpanHandler, WORKFLOW_TYPE_MAP
+from monocle_apptrace.instrumentation.metamodel.finish_types import (
+    map_openai_finish_reason_to_finish_type,
+    OPENAI_FINISH_REASON_MAPPING
+)
+from monocle_apptrace.instrumentation.common.constants import CHILD_ERROR_CODE
 
 logger = logging.getLogger(__name__)
 
-
 def extract_messages(kwargs):
     """Extract system and user messages"""
     try:
         messages = []
         if 'instructions' in kwargs:
-            messages.append({'
+            messages.append({'system': kwargs.get('instructions', {})})
         if 'input' in kwargs:
-
+            if isinstance(kwargs['input'], str):
+                messages.append({'user': kwargs.get('input', "")})
+            # [
+            #     {
+            #         "role": "developer",
+            #         "content": "Talk like a pirate."
+            #     },
+            #     {
+            #         "role": "user",
+            #         "content": "Are semicolons optional in JavaScript?"
+            #     }
+            # ]
+            if isinstance(kwargs['input'], list):
+                for item in kwargs['input']:
+                    if isinstance(item, dict) and 'role' in item and 'content' in item:
+                        messages.append({item['role']: item['content']})
         if 'messages' in kwargs and len(kwargs['messages']) >0:
             for msg in kwargs['messages']:
                 if msg.get('content') and msg.get('role'):
                     messages.append({msg['role']: msg['content']})
 
-        return [
+        return [get_json_dumps(message) for message in messages]
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
         return []

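As an aside for readers, a minimal sketch of the normalization the reworked extract_messages performs, assuming get_json_dumps behaves like json.dumps (the real helper lives in monocle_apptrace.instrumentation.common.utils and is not shown in this diff):

    import json

    # Mirrors the extract_messages branches added above.
    kwargs = {
        "instructions": "Talk like a pirate.",
        "input": [{"role": "user", "content": "Are semicolons optional in JavaScript?"}],
    }

    messages = []
    if "instructions" in kwargs:
        messages.append({"system": kwargs["instructions"]})
    if isinstance(kwargs.get("input"), list):
        for item in kwargs["input"]:
            if isinstance(item, dict) and "role" in item and "content" in item:
                messages.append({item["role"]: item["content"]})

    print([json.dumps(m) for m in messages])
    # ['{"system": "Talk like a pirate."}', '{"user": "Are semicolons optional in JavaScript?"}']
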
@@ -37,25 +57,39 @@ def extract_messages(kwargs):
 
 def extract_assistant_message(arguments):
     try:
+        messages = []
         status = get_status_code(arguments)
-
-        if status == 'success':
+        if status == 'success' or status == 'completed':
             response = arguments["result"]
-            if hasattr(response,"output_text") and len(response.output_text):
-
-
-
-
+            if hasattr(response, "output_text") and len(response.output_text):
+                role = response.role if hasattr(response, "role") else "assistant"
+                messages.append({role: response.output_text})
+            if (
+                response is not None
+                and hasattr(response, "choices")
+                and len(response.choices) > 0
+            ):
+                if hasattr(response.choices[0], "message"):
+                    role = (
+                        response.choices[0].message.role
+                        if hasattr(response.choices[0].message, "role")
+                        else "assistant"
+                    )
+                    messages.append({role: response.choices[0].message.content})
+            return get_json_dumps(messages[0]) if messages else ""
         else:
             if arguments["exception"] is not None:
-
+                return get_exception_message(arguments)
             elif hasattr(arguments["result"], "error"):
-
-
+                return arguments["result"].error
+
     except (IndexError, AttributeError) as e:
-        logger.warning(
+        logger.warning(
+            "Warning: Error occurred in extract_assistant_message: %s", str(e)
+        )
         return None
 
+
 def extract_provider_name(instance):
     provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
     return provider_url.unwrap_or(None)

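The accessors above all receive the same arguments mapping. A hedged sketch of its shape, inferred from the usages in this diff rather than from package documentation:

    from types import SimpleNamespace

    # "result" holds the SDK response and "exception" the raised error (or
    # None); other keys such as "span" also appear later in this diff.
    fake_response = SimpleNamespace(output_text="Ahoy!", role="assistant")
    arguments = {"result": fake_response, "exception": None}

    # Assuming get_status_code reports success for an exception-free result,
    # extract_assistant_message(arguments) returns '{"assistant": "Ahoy!"}'.
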
@@ -129,7 +163,7 @@ def get_inference_type(instance):
 
 class OpenAISpanHandler(NonFrameworkSpanHandler):
     def is_teams_span_in_progress(self) -> bool:
-        return self.
+        return self.is_framework_span_in_progress() and self.get_workflow_name_in_progress() == WORKFLOW_TYPE_MAP["teams.ai"]
 
     # If openAI is being called by Teams AI SDK, then retain the metadata part of the span events
     def skip_processor(self, to_wrap, wrapped, instance, span, args, kwargs) -> list[str]:

@@ -144,3 +178,36 @@ class OpenAISpanHandler(NonFrameworkSpanHandler):
             return super().hydrate_events(to_wrap, wrapped, instance, args, kwargs, ret_result, span=parent_span, parent_span=None, ex=ex)
 
         return super().hydrate_events(to_wrap, wrapped, instance, args, kwargs, ret_result, span, parent_span=parent_span, ex=ex)
+
+    def post_task_processing(self, to_wrap, wrapped, instance, args, kwargs, result, ex, span, parent_span):
+        # TeamsAI doesn't capture the status and other metadata from underlying OpenAI SDK.
+        # Thus we save the OpenAI status code in the parent span and retrieve it here to preserve meaningful error codes.
+        if self.is_teams_span_in_progress() and ex is not None:
+            if len(span.events) > 1 and span.events[1].name == "data.output" and span.events[1].attributes.get("error_code") is not None:
+                parent_span.set_attribute(CHILD_ERROR_CODE, span.events[1].attributes.get("error_code"))
+        super().post_task_processing(to_wrap, wrapped, instance, args, kwargs, result, ex, span, parent_span)
+
+def extract_finish_reason(arguments):
+    """Extract finish_reason from OpenAI response"""
+    try:
+        if arguments["exception"] is not None:
+            if hasattr(arguments["exception"], "code") and arguments["exception"].code in OPENAI_FINISH_REASON_MAPPING.keys():
+                return arguments["exception"].code
+        response = arguments["result"]
+
+        # Handle streaming responses
+        if hasattr(response, "finish_reason") and response.finish_reason:
+            return response.finish_reason
+
+        # Handle non-streaming responses
+        if response is not None and hasattr(response, "choices") and len(response.choices) > 0:
+            if hasattr(response.choices[0], "finish_reason"):
+                return response.choices[0].finish_reason
+    except (IndexError, AttributeError) as e:
+        logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
+        return None
+    return None
+
+def map_finish_reason_to_finish_type(finish_reason):
+    """Map OpenAI finish_reason to finish_type based on the possible errors mapping"""
+    return map_openai_finish_reason_to_finish_type(finish_reason)

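The concrete finish-reason table lives in the new finish_types module and is not shown in this diff; the idea is a plain dictionary lookup. A hypothetical illustration (mapping values assumed, not taken from the package):

    # Assumed stand-in for OPENAI_FINISH_REASON_MAPPING.
    EXAMPLE_MAPPING = {
        "stop": "success",
        "length": "truncated",
        "content_filter": "content_filter",
    }

    def example_map_finish_reason_to_finish_type(finish_reason):
        return EXAMPLE_MAPPING.get(finish_reason)

    print(example_map_finish_reason_to_finish_type("stop"))  # "success"
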
monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py
CHANGED

@@ -6,69 +6,105 @@ from monocle_apptrace.instrumentation.metamodel.openai import (
     _helper,
 )
 from monocle_apptrace.instrumentation.common.utils import (
+    get_error_message,
     patch_instance_method,
-    resolve_from_alias
-    get_status,
-    get_exception_status_code,
-    get_status_code,
+    resolve_from_alias
 )
 
 logger = logging.getLogger(__name__)
 
 
+def _process_stream_item(item, state):
+    """Process a single stream item and update state."""
+    try:
+        if hasattr(item, "type") and isinstance(item.type, str) and item.type.startswith("response."):
+            if state["waiting_for_first_token"]:
+                state["waiting_for_first_token"] = False
+                state["first_token_time"] = time.time_ns()
+            if item.type == "response.output_text.delta":
+                state["accumulated_response"] += item.delta
+            if item.type == "response.completed":
+                state["stream_closed_time"] = time.time_ns()
+                if hasattr(item, "response") and hasattr(item.response, "usage"):
+                    state["token_usage"] = item.response.usage
+        elif (
+            hasattr(item, "choices")
+            and item.choices
+            and item.choices[0].delta
+            and item.choices[0].delta.content
+        ):
+            if hasattr(item.choices[0].delta, "role") and item.choices[0].delta.role:
+                state["role"] = item.choices[0].delta.role
+            if state["waiting_for_first_token"]:
+                state["waiting_for_first_token"] = False
+                state["first_token_time"] = time.time_ns()
+
+            state["accumulated_response"] += item.choices[0].delta.content
+        elif hasattr(item, "object") and item.object == "chat.completion.chunk" and item.usage:
+            # Handle the case where the response is a chunk
+            state["token_usage"] = item.usage
+            state["stream_closed_time"] = time.time_ns()
+            # Capture finish_reason from the chunk
+            if (
+                hasattr(item, "choices")
+                and item.choices
+                and len(item.choices) > 0
+                and hasattr(item.choices[0], 'finish_reason')
+                and item.choices[0].finish_reason
+            ):
+                finish_reason = item.choices[0].finish_reason
+                state["finish_reason"] = finish_reason
+
+    except Exception as e:
+        logger.warning(
+            "Warning: Error occurred while processing stream item: %s",
+            str(e),
+        )
+    finally:
+        state["accumulated_temp_list"].append(item)
+
+
+def _create_span_result(state, stream_start_time):
+    """Create the span result object."""
+    return SimpleNamespace(
+        type="stream",
+        timestamps={
+            "role": state["role"],
+            "data.input": int(stream_start_time),
+            "data.output": int(state["first_token_time"]),
+            "metadata": int(state["stream_closed_time"] or time.time_ns()),
+        },
+        output_text=state["accumulated_response"],
+        usage=state["token_usage"],
+        finish_reason=state["finish_reason"]
+    )
+
+
 def process_stream(to_wrap, response, span_processor):
-    waiting_for_first_token = True
     stream_start_time = time.time_ns()
-
-
-
-
-
+
+    # Shared state for both sync and async processing
+    state = {
+        "waiting_for_first_token": True,
+        "first_token_time": stream_start_time,
+        "stream_closed_time": None,
+        "accumulated_response": "",
+        "token_usage": None,
+        "accumulated_temp_list": [],
+        "finish_reason": None,
+        "role": "assistant",
+    }
 
     if to_wrap and hasattr(response, "__iter__"):
        original_iter = response.__iter__
 
        def new_iter(self):
-            nonlocal waiting_for_first_token, first_token_time, stream_closed_time, accumulated_response, token_usage
-
            for item in original_iter():
-
-
-                    item.choices
-                    and item.choices[0].delta
-                    and item.choices[0].delta.content
-                ):
-                    if waiting_for_first_token:
-                        waiting_for_first_token = False
-                        first_token_time = time.time_ns()
-
-                    accumulated_response += item.choices[0].delta.content
-                    # token_usage = item.usage
-                elif item.object == "chat.completion.chunk" and item.usage:
-                    # Handle the case where the response is a chunk
-                    token_usage = item.usage
-                    stream_closed_time = time.time_ns()
-
-            except Exception as e:
-                logger.warning(
-                    "Warning: Error occurred while processing item in new_iter: %s",
-                    str(e),
-                )
-            finally:
-                accumulated_temp_list.append(item)
-                yield item
+                _process_stream_item(item, state)
+                yield item
 
            if span_processor:
-                ret_val =
-                    type="stream",
-                    timestamps={
-                        "data.input": int(stream_start_time),
-                        "data.output": int(first_token_time),
-                        "metadata": int(stream_closed_time or time.time_ns()),
-                    },
-                    output_text=accumulated_response,
-                    usage=token_usage,
-                )
+                ret_val = _create_span_result(state, stream_start_time)
                span_processor(ret_val)
 
        patch_instance_method(response, "__iter__", new_iter)

@@ -77,46 +113,12 @@ def process_stream(to_wrap, response, span_processor):
        original_iter = response.__aiter__
 
        async def new_aiter(self):
-            nonlocal waiting_for_first_token, first_token_time, stream_closed_time, accumulated_response, token_usage
-
            async for item in original_iter():
-
-
-                    item.choices
-                    and item.choices[0].delta
-                    and item.choices[0].delta.content
-                ):
-                    if waiting_for_first_token:
-                        waiting_for_first_token = False
-                        first_token_time = time.time_ns()
-
-                    accumulated_response += item.choices[0].delta.content
-                    # token_usage = item.usage
-                elif item.object == "chat.completion.chunk" and item.usage:
-                    # Handle the case where the response is a chunk
-                    token_usage = item.usage
-                    stream_closed_time = time.time_ns()
-
-            except Exception as e:
-                logger.warning(
-                    "Warning: Error occurred while processing item in new_aiter: %s",
-                    str(e),
-                )
-            finally:
-                accumulated_temp_list.append(item)
-                yield item
+                _process_stream_item(item, state)
+                yield item
 
            if span_processor:
-                ret_val =
-                    type="stream",
-                    timestamps={
-                        "data.input": int(stream_start_time),
-                        "data.output": int(first_token_time),
-                        "metadata": int(stream_closed_time or time.time_ns()),
-                    },
-                    output_text=accumulated_response,
-                    usage=token_usage,
-                )
+                ret_val = _create_span_result(state, stream_start_time)
                span_processor(ret_val)
 
        patch_instance_method(response, "__aiter__", new_aiter)

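For readers unfamiliar with the pattern, a self-contained sketch of the per-instance __iter__ wrapping that process_stream relies on; types.MethodType stands in for the package's patch_instance_method, whose implementation is not shown in this diff:

    import types

    class FakeStream:
        """Stand-in for an SDK streaming response object."""
        def __iter__(self):
            yield from ("chunk-1", "chunk-2")

    stream = FakeStream()
    original_iter = stream.__iter__
    seen = []

    def new_iter(self):
        # Observe each chunk (as _process_stream_item does), then pass it on.
        for item in original_iter():
            seen.append(item)
            yield item

    # Bind the wrapper to this one instance. Note that iter(stream) resolves
    # __iter__ on the class, which is why a helper like patch_instance_method
    # is needed; calling the patched attribute directly works here.
    stream.__iter__ = types.MethodType(new_iter, stream)
    print(list(stream.__iter__()), seen)  # ['chunk-1', 'chunk-2'] ['chunk-1', 'chunk-2']
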
@@ -198,20 +200,17 @@ INFERENCE = {
        {
            "name": "data.output",
            "attributes": [
+
+                {
+                    "attribute": "error_code",
+                    "accessor": lambda arguments: get_error_message(arguments)
+                },
                {
                    "_comment": "this is result from LLM",
                    "attribute": "response",
                    "accessor": lambda arguments: _helper.extract_assistant_message(
                        arguments,
                    ),
-                },
-                {
-                    "attribute": "status",
-                    "accessor": lambda arguments: get_status(arguments)
-                },
-                {
-                    "attribute": "status_code",
-                    "accessor": lambda arguments: get_status_code(arguments)
                }
            ],
        },

@@ -223,6 +222,18 @@ INFERENCE = {
            "accessor": lambda arguments: _helper.update_span_from_llm_response(
                arguments["result"]
            ),
+        },
+        {
+            "_comment": "finish reason from OpenAI response",
+            "attribute": "finish_reason",
+            "accessor": lambda arguments: _helper.extract_finish_reason(arguments)
+        },
+        {
+            "_comment": "finish type mapped from finish reason",
+            "attribute": "finish_type",
+            "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(
+                _helper.extract_finish_reason(arguments)
+            )
        }
    ],
 },

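A minimal sketch of how such an accessor-driven attribute list can be evaluated; the loop is illustrative (Monocle's real span handler is not part of this diff) and the finish-type mapping is assumed:

    from types import SimpleNamespace

    attributes = [
        {"attribute": "finish_reason",
         "accessor": lambda arguments: getattr(arguments["result"], "finish_reason", None)},
        {"attribute": "finish_type",  # assumed mapping, see the note above
         "accessor": lambda arguments: {"stop": "success"}.get(
             getattr(arguments["result"], "finish_reason", None))},
    ]

    arguments = {"result": SimpleNamespace(finish_reason="stop"), "exception": None}
    span_attributes = {a["attribute"]: a["accessor"](arguments) for a in attributes}
    print(span_attributes)  # {'finish_reason': 'stop', 'finish_type': 'success'}
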
monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py
CHANGED

@@ -1,17 +1,22 @@
 import logging
-from monocle_apptrace.instrumentation.common.utils import MonocleSpanException
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
+    MonocleSpanException,
     get_keys_as_tuple,
     get_nested_value,
     try_option,
     get_exception_message,
-
+    get_json_dumps,
+    get_status_code
 )
-
+from monocle_apptrace.instrumentation.metamodel.finish_types import (
+    map_teamsai_finish_reason_to_finish_type,
+    TEAMSAI_FINISH_REASON_MAPPING
+)
+from monocle_apptrace.instrumentation.common.constants import CHILD_ERROR_CODE
 logger = logging.getLogger(__name__)
 
-def
+def extract_messages(arguments):
     """
     Captures the input from Teams AI state.
     Args:

@@ -47,7 +52,7 @@ def capture_input(arguments):
        if hasattr(context, "activity") and hasattr(context.activity, "text"):
            messages.append({'user': str(context.activity.text)})
 
-        return [
+        return [get_json_dumps(message) for message in messages]
    except Exception as e:
        print(f"Debug - Arguments structure: {str(arguments)}")
        print(f"Debug - kwargs: {str(kwargs)}")

@@ -105,14 +110,6 @@ def get_prompt_template(arguments):
        "prompt_template_type": get_nested_value(arguments.get("kwargs", {}), ["prompt", "config", "type"])
    }
 
-def get_status_code(arguments):
-    if arguments["exception"] is not None:
-        return get_exception_status_code(arguments)
-    elif hasattr(arguments["result"], "status"):
-        return arguments["result"].status
-    else:
-        return 'success'
-
 def get_status(arguments):
     if arguments["exception"] is not None:
         return 'error'

@@ -120,27 +117,49 @@ def get_status(arguments):
         return 'success'
     else:
         return 'error'
-
-def
+
+def extract_assistant_message(arguments) -> str:
     status = get_status_code(arguments)
-
+    messages = []
+    role = "assistant"
     if status == 'success':
         if hasattr(arguments["result"], "message"):
-
+            messages.append({role: arguments["result"].message.content})
         else:
-
+            messages.append({role: str(arguments["result"])})
     else:
         if arguments["exception"] is not None:
-
+            return get_exception_message(arguments)
         elif hasattr(arguments["result"], "error"):
-
-            return
+            return arguments["result"].error
+    return get_json_dumps(messages[0]) if messages else ""
+
+def extract_finish_reason(arguments):
+    """Map TeamAI finish_reason to standardized finish_type."""
+    return get_status_code(arguments)
+
+def extract_status_code(arguments):
+    # TeamsAI doesn't capture the status and other metadata from underlying OpenAI SDK.
+    # Thus we save the OpenAI status code in the parent span and retrieve it here to preserve meaningful error codes.
+    status = get_status_code(arguments)
+    if status == 'error' and arguments['exception'] is None:
+        child_status = arguments['span'].attributes.get(CHILD_ERROR_CODE)
+        if child_status is not None:
+            return child_status
+    return status
 
 def check_status(arguments):
     status = get_status_code(arguments)
-    if status != 'success':
+    if status != 'success' and arguments['exception'] is None:
         raise MonocleSpanException(f"{status}")
 
+def map_finish_reason_to_finish_type(finish_reason):
+    """Map TeamsAI finish_reason to standardized finish_type."""
+    if not finish_reason:
+        return None
+
+    return map_teamsai_finish_reason_to_finish_type(finish_reason)
+
 def extract_provider_name(instance):
     provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
     return provider_url.unwrap_or(None)

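The CHILD_ERROR_CODE round trip can be pictured with a stub span; a sketch under the assumption that span attributes behave like a plain mapping (the constant's actual value lives in monocle_apptrace.instrumentation.common.constants):

    CHILD_ERROR_CODE = "monocle.child_error_code"  # placeholder value

    class StubSpan:
        # What an OpenAI child span would have stashed via post_task_processing.
        attributes = {CHILD_ERROR_CODE: "rate_limit_exceeded"}

    arguments = {"exception": None, "span": StubSpan()}
    status = "error"  # what get_status_code would report for a failed call
    if status == "error" and arguments["exception"] is None:
        status = arguments["span"].attributes.get(CHILD_ERROR_CODE, status)
    print(status)  # "rate_limit_exceeded"
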
monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py
CHANGED

@@ -1,7 +1,7 @@
 from monocle_apptrace.instrumentation.metamodel.teamsai import (
     _helper,
 )
-from monocle_apptrace.instrumentation.common.utils import get_llm_type
+from monocle_apptrace.instrumentation.common.utils import get_error_message, get_llm_type
 TEAMAI_OUTPUT_PROCESSOR = {
     "type": "inference.framework",
     "attributes": [

@@ -44,7 +44,7 @@ TEAMAI_OUTPUT_PROCESSOR = {
            "attributes": [
                {
                    "attribute": "input",
-                    "accessor": _helper.
+                    "accessor": _helper.extract_messages
                }
            ]
        },

@@ -53,16 +53,12 @@ TEAMAI_OUTPUT_PROCESSOR = {
            "_comment": "output from Teams AI",
            "attributes": [
                {
-                    "attribute": "
-                    "accessor": lambda arguments: _helper.
-                },
-                {
-                    "attribute": "status_code",
-                    "accessor": lambda arguments: _helper.get_status_code(arguments)
+                    "attribute": "error_code",
+                    "accessor": lambda arguments: _helper.extract_status_code(arguments)
                },
                {
                    "attribute": "response",
-                    "accessor": lambda arguments: _helper.
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments)
                },
                {
                    "attribute": "check_status",

monocle_apptrace/instrumentation/metamodel/teamsai/sample.json
CHANGED

@@ -100,8 +100,6 @@
        "name": "data.output",
        "timestamp": "2025-06-06T09:00:00.682278Z",
        "attributes": {
-            "status": "success",
-            "status_code": "success",
            "response": "Ah, the French press! The only thing more sophisticated than a monocle-wearing cat. Here\u2019s how to brew a cup of coffee that\u2019ll make you feel like you\u2019re sipping in a Parisian caf\u00e9:\n\n1. **Gather Your Gear**: You\u2019ll need coarsely ground coffee, hot water (just off the boil), a French press, and a timer. Optional: a beret for that extra flair.\n\n2. **Measure Your Coffee**: A good rule of thumb is"
        }
    }

@@ -324,8 +322,6 @@
        "name": "data.output",
        "timestamp": "2025-06-06T09:01:20.299355Z",
        "attributes": {
-            "status": "success",
-            "status_code": "success",
            "response": "Brewing coffee can be a delightful experience, and there are several methods to choose from. Below, I\u2019ll provide a detailed guide for three popular brewing methods: Pour-Over, French Press, and Espresso. Each method has its unique characteristics, so you can choose one based on your preference.\n\n### 1. Pour-Over Coffee\n\n**Equipment Needed:**\n- Pour-over dripper (e.g., Hario V60, Chemex)\n- Coffee filter\n- Kettle (preferably a gooseneck kettle for control)\n- Scale\n- Coffee grinder\n- Mug or carafe\n\n**Ingredients:**\n- Fresh coffee beans (medium roast recommended)\n- Filtered water\n\n**Steps:**\n1. **Measure Coffee:** Use a coffee-to-water ratio of 1:15. For example, for 300g of water, use 20g of coffee.\n2. **Grind Coffee:** Grind the coffee to a medium-coarse consistency, similar to sea salt.\n3. **Boil Water:** Heat water to about 200\u00b0F (93\u00b0C). If you don\u2019t have a thermometer, bring water to a boil and let it sit for 30 seconds.\n4. **Prepare Filter:** Place the coffee filter in the dripper and rinse it with hot water to eliminate paper taste and preheat the dripper.\n5. **Add Coffee:** Place the ground coffee in the filter and create a small well in the center.\n6. **Bloom:** Pour just enough hot water (about 40g) to saturate the grounds. Let it bloom for 30-45 seconds.\n7. **Brew:** Slowly pour the remaining water in a circular motion, avoiding the edges. Aim to finish pouring in about 2-3 minutes.\n8. **Serve:** Once the water has fully drained, remove the dripper, and enjoy your coffee!\n\n### 2. French Press Coffee\n\n**Equipment Needed:**\n- French press\n- Kettle\n- Scale\n- Coffee grinder\n- Stirring spoon\n- Timer\n\n**Ingredients:**\n- Fresh coffee beans (coarse grind recommended)\n- Filtered water\n\n**Steps:**\n1. **Measure Coffee:** Use a coffee-to-water ratio of 1:15. For example, for 350g of water, use 23g of coffee.\n2. **Grind Coffee:** Grind the coffee to a coarse consistency, similar to breadcrumbs.\n3. **Boil Water:** Heat water to about 200\u00b0F (93\u00b0C).\n4. **Add Coffee:** Place the ground coffee in the French press.\n5. **Add Water:** Pour hot water over the coffee grounds, ensuring all grounds are saturated. Stir gently to mix.\n6. **Steep:** Place the lid on the French press and let it steep for 4 minutes.\n7. **Press:** Slowly press the plunger down to separate the grounds from the coffee.\n8. **Serve:** Pour the coffee into your mug and enjoy!\n\n### 3. Espresso\n\n**Equipment Needed:**\n- Espresso machine\n- Coffee grinder\n- Tamper\n- Scale\n- Shot glass or demitasse\n\n**Ingredients:**\n- Fresh coffee beans (dark roast recommended)\n- Filtered water\n\n**Steps:**\n1. **Measure Coffee:** Use about 18-20g of coffee for a double shot (about 60ml).\n2. **Grind Coffee:** Grind the coffee to a fine consistency, similar to table salt.\n3. **Preheat Machine:** Turn on your espresso machine and allow it to heat up.\n4. **Add Coffee:** Place the ground coffee in the portafilter and distribute evenly.\n5. **Tamp:** Use a tamper to press the coffee evenly and firmly.\n6. **Brew:** Lock the portafilter into the machine and start the extraction. Aim for a brew time of 25-30 seconds.\n7. **Serve:** Once the espresso is brewed, pour it into a shot glass or demitasse and enjoy!\n\n### Troubleshooting Common Problems\n\n- **Bitter Coffee:** This can be due to over-extraction. Try a coarser grind or reduce the brew time.\n- **Weak Coffee:** This may be due to under-extraction. Use a finer grind or increase the brew time.\n- **Sour Coffee:** This can happen if the coffee is under-extracted or brewed with water that is too cool. Ensure your water is at the right temperature.\n\n### Conclusion\n\nEach brewing method has its nuances, and experimenting with different variables (grind size, water temperature, brew time) can help you find your perfect cup. Enjoy the process, and happy brewing!"
        }
    }

{monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: monocle_apptrace
-Version: 0.4.1
+Version: 0.5.0b1
 Summary: package with monocle genAI tracing
 Project-URL: Homepage, https://github.com/monocle2ai/monocle
 Project-URL: Issues, https://github.com/monocle2ai/monocle/issues

@@ -20,25 +20,32 @@ Requires-Dist: boto3==1.37.24; extra == 'aws'
 Provides-Extra: azure
 Requires-Dist: azure-storage-blob==12.22.0; extra == 'azure'
 Provides-Extra: dev
+Requires-Dist: a2a-sdk==0.2.8; extra == 'dev'
 Requires-Dist: anthropic-haystack; extra == 'dev'
-Requires-Dist: anthropic==0.
+Requires-Dist: anthropic==0.57.1; extra == 'dev'
 Requires-Dist: azure-storage-blob==12.22.0; extra == 'dev'
 Requires-Dist: boto3==1.37.24; extra == 'dev'
 Requires-Dist: chromadb==1.0.10; extra == 'dev'
+Requires-Dist: click==8.2.1; extra == 'dev'
 Requires-Dist: datasets==2.20.0; extra == 'dev'
 Requires-Dist: faiss-cpu==1.8.0; extra == 'dev'
+Requires-Dist: fastapi>=0.115.1; extra == 'dev'
 Requires-Dist: flask; extra == 'dev'
 Requires-Dist: haystack-ai==2.3.0; extra == 'dev'
+Requires-Dist: httpx==0.28.1; extra == 'dev'
 Requires-Dist: instructorembedding==1.0.1; extra == 'dev'
 Requires-Dist: langchain-anthropic==0.3.13; extra == 'dev'
 Requires-Dist: langchain-aws==0.2.23; extra == 'dev'
 Requires-Dist: langchain-chroma==0.2.4; extra == 'dev'
 Requires-Dist: langchain-community==0.3.24; extra == 'dev'
+Requires-Dist: langchain-google-genai==2.1.8; extra == 'dev'
+Requires-Dist: langchain-mcp-adapters==0.1.8; extra == 'dev'
 Requires-Dist: langchain-mistralai==0.2.10; extra == 'dev'
 Requires-Dist: langchain-openai==0.3.18; extra == 'dev'
 Requires-Dist: langchain==0.3.25; extra == 'dev'
 Requires-Dist: langchainhub==0.1.21; extra == 'dev'
-Requires-Dist: langgraph==0.
+Requires-Dist: langgraph-supervisor==0.0.28; extra == 'dev'
+Requires-Dist: langgraph==0.5.4; extra == 'dev'
 Requires-Dist: llama-index-embeddings-huggingface==0.5.4; extra == 'dev'
 Requires-Dist: llama-index-llms-anthropic==0.6.19; extra == 'dev'
 Requires-Dist: llama-index-llms-azure-openai==0.3.2; extra == 'dev'

@@ -46,17 +53,21 @@ Requires-Dist: llama-index-llms-mistralai==0.4.0; extra == 'dev'
 Requires-Dist: llama-index-vector-stores-chroma==0.4.1; extra == 'dev'
 Requires-Dist: llama-index-vector-stores-opensearch==0.5.4; extra == 'dev'
 Requires-Dist: llama-index==0.12.37; extra == 'dev'
+Requires-Dist: mcp==1.12.1; extra == 'dev'
 Requires-Dist: mistral-haystack==0.0.2; extra == 'dev'
 Requires-Dist: numpy==1.26.4; extra == 'dev'
 Requires-Dist: opendal==0.45.14; extra == 'dev'
 Requires-Dist: opensearch-haystack==1.2.0; extra == 'dev'
 Requires-Dist: opentelemetry-instrumentation-flask; extra == 'dev'
 Requires-Dist: parameterized==0.9.0; extra == 'dev'
+Requires-Dist: pydantic==2.11.7; extra == 'dev'
 Requires-Dist: pytest-asyncio==0.26.0; extra == 'dev'
 Requires-Dist: pytest==8.3.5; extra == 'dev'
+Requires-Dist: python-dotenv>=1.1.0; extra == 'dev'
 Requires-Dist: requests-aws4auth==1.2.3; extra == 'dev'
 Requires-Dist: sentence-transformers==2.6.1; extra == 'dev'
 Requires-Dist: types-requests==2.31.0.20240106; extra == 'dev'
+Requires-Dist: uvicorn==0.35.0; extra == 'dev'
 Description-Content-Type: text/markdown
 
 # Monocle for tracing GenAI app code
