monocle-apptrace 0.4.0b3__py3-none-any.whl → 0.4.2__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.

Potentially problematic release: this version of monocle-apptrace might be problematic.

Files changed (40)
  1. monocle_apptrace/instrumentation/__init__.py +2 -1
  2. monocle_apptrace/instrumentation/common/constants.py +3 -0
  3. monocle_apptrace/instrumentation/common/instrumentor.py +1 -1
  4. monocle_apptrace/instrumentation/common/span_handler.py +1 -1
  5. monocle_apptrace/instrumentation/common/utils.py +20 -2
  6. monocle_apptrace/instrumentation/common/wrapper_method.py +6 -1
  7. monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +29 -4
  8. monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +12 -2
  9. monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +78 -0
  10. monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +51 -0
  11. monocle_apptrace/instrumentation/metamodel/azfunc/methods.py +23 -0
  12. monocle_apptrace/instrumentation/metamodel/azfunc/wrapper.py +23 -0
  13. monocle_apptrace/instrumentation/metamodel/azureaiinference/__init__.py +1 -0
  14. monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +216 -0
  15. monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +208 -0
  16. monocle_apptrace/instrumentation/metamodel/azureaiinference/methods.py +23 -0
  17. monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +42 -17
  18. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +11 -3
  19. monocle_apptrace/instrumentation/metamodel/gemini/__init__.py +0 -0
  20. monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +76 -0
  21. monocle_apptrace/instrumentation/metamodel/gemini/entities/__init__.py +0 -0
  22. monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +75 -0
  23. monocle_apptrace/instrumentation/metamodel/gemini/methods.py +14 -0
  24. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +20 -12
  25. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +10 -2
  26. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +19 -13
  27. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +10 -2
  28. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +21 -13
  29. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +10 -2
  30. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +17 -9
  31. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +3 -2
  32. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +50 -4
  33. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +32 -12
  34. monocle_apptrace/instrumentation/metamodel/teamsai/methods.py +30 -17
  35. monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +448 -0
  36. {monocle_apptrace-0.4.0b3.dist-info → monocle_apptrace-0.4.2.dist-info}/METADATA +1 -1
  37. {monocle_apptrace-0.4.0b3.dist-info → monocle_apptrace-0.4.2.dist-info}/RECORD +40 -26
  38. {monocle_apptrace-0.4.0b3.dist-info → monocle_apptrace-0.4.2.dist-info}/WHEEL +0 -0
  39. {monocle_apptrace-0.4.0b3.dist-info → monocle_apptrace-0.4.2.dist-info}/licenses/LICENSE +0 -0
  40. {monocle_apptrace-0.4.0b3.dist-info → monocle_apptrace-0.4.2.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py (new file)
@@ -0,0 +1,208 @@
+import logging
+import time
+from types import SimpleNamespace
+from monocle_apptrace.instrumentation.metamodel.azureaiinference import _helper
+from monocle_apptrace.instrumentation.common.utils import (
+    resolve_from_alias,
+    patch_instance_method,
+    get_status,
+    get_exception_status_code
+)
+
+logger = logging.getLogger(__name__)
+
+
+def process_stream(to_wrap, response, span_processor):
+    """Process streaming responses from Azure AI Inference."""
+    waiting_for_first_token = True
+    stream_start_time = time.time_ns()
+    first_token_time = stream_start_time
+    stream_closed_time = None
+    accumulated_response = ""
+    token_usage = None
+
+    # For sync iteration - patch __next__ instead of __iter__
+    if to_wrap and hasattr(response, "__next__"):
+        original_next = response.__next__
+
+        def new_next(self):
+            nonlocal waiting_for_first_token, first_token_time, stream_closed_time, accumulated_response, token_usage
+
+            try:
+                item = original_next()
+
+                # Handle Azure AI Inference streaming chunks
+                if hasattr(item, 'choices') and item.choices:
+                    choice = item.choices[0]
+                    if hasattr(choice, 'delta') and hasattr(choice.delta, 'content') and choice.delta.content:
+                        if waiting_for_first_token:
+                            waiting_for_first_token = False
+                            first_token_time = time.time_ns()
+
+                        accumulated_response += choice.delta.content
+
+                # Check for usage information at the end of stream
+                if hasattr(item, 'usage') and item.usage:
+                    token_usage = item.usage
+                    stream_closed_time = time.time_ns()
+
+                return item
+
+            except StopIteration:
+                # Stream is complete, process final span
+                if span_processor:
+                    ret_val = SimpleNamespace(
+                        type="stream",
+                        timestamps={
+                            "data.input": int(stream_start_time),
+                            "data.output": int(first_token_time),
+                            "metadata": int(stream_closed_time or time.time_ns()),
+                        },
+                        output_text=accumulated_response,
+                        usage=token_usage,
+                    )
+                    span_processor(ret_val)
+                raise
+            except Exception as e:
+                logger.warning(
+                    "Warning: Error occurred while processing item in new_next: %s",
+                    str(e),
+                )
+                raise
+
+        patch_instance_method(response, "__next__", new_next)
+
+    # For async iteration - patch __anext__ instead of __aiter__
+    if to_wrap and hasattr(response, "__anext__"):
+        original_anext = response.__anext__
+
+        async def new_anext(self):
+            nonlocal waiting_for_first_token, first_token_time, stream_closed_time, accumulated_response, token_usage
+
+            try:
+                item = await original_anext()
+
+                # Handle Azure AI Inference streaming chunks
+                if hasattr(item, 'choices') and item.choices:
+                    choice = item.choices[0]
+                    if hasattr(choice, 'delta') and hasattr(choice.delta, 'content') and choice.delta.content:
+                        if waiting_for_first_token:
+                            waiting_for_first_token = False
+                            first_token_time = time.time_ns()
+
+                        accumulated_response += choice.delta.content
+
+                # Check for usage information at the end of stream
+                if hasattr(item, 'usage') and item.usage:
+                    token_usage = item.usage
+                    stream_closed_time = time.time_ns()
+
+                return item
+
+            except StopAsyncIteration:
+                # Stream is complete, process final span
+                if span_processor:
+                    ret_val = SimpleNamespace(
+                        type="stream",
+                        timestamps={
+                            "data.input": int(stream_start_time),
+                            "data.output": int(first_token_time),
+                            "metadata": int(stream_closed_time or time.time_ns()),
+                        },
+                        output_text=accumulated_response,
+                        usage=token_usage,
+                    )
+                    span_processor(ret_val)
+                raise
+            except Exception as e:
+                logger.warning(
+                    "Warning: Error occurred while processing item in new_anext: %s",
+                    str(e),
+                )
+                raise
+
+        patch_instance_method(response, "__anext__", new_anext)
+
+
+INFERENCE = {
+    "type": "inference",
+    "is_auto_close": lambda kwargs: kwargs.get("stream", False) is False,
+    "response_processor": process_stream,
+    "attributes": [
+        [
+            {
+                "_comment": "Azure AI Inference provider type, endpoint",
+                "attribute": "type",
+                "accessor": lambda arguments: f"inference.{_helper.get_inference_type(arguments)}"
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": lambda arguments: _helper.get_provider_name(arguments['instance'])
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": lambda arguments: _helper.extract_inference_endpoint(arguments['instance'])
+            },
+            {
+                "attribute": "deployment",
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments['instance'].__dict__,
+                    ['deployment', 'deployment_name', 'azure_deployment', '_deployment']
+                )
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model information",
+                "attribute": "name",
+                "accessor": lambda arguments: _helper.get_model_name(arguments)
+            },
+            {
+                "attribute": "type",
+                "accessor": lambda arguments: f"model.llm.{_helper.get_model_name(arguments)}" if _helper.get_model_name(arguments) else "model.llm.unknown"
+            }
+        ]
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "Chat messages input to Azure AI Inference",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.extract_messages(arguments['kwargs'])
+                }
+            ]
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "Response from Azure AI Inference",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments)
+                },
+                {
+                    "attribute": "status",
+                    "accessor": lambda arguments: get_status(arguments)
+                },
+                {
+                    "attribute": "status_code",
+                    "accessor": lambda arguments: get_exception_status_code(arguments)
+                }
+            ]
+        },
+        {
+            "name": "metadata",
+            "attributes": [
+                {
+                    "_comment": "Usage metadata from Azure AI Inference",
+                    "accessor": lambda arguments: _helper.update_span_from_llm_response(
+                        arguments['result'],
+                        arguments.get('instance')
+                    )
+                }
+            ]
+        }
+    ]
+}
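The interesting move in process_stream is that it patches __next__/__anext__ on the response instance itself, so chunks are observed lazily as the caller iterates and the span is finalized only when the stream is exhausted. Below is a minimal, runnable sketch of that idea; _patch_instance_method is a hypothetical stand-in for monocle's patch_instance_method helper, whose real implementation may differ.

```python
# Sketch of instance-level iterator patching, assuming a helper like
# patch_instance_method exists; _patch_instance_method here is illustrative only.
import types

def _patch_instance_method(obj, name, func):
    # Bind func to this one object so other streams are unaffected.
    setattr(obj, name, types.MethodType(func, obj))

class FakeStream:
    """Stand-in for a streaming response that yields text chunks."""
    def __init__(self, chunks):
        self._it = iter(chunks)
    def __next__(self):
        return next(self._it)

stream = FakeStream(["Hel", "lo"])
original_next = stream.__next__   # bound method, captured before patching
accumulated = []

def new_next(self):
    try:
        item = original_next()
        accumulated.append(item)                      # observe each chunk as it is consumed
        return item
    except StopIteration:
        print("stream done:", "".join(accumulated))   # finalize the span here
        raise

_patch_instance_method(stream, "__next__", new_next)

# Note: for-loops resolve __next__ on the type, so this simplified sketch only
# intercepts explicit attribute lookups, as below.
while True:
    try:
        stream.__next__()
    except StopIteration:
        break
# prints: stream done: Hello
```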
monocle_apptrace/instrumentation/metamodel/azureaiinference/methods.py (new file)
@@ -0,0 +1,23 @@
+from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
+from monocle_apptrace.instrumentation.metamodel.azureaiinference.entities.inference import INFERENCE
+
+AZURE_AI_INFERENCE_METHODS = [
+    # Chat Completions - Synchronous
+    {
+        "package": "azure.ai.inference",
+        "object": "ChatCompletionsClient",
+        "method": "complete",
+        "wrapper_method": task_wrapper,
+        "span_handler": "non_framework_handler",
+        "output_processor": INFERENCE
+    },
+    # Chat Completions - Asynchronous
+    {
+        "package": "azure.ai.inference.aio",
+        "object": "ChatCompletionsClient",
+        "method": "complete",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "non_framework_handler",
+        "output_processor": INFERENCE
+    }
+]
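Taken together with the entities file above, these two registrations mean a plain azure-ai-inference call is traced with no application changes. A hedged usage sketch follows: the endpoint, key, and model name are placeholders, and while setup_monocle_telemetry is monocle's documented entry point, treat the exact call shown here as an assumption.

```python
# Hypothetical end-to-end usage; endpoint, key, and model are placeholders.
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential
from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry

# Registers the wrapper methods, including AZURE_AI_INFERENCE_METHODS above
# (treat this exact call signature as an assumption).
setup_monocle_telemetry(workflow_name="azureai-demo")

client = ChatCompletionsClient(
    endpoint="https://<your-resource>.services.ai.azure.com/models",  # placeholder
    credential=AzureKeyCredential("<api-key>"),                       # placeholder
)

# ChatCompletionsClient.complete is wrapped by task_wrapper, so this call emits
# an inference span with data.input / data.output / metadata events.
response = client.complete(
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="Say hello."),
    ],
    model="gpt-4o-mini",  # placeholder model/deployment name
)
print(response.choices[0].message.content)
```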
monocle_apptrace/instrumentation/metamodel/botocore/_helper.py
@@ -8,7 +8,7 @@ import json
 from io import BytesIO
 from functools import wraps
 from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
-
+from monocle_apptrace.instrumentation.common.utils import ( get_exception_message,)
 logger = logging.getLogger(__name__)
 
 
@@ -30,24 +30,49 @@ def extract_messages(args):
         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
         return []
 
+def get_exception_status_code(arguments):
+    if arguments['exception'] is not None and hasattr(arguments['exception'], 'response') and arguments['exception'].response is not None:
+        if "ResponseMetadata" in arguments['exception'].response and "HTTPStatusCode" in arguments['exception'].response["ResponseMetadata"]:
+            return arguments['exception'].response["ResponseMetadata"]["HTTPStatusCode"]
+    elif arguments['exception'] is not None:
+        return 'error'
+    else:
+        return 'success'
+
+def get_status_code(arguments):
+    if arguments["exception"] is not None:
+        return get_exception_status_code(arguments)
+    elif hasattr(arguments["result"], "status"):
+        return arguments["result"].status
+    else:
+        return 'success'
 
-def extract_assistant_message(response):
+def extract_assistant_message(arguments):
     try:
-        if "Body" in response and hasattr(response['Body'], "_raw_stream"):
-            raw_stream = getattr(response['Body'], "_raw_stream")
-            if hasattr(raw_stream, "data"):
-                response_bytes = getattr(raw_stream, "data")
-                response_str = response_bytes.decode('utf-8')
-                response_dict = json.loads(response_str)
-                response['Body'] = BytesIO(response_bytes)
-                return [response_dict["answer"]]
-        if "output" in response:
-            output = response.get("output", {})
-            message = output.get("message", {})
-            content = message.get("content", [])
-            if isinstance(content, list) and len(content) > 0 and "text" in content[0]:
-                reply = content[0]["text"]
-                return [reply]
+        status = get_status_code(arguments)
+        response: str = ""
+        if status == 'success':
+            if "Body" in arguments['result'] and hasattr(arguments['result']['Body'], "_raw_stream"):
+                raw_stream = getattr(arguments['result']['Body'], "_raw_stream")
+                if hasattr(raw_stream, "data"):
+                    response_bytes = getattr(raw_stream, "data")
+                    response_str = response_bytes.decode('utf-8')
+                    response_dict = json.loads(response_str)
+                    arguments['result']['Body'] = BytesIO(response_bytes)
+                    response = response_dict["answer"]
+            if "output" in arguments['result']:
+                output = arguments['result'].get("output", {})
+                message = output.get("message", {})
+                content = message.get("content", [])
+                if isinstance(content, list) and len(content) > 0 and "text" in content[0]:
+                    reply = content[0]["text"]
+                    response = reply
+        else:
+            if arguments["exception"] is not None:
+                response = get_exception_message(arguments)
+            elif hasattr(arguments["result"], "error"):
+                response = arguments["result"].error
+        return response
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
         return []
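The refactor changes the helper contract: extract_assistant_message and friends now receive the whole arguments mapping (with at least 'result' and 'exception' keys) instead of the bare result, which is what lets them report error text when the call failed. Below is a condensed, self-contained sketch of the status-code logic above; FakeClientError is an illustrative stand-in for a botocore ClientError.

```python
# Condensed restatement of the botocore status logic, runnable stand-alone.
# FakeClientError is a hypothetical stand-in for botocore's ClientError, which
# carries an HTTP status under response["ResponseMetadata"]["HTTPStatusCode"].
def get_exception_status_code(arguments):
    exc = arguments["exception"]
    if exc is not None and getattr(exc, "response", None) is not None:
        meta = exc.response.get("ResponseMetadata", {})
        if "HTTPStatusCode" in meta:
            return meta["HTTPStatusCode"]
    return "error" if exc is not None else "success"

class FakeClientError(Exception):
    def __init__(self):
        super().__init__("AccessDeniedException")
        self.response = {"ResponseMetadata": {"HTTPStatusCode": 403}}

print(get_exception_status_code({"exception": None, "result": {}}))                 # success
print(get_exception_status_code({"exception": FakeClientError(), "result": None}))  # 403
```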
monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py
@@ -1,7 +1,7 @@
 from monocle_apptrace.instrumentation.metamodel.botocore import (
     _helper,
 )
-from monocle_apptrace.instrumentation.common.utils import get_llm_type
+from monocle_apptrace.instrumentation.common.utils import (get_llm_type, get_status,)
 INFERENCE = {
     "type": "inference",
     "attributes": [
@@ -33,7 +33,6 @@ INFERENCE = {
     "events": [
         {"name": "data.input",
          "attributes": [
-
             {
                 "_comment": "this is instruction and user query to LLM",
                 "attribute": "input",
@@ -44,10 +43,19 @@ INFERENCE = {
         {
             "name": "data.output",
             "attributes": [
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "status",
+                    "accessor": lambda arguments: get_status(arguments)
+                },
+                {
+                    "attribute": "status_code",
+                    "accessor": lambda arguments: _helper.get_status_code(arguments)
+                },
                 {
                     "_comment": "this is response from LLM",
                     "attribute": "response",
-                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments)
                 }
             ]
         },
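An output processor like this INFERENCE dict is declarative: something has to walk its events and call each accessor with the arguments mapping. The evaluator below is a simplified assumption of how that consumption might look, not monocle's actual implementation.

```python
# A simplified assumption of how monocle might consume an output_processor:
# walk each event, call every accessor with the shared `arguments` mapping,
# and collect the results as span-event attributes.
def build_event_attributes(event, arguments):
    attrs = {}
    for spec in event["attributes"]:
        try:
            value = spec["accessor"](arguments)
        except Exception:
            value = None  # a failing accessor must not break the span
        if "attribute" in spec and value is not None:
            attrs[spec["attribute"]] = value
    return attrs

event = {
    "name": "data.output",
    "attributes": [
        {"attribute": "status",
         "accessor": lambda a: "success" if a["exception"] is None else "error"},
        {"attribute": "response", "accessor": lambda a: a["result"]},
    ],
}
print(build_event_attributes(event, {"exception": None, "result": "hi"}))
# {'status': 'success', 'response': 'hi'}
```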
monocle_apptrace/instrumentation/metamodel/gemini/_helper.py (new file)
@@ -0,0 +1,76 @@
+import logging
+from monocle_apptrace.instrumentation.common.utils import (
+    get_exception_message,
+    get_status_code,
+)
+
+logger = logging.getLogger(__name__)
+
+def resolve_from_alias(my_map, alias):
+    """Find a alias that is not none from list of aliases"""
+
+    for i in alias:
+        if i in my_map.keys():
+            return my_map[i]
+    return None
+
+def extract_messages(kwargs):
+    """Extract system and user messages"""
+    try:
+        messages = []
+        config = kwargs.get('config')
+        if config and hasattr(config, 'system_instruction'):
+            system_instructions = getattr(config, 'system_instruction', None)
+            if system_instructions:
+                messages.append({'system': system_instructions})
+
+        contents = kwargs.get('contents')
+        if isinstance(contents, list):
+            for content in contents:
+                if hasattr(content, 'parts') and getattr(content, 'parts'):
+                    part = content.parts[0]
+                    if hasattr(part, 'text'):
+                        messages.append({getattr(content, 'role', 'user'): part.text})
+        elif isinstance(contents, str):
+            messages.append({'input': contents})
+
+        return [str(message) for message in messages]
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+        return []
+
+def extract_assistant_message(arguments):
+    try:
+        status = get_status_code(arguments)
+        response: str = ""
+        if status == 'success':
+            if hasattr(arguments['result'], "text") and len(arguments['result'].text):
+                response = arguments['result'].text
+        else:
+            if arguments["exception"] is not None:
+                response = get_exception_message(arguments)
+            elif hasattr(arguments["result"], "error"):
+                response = arguments["result"].error
+        return response
+    except (IndexError, AttributeError) as e:
+        logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
+        return None
+
+def extract_inference_endpoint(instance):
+    try:
+        if hasattr(instance, '_api_client') and hasattr(instance._api_client, '_http_options'):
+            if hasattr(instance._api_client._http_options, 'base_url'):
+                return instance._api_client._http_options.base_url
+    except Exception as e:
+        logger.warning("Warning: Error occurred in inference endpoint: %s", str(e))
+        return []
+
+def update_span_from_llm_response(response, instance):
+    meta_dict = {}
+    if response is not None and hasattr(response, "usage_metadata") and response.usage_metadata is not None:
+        token_usage = response.usage_metadata
+        if token_usage is not None:
+            meta_dict.update({"completion_tokens": token_usage.candidates_token_count})
+            meta_dict.update({"prompt_tokens": token_usage.prompt_token_count})
+            meta_dict.update({"total_tokens": token_usage.total_token_count})
+    return meta_dict
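update_span_from_llm_response reads google-genai's usage_metadata fields into monocle's usual token-count keys. A small demonstration with a stub shaped like that response; the stub is illustrative, not the real SDK type.

```python
# Stub shaped like a google-genai response, to show which usage_metadata
# fields the helper reads. Field names come from the diff above; the stub
# itself is a hypothetical stand-in.
from types import SimpleNamespace

fake_response = SimpleNamespace(
    text="Hello!",
    usage_metadata=SimpleNamespace(
        prompt_token_count=12,
        candidates_token_count=5,
        total_token_count=17,
    ),
)

usage = fake_response.usage_metadata
meta_dict = {
    "completion_tokens": usage.candidates_token_count,
    "prompt_tokens": usage.prompt_token_count,
    "total_tokens": usage.total_token_count,
}
print(meta_dict)  # {'completion_tokens': 5, 'prompt_tokens': 12, 'total_tokens': 17}
```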
monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py (new file)
@@ -0,0 +1,75 @@
+from monocle_apptrace.instrumentation.metamodel.gemini import (
+    _helper,
+)
+from monocle_apptrace.instrumentation.common.utils import get_llm_type, get_status, get_status_code
+INFERENCE = {
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type , inference_endpoint",
+                "attribute": "type",
+                "accessor": lambda arguments: 'inference.gemini'
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": lambda arguments: _helper.extract_inference_endpoint(arguments['instance'])
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": lambda arguments: _helper.resolve_from_alias(arguments['kwargs'],
+                                                                         ['model'])
+            },
+            {
+                "attribute": "type",
+                "accessor": lambda arguments: 'model.llm.' + _helper.resolve_from_alias(arguments['kwargs'],
+                                                                                        ['model'])
+            }
+        ]
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "this is instruction and user query to LLM",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.extract_messages(arguments['kwargs'])
+                }
+            ]
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "status",
+                    "accessor": lambda arguments: get_status(arguments)
+                },
+                {
+                    "attribute": "status_code",
+                    "accessor": lambda arguments: get_status_code(arguments)
+                },
+                {
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments)
+                }
+            ]
+        },
+        {
+            "name": "metadata",
+            "attributes": [
+                {
+                    "_comment": "this is metadata usage from LLM",
+                    "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'], arguments['instance'])
+                }
+            ]
+        }
+    ]
+}
monocle_apptrace/instrumentation/metamodel/gemini/methods.py (new file)
@@ -0,0 +1,14 @@
+from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+from monocle_apptrace.instrumentation.metamodel.gemini.entities.inference import (
+    INFERENCE,
+)
+
+GEMINI_METHODS = [
+    {
+        "package": "google.genai.models",
+        "object": "Models",
+        "method": "generate_content",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE,
+    }
+]
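GEMINI_METHODS hooks google.genai.models.Models.generate_content, which is what client.models.generate_content resolves to in the google-genai SDK. A hedged sketch of a call that would be traced once instrumentation is active; the model id and credential handling are placeholders.

```python
# Hypothetical traced call; requires google-genai and a valid API key.
from google import genai

client = genai.Client(api_key="<api-key>")  # placeholder credential

# client.models.generate_content resolves to
# google.genai.models.Models.generate_content, the method wrapped by
# task_wrapper above, so this call emits an inference span with the
# data.input / data.output / metadata events defined in INFERENCE.
response = client.models.generate_content(
    model="gemini-2.0-flash",                 # placeholder model id
    contents="Write a haiku about tracing.",
)
print(response.text)
```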
monocle_apptrace/instrumentation/metamodel/haystack/_helper.py
@@ -5,6 +5,8 @@ from monocle_apptrace.instrumentation.common.utils import (
     get_keys_as_tuple,
     get_nested_value,
     try_option,
+    get_exception_message,
+    get_status_code,
 )
 logger = logging.getLogger(__name__)
 
@@ -52,19 +54,25 @@ def extract_question_from_prompt(content):
         logger.warning("Warning: Error occurred in extract_question_from_prompt: %s", str(e))
         return ""
 
-
-def extract_assistant_message(response):
-    try:
-        if "replies" in response:
-            reply = response["replies"][0]
+def extract_assistant_message(arguments):
+    status = get_status_code(arguments)
+    response: str = ""
+    if status == 'success':
+        if "replies" in arguments['result']:
+            reply = arguments['result']["replies"][0]
             if hasattr(reply, 'content'):
-                return [reply.content]
-            if hasattr(reply, 'text'):
-                return [reply.text]
-            return [reply]
-    except Exception as e:
-        logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
-        return []
+                response = reply.content
+            elif hasattr(reply, 'text'):
+                response = reply.text
+            else:
+                response = reply
+    else:
+        if arguments["exception"] is not None:
+            response = get_exception_message(arguments)
+        elif hasattr(response, "error"):
+            response = arguments['result'].error
+
+    return response
 
 
 def get_vectorstore_deployment(my_map):
monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py
@@ -1,7 +1,7 @@
 from monocle_apptrace.instrumentation.metamodel.haystack import (
     _helper,
 )
-from monocle_apptrace.instrumentation.common.utils import get_llm_type
+from monocle_apptrace.instrumentation.common.utils import get_llm_type, get_status, get_status_code
 
 INFERENCE = {
     "type": "inference.framework",
@@ -60,8 +60,16 @@ INFERENCE = {
             "attributes": [
                 {
                     "_comment": "this is response from LLM",
+                    "attribute": "status",
+                    "accessor": lambda arguments: get_status(arguments)
+                },
+                {
+                    "attribute": "status_code",
+                    "accessor": lambda arguments: get_status_code(arguments)
+                },
+                {
                     "attribute": "response",
-                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments)
                 }
             ]
         },
monocle_apptrace/instrumentation/metamodel/langchain/_helper.py
@@ -9,6 +9,8 @@ from monocle_apptrace.instrumentation.common.utils import (
     get_keys_as_tuple,
     get_nested_value,
     try_option,
+    get_exception_message,
+    get_status_code,
 )
 
 
@@ -35,18 +37,23 @@ def extract_messages(args):
         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
         return []
 
+def extract_assistant_message(arguments):
+    status = get_status_code(arguments)
+    response: str = ""
+    if status == 'success':
+        if isinstance(arguments['result'], str):
+            response = arguments['result']
+        if hasattr(arguments['result'], "content"):
+            response = arguments['result'].content
+        if hasattr(arguments['result'], "message") and hasattr(arguments['result'].message, "content"):
+            response = arguments['result'].message.content
+    else:
+        if arguments["exception"] is not None:
+            response = get_exception_message(arguments)
+        elif hasattr(arguments["result"], "error"):
+            response = arguments["result"].error
 
-def extract_assistant_message(response):
-    try:
-        if isinstance(response, str):
-            return [response]
-        if hasattr(response, "content"):
-            return [response.content]
-        if hasattr(response, "message") and hasattr(response.message, "content"):
-            return [response.message.content]
-    except Exception as e:
-        logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
-        return []
+    return response
 
 
 def extract_provider_name(instance):
@@ -131,5 +138,4 @@ def update_span_from_llm_response(response, instance):
             {"completion_tokens": token_usage.get("completion_tokens") or token_usage.get("output_tokens")})
         meta_dict.update({"prompt_tokens": token_usage.get("prompt_tokens") or token_usage.get("input_tokens")})
         meta_dict.update({"total_tokens": token_usage.get("total_tokens")})
-    return meta_dict
-
+    return meta_dict
monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py
@@ -1,7 +1,7 @@
 from monocle_apptrace.instrumentation.metamodel.langchain import (
     _helper,
 )
-from monocle_apptrace.instrumentation.common.utils import resolve_from_alias, get_llm_type
+from monocle_apptrace.instrumentation.common.utils import resolve_from_alias, get_llm_type, get_status, get_status_code
 
 INFERENCE = {
     "type": "inference.framework",
@@ -54,8 +54,16 @@ INFERENCE = {
             "attributes": [
                 {
                     "_comment": "this is result from LLM",
+                    "attribute": "status",
+                    "accessor": lambda arguments: get_status(arguments)
+                },
+                {
+                    "attribute": "status_code",
+                    "accessor": lambda arguments: get_status_code(arguments)
+                },
+                {
                     "attribute": "response",
-                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments)
                 }
            }
                 }
             ]
         },
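The haystack, langchain, botocore, and gemini processors all gain the same status / status_code / response triple on the data.output event. A sketch of the two outcomes that triple is meant to capture, assuming get_status maps "no exception" to 'success' (the exact strings monocle emits are taken from the helpers above).

```python
# Illustrative only: a local stand-in for monocle's get_status, mirroring the
# helper logic shown in the diffs above.
def get_status(arguments):
    return "success" if arguments["exception"] is None else "error"

success_args = {"exception": None, "result": "Paris is the capital of France."}
failure_args = {"exception": TimeoutError("request timed out"), "result": None}

for args in (success_args, failure_args):
    attrs = {
        "status": get_status(args),
        "response": args["result"] if args["exception"] is None else str(args["exception"]),
    }
    print(attrs)
# {'status': 'success', 'response': 'Paris is the capital of France.'}
# {'status': 'error', 'response': 'request timed out'}
```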