monocle-apptrace 0.3.1__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (38)
  1. monocle_apptrace/exporters/aws/s3_exporter.py +3 -1
  2. monocle_apptrace/exporters/azure/blob_exporter.py +2 -2
  3. monocle_apptrace/exporters/base_exporter.py +10 -4
  4. monocle_apptrace/exporters/file_exporter.py +19 -4
  5. monocle_apptrace/exporters/monocle_exporters.py +3 -3
  6. monocle_apptrace/exporters/okahu/okahu_exporter.py +5 -2
  7. monocle_apptrace/instrumentation/common/constants.py +9 -5
  8. monocle_apptrace/instrumentation/common/instrumentor.py +24 -13
  9. monocle_apptrace/instrumentation/common/span_handler.py +79 -38
  10. monocle_apptrace/instrumentation/common/utils.py +90 -54
  11. monocle_apptrace/instrumentation/common/wrapper.py +193 -40
  12. monocle_apptrace/instrumentation/common/wrapper_method.py +13 -6
  13. monocle_apptrace/instrumentation/metamodel/aiohttp/__init__.py +0 -0
  14. monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +66 -0
  15. monocle_apptrace/instrumentation/metamodel/aiohttp/entities/http.py +51 -0
  16. monocle_apptrace/instrumentation/metamodel/aiohttp/methods.py +13 -0
  17. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +8 -3
  18. monocle_apptrace/instrumentation/metamodel/flask/entities/http.py +0 -1
  19. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +17 -4
  20. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +1 -1
  21. monocle_apptrace/instrumentation/metamodel/haystack/methods.py +8 -1
  22. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +1 -1
  23. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +13 -9
  24. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +1 -1
  25. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +14 -0
  26. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +26 -5
  27. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +184 -26
  28. monocle_apptrace/instrumentation/metamodel/openai/methods.py +6 -6
  29. monocle_apptrace/instrumentation/metamodel/requests/_helper.py +1 -1
  30. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +55 -5
  31. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +13 -33
  32. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +24 -20
  33. monocle_apptrace/instrumentation/metamodel/teamsai/methods.py +54 -8
  34. {monocle_apptrace-0.3.1.dist-info → monocle_apptrace-0.4.0.dist-info}/METADATA +22 -18
  35. {monocle_apptrace-0.3.1.dist-info → monocle_apptrace-0.4.0.dist-info}/RECORD +38 -34
  36. {monocle_apptrace-0.3.1.dist-info → monocle_apptrace-0.4.0.dist-info}/WHEEL +0 -0
  37. {monocle_apptrace-0.3.1.dist-info → monocle_apptrace-0.4.0.dist-info}/licenses/LICENSE +0 -0
  38. {monocle_apptrace-0.3.1.dist-info → monocle_apptrace-0.4.0.dist-info}/licenses/NOTICE +0 -0

monocle_apptrace/instrumentation/metamodel/aiohttp/methods.py

@@ -0,0 +1,13 @@
+from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper
+from monocle_apptrace.instrumentation.metamodel.aiohttp.entities.http import AIO_HTTP_PROCESSOR
+
+AIOHTTP_METHODS = [
+    {
+        "package": "aiohttp.web_app",
+        "object": "Application",
+        "method": "_handle",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "aiohttp_handler",
+        "output_processor": AIO_HTTP_PROCESSOR
+    }
+]
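
Note: this new table registers aiohttp instrumentation — the instrumentor wraps aiohttp.web_app.Application._handle with the async task wrapper so every request served by an aiohttp application produces an HTTP span. A minimal usage sketch, assuming setup_monocle_telemetry() from instrumentor.py (also changed in this release) is the entry point that loads these method tables; the workflow name is illustrative:

    from aiohttp import web
    from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry

    setup_monocle_telemetry(workflow_name="aiohttp-demo")  # assumed entry point

    async def hello(request: web.Request) -> web.Response:
        return web.Response(text="hello")

    app = web.Application()
    app.router.add_get("/", hello)
    web.run_app(app)  # each request now flows through the wrapped _handle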

monocle_apptrace/instrumentation/metamodel/flask/_helper.py

@@ -2,6 +2,8 @@ import logging
 from threading import local
 from monocle_apptrace.instrumentation.common.utils import extract_http_headers, clear_http_scopes
 from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+from monocle_apptrace.instrumentation.common.constants import HTTP_SUCCESS_CODES
+from monocle_apptrace.instrumentation.common.utils import MonocleSpanException
 from urllib.parse import unquote
 from opentelemetry.context import get_current
 from opentelemetry.trace import Span, get_current_span
@@ -28,12 +30,15 @@ def get_body(args) -> dict:
 def extract_response(instance) -> str:
     if hasattr(instance, 'data') and hasattr(instance, 'content_length'):
         response = instance.data[0:max(instance.content_length, MAX_DATA_LENGTH)]
-    else:
+    else:
         response = ""
     return response

 def extract_status(instance) -> str:
-    status = instance.status if hasattr(instance, 'status') else ""
+    status = f"{instance.status_code}" if hasattr(instance, 'status_code') else ""
+    if status not in HTTP_SUCCESS_CODES:
+        error_message = extract_response(instance)
+        raise MonocleSpanException(f"error: {status} - {error_message}")
     return status

 def flask_pre_tracing(args):
@@ -65,7 +70,7 @@ class FlaskResponseSpanHandler(SpanHandler):
             if _parent_span_context is not None:
                 parent_span: Span = _parent_span_context.get(_SPAN_KEY, None)
                 if parent_span is not None:
-                    self.hydrate_events(to_wrap, wrapped, instance, args, kwargs, return_value, parent_span)
+                    self.hydrate_events(to_wrap, wrapped, instance, args, kwargs, return_value, parent_span=parent_span)
         except Exception as e:
             logger.info(f"Failed to propogate flask response: {e}")
         super().post_tracing(to_wrap, wrapped, instance, args, kwargs, return_value)
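
Note: extract_status now formats the numeric status_code attribute (rather than the status line string) and raises MonocleSpanException for non-success codes, so the span is marked as an error instead of silently recording the code. A hedged illustration with a stand-in response object (FakeResponse is test scaffolding, not part of the package; it assumes HTTP_SUCCESS_CODES holds string codes such as "200"):

    class FakeResponse:
        status_code = 404
        data = b"not found"
        content_length = 9

    try:
        extract_status(FakeResponse())
    except MonocleSpanException as e:
        print(e)  # "error: 404 - b'not found'" — the truncated body is attached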

monocle_apptrace/instrumentation/metamodel/flask/entities/http.py

@@ -18,7 +18,6 @@ FLASK_HTTP_PROCESSOR = {
 }

 FLASK_RESPONSE_PROCESSOR = {
-    "type": "http.process",
     "events": [
         {
             "name": "data.input",

monocle_apptrace/instrumentation/metamodel/haystack/_helper.py

@@ -1,4 +1,5 @@
 import logging
+
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
     get_keys_as_tuple,
@@ -11,13 +12,19 @@ logger = logging.getLogger(__name__)
 def extract_messages(kwargs):
     try:
         messages = []
+        system_message, user_message = None, None
         if isinstance(kwargs, dict):
             if 'system_prompt' in kwargs and kwargs['system_prompt']:
                 system_message = kwargs['system_prompt']
-                messages.append({"system" : system_message})
             if 'prompt' in kwargs and kwargs['prompt']:
                 user_message = extract_question_from_prompt(kwargs['prompt'])
+            if 'messages' in kwargs and len(kwargs['messages'])>1:
+                system_message = kwargs['messages'][0].text
+                user_message = kwargs['messages'][1].text
+            if system_message and user_message:
+                messages.append({"system": system_message})
                 messages.append({"user": user_message})
+
         return [str(message) for message in messages]
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
@@ -52,6 +59,8 @@ def extract_assistant_message(response):
         reply = response["replies"][0]
         if hasattr(reply, 'content'):
             return [reply.content]
+        if hasattr(reply, 'text'):
+            return [reply.text]
         return [reply]
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
@@ -108,15 +117,19 @@ def extract_embeding_model(instance):

 def update_span_from_llm_response(response, instance):
     meta_dict = {}
-    if response is not None and isinstance(response, dict) and "meta" in response:
-        token_usage = response["meta"][0]["usage"]
+    token_usage = None
+    if response is not None and isinstance(response, dict):
+        if "meta" in response:
+            token_usage = response["meta"][0]["usage"]
+        elif "replies" in response: # and "meta" in response["replies"][0]:
+            token_usage = response["replies"][0].meta["usage"]
     if token_usage is not None:
         temperature = instance.__dict__.get("temperature", None)
         meta_dict.update({"temperature": temperature})
         meta_dict.update(
             {"completion_tokens": token_usage.get("completion_tokens") or token_usage.get("output_tokens")})
         meta_dict.update({"prompt_tokens": token_usage.get("prompt_tokens") or token_usage.get("input_tokens")})
-        meta_dict.update({"total_tokens": token_usage.get("total_tokens")})
+        meta_dict.update({"total_tokens": token_usage.get("total_tokens") or token_usage.get("completion_tokens") + token_usage.get("prompt_tokens")})
     return meta_dict

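
Note: update_span_from_llm_response now finds usage either at response["meta"][0]["usage"] or on the first reply's meta, and it accepts both OpenAI-style (prompt_tokens/completion_tokens) and Anthropic-style (input_tokens/output_tokens) field names. A worked example of the total_tokens fallback, using a hypothetical usage dict that lacks the field:

    token_usage = {"completion_tokens": 34, "prompt_tokens": 12}

    total = token_usage.get("total_tokens") or (
        token_usage.get("completion_tokens") + token_usage.get("prompt_tokens")
    )
    assert total == 46  # the component sum is used when total_tokens is absent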

monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py

@@ -4,7 +4,7 @@ from monocle_apptrace.instrumentation.metamodel.haystack import (
 from monocle_apptrace.instrumentation.common.utils import get_llm_type

 INFERENCE = {
-    "type": "inference",
+    "type": "inference.framework",
     "attributes": [
         [
             {

monocle_apptrace/instrumentation/metamodel/haystack/methods.py

@@ -36,5 +36,12 @@ HAYSTACK_METHODS = [
         "object": "Pipeline",
         "method": "run",
         "wrapper_method": task_wrapper
-    }
+    },
+    {
+        "package": "haystack_integrations.components.generators.anthropic",
+        "object": "AnthropicChatGenerator",
+        "method": "run",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    },
 ]

monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py

@@ -4,7 +4,7 @@ from monocle_apptrace.instrumentation.metamodel.langchain import (
 from monocle_apptrace.instrumentation.common.utils import resolve_from_alias, get_llm_type

 INFERENCE = {
-    "type": "inference",
+    "type": "inference.framework",
     "attributes": [
         [
             {

monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py

@@ -96,12 +96,19 @@ def extract_query_from_content(content):


 def extract_provider_name(instance):
-    provider_url = try_option(getattr, instance, 'api_base').and_then(lambda url: urlparse(url).hostname)
-    return provider_url
+    if hasattr(instance, 'api_base'):
+        provider_url: Option[str] = try_option(getattr, instance, 'api_base').and_then(lambda url: urlparse(url).hostname)
+    if hasattr(instance, '_client'):
+        provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
+    return provider_url.unwrap_or(None)


 def extract_inference_endpoint(instance):
-    inference_endpoint = try_option(getattr, instance._client.sdk_configuration, 'server_url').map(str)
+    if hasattr(instance, '_client'):
+        if hasattr(instance._client, 'sdk_configuration'):
+            inference_endpoint: Option[str] = try_option(getattr, instance._client.sdk_configuration, 'server_url').map(str)
+        if hasattr(instance._client, 'base_url'):
+            inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
     return inference_endpoint.unwrap_or(extract_provider_name(instance))


@@ -163,10 +170,7 @@ def update_span_from_llm_response(response, instance):
     if token_usage is not None:
         temperature = instance.__dict__.get("temperature", None)
         meta_dict.update({"temperature": temperature})
-        if getattr(token_usage, "completion_tokens", None):
-            meta_dict.update({"completion_tokens": getattr(token_usage, "completion_tokens")})
-        if getattr(token_usage, "prompt_tokens", None):
-            meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens")})
-        if getattr(token_usage, "total_tokens", None):
-            meta_dict.update({"total_tokens": getattr(token_usage, "total_tokens")})
+        meta_dict.update({"completion_tokens": getattr(token_usage, "completion_tokens", None) or getattr(token_usage, "output_tokens", None)})
+        meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens", None) or getattr(token_usage, "input_tokens", None)})
+        meta_dict.update({"total_tokens": getattr(token_usage, "total_tokens", None) or getattr(token_usage, "output_tokens", None) + getattr(token_usage, "input_tokens", None)})
     return meta_dict
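
Note: both helpers lean on the package's Option/try_option combinators to probe attributes that may or may not exist on a given client. A minimal sketch of the pattern, assuming try_option(fn, *args) returns an empty Option when the call raises and unwrap_or() supplies the fallback (FakeClient is illustrative scaffolding):

    from monocle_apptrace.instrumentation.common.utils import try_option

    class FakeClient:
        base_url = "https://api.example.com/v1"  # stand-in for an SDK client attribute

    endpoint = try_option(getattr, FakeClient(), "base_url").map(str).unwrap_or(None)
    print(endpoint)  # "https://api.example.com/v1"; None if the attribute were missing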

monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py

@@ -4,7 +4,7 @@ from monocle_apptrace.instrumentation.metamodel.llamaindex import (
 from monocle_apptrace.instrumentation.common.utils import resolve_from_alias, get_llm_type

 INFERENCE = {
-    "type": "inference",
+    "type": "inference.framework",
     "attributes": [
         [
             {

monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py

@@ -84,5 +84,19 @@ LLAMAINDEX_METHODS = [
         "method": "chat",
         "wrapper_method": task_wrapper,
         "output_processor": AGENT
+    },
+    {
+        "package": "llama_index.llms.anthropic",
+        "object": "Anthropic",
+        "method": "chat",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "llama_index.llms.anthropic",
+        "object": "Anthropic",
+        "method": "achat",
+        "wrapper_method": atask_wrapper,
+        "output_processor": INFERENCE
     }
 ]
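
Note: with these two entries, Anthropic chat calls made directly through LlamaIndex are traced in both sync (chat) and async (achat) form. A sketch of a call path that would now emit an inference.framework span — the model name is illustrative, and this assumes llama-index-llms-anthropic plus an Anthropic API key:

    from llama_index.core.llms import ChatMessage
    from llama_index.llms.anthropic import Anthropic

    llm = Anthropic(model="claude-3-5-sonnet-latest")  # illustrative model id
    resp = llm.chat([ChatMessage(role="user", content="Say hello.")])  # traced via the new "chat" entry
    print(resp.message.content)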

monocle_apptrace/instrumentation/metamodel/openai/_helper.py

@@ -6,11 +6,11 @@ and assistant messages from various input formats.
 import logging
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
-    get_keys_as_tuple,
-    get_nested_value,
     try_option,
+    get_exception_message,
+    get_parent_span
 )
-
+from monocle_apptrace.instrumentation.common.span_handler import NonFrameworkSpanHandler, WORKFLOW_TYPE_MAP

 logger = logging.getLogger(__name__)
@@ -34,8 +34,11 @@ def extract_messages(kwargs):
         return []


-def extract_assistant_message(response):
+def extract_assistant_message(arguments):
     try:
+        if arguments["exception"] is not None:
+            return get_exception_message(arguments)
+        response = arguments["result"]
         if hasattr(response,"output_text") and len(response.output_text):
             return response.output_text
         if response is not None and hasattr(response,"choices") and len(response.choices) >0:
@@ -114,4 +117,22 @@ def get_inference_type(instance):
     if inference_type.unwrap_or(None):
         return 'azure_openai'
     else:
-        return 'openai'
+        return 'openai'
+
+class OpenAISpanHandler(NonFrameworkSpanHandler):
+    def is_teams_span_in_progress(self) -> bool:
+        return self.is_framework_span_in_progess() and self.get_workflow_name_in_progress() == WORKFLOW_TYPE_MAP["teams.ai"]
+
+    # If openAI is being called by Teams AI SDK, then retain the metadata part of the span events
+    def skip_processor(self, to_wrap, wrapped, instance, span, args, kwargs) -> list[str]:
+        if self.is_teams_span_in_progress():
+            return ["attributes", "events.data.input", "events.data.output"]
+        else:
+            return super().skip_processor(to_wrap, wrapped, instance, span, args, kwargs)
+
+    def hydrate_events(self, to_wrap, wrapped, instance, args, kwargs, ret_result, span, parent_span=None, ex:Exception=None) -> bool:
+        # If openAI is being called by Teams AI SDK, then copy parent
+        if self.is_teams_span_in_progress() and ex is None:
+            return super().hydrate_events(to_wrap, wrapped, instance, args, kwargs, ret_result, span=parent_span, parent_span=None, ex=ex)
+
+        return super().hydrate_events(to_wrap, wrapped, instance, args, kwargs, ret_result, span, parent_span=parent_span, ex=ex)

monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py

@@ -1,71 +1,229 @@
+import logging
+import random
+import time
+from types import SimpleNamespace
 from monocle_apptrace.instrumentation.metamodel.openai import (
     _helper,
 )
-from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
+from monocle_apptrace.instrumentation.common.utils import (
+    patch_instance_method,
+    resolve_from_alias,
+    get_status,
+    get_exception_status_code
+)
+
+logger = logging.getLogger(__name__)
+
+
+def process_stream(to_wrap, response, span_processor):
+    waiting_for_first_token = True
+    stream_start_time = time.time_ns()
+    first_token_time = stream_start_time
+    stream_closed_time = None
+    accumulated_response = ""
+    token_usage = None
+    accumulated_temp_list = []
+
+    if to_wrap and hasattr(response, "__iter__"):
+        original_iter = response.__iter__
+
+        def new_iter(self):
+            nonlocal waiting_for_first_token, first_token_time, stream_closed_time, accumulated_response, token_usage
+
+            for item in original_iter():
+                try:
+                    if (
+                        item.choices
+                        and item.choices[0].delta
+                        and item.choices[0].delta.content
+                    ):
+                        if waiting_for_first_token:
+                            waiting_for_first_token = False
+                            first_token_time = time.time_ns()
+
+                        accumulated_response += item.choices[0].delta.content
+                        # token_usage = item.usage
+                    elif item.object == "chat.completion.chunk" and item.usage:
+                        # Handle the case where the response is a chunk
+                        token_usage = item.usage
+                        stream_closed_time = time.time_ns()
+
+                except Exception as e:
+                    logger.warning(
+                        "Warning: Error occurred while processing item in new_iter: %s",
+                        str(e),
+                    )
+                finally:
+                    accumulated_temp_list.append(item)
+                    yield item
+
+            if span_processor:
+                ret_val = SimpleNamespace(
+                    type="stream",
+                    timestamps={
+                        "data.input": int(stream_start_time),
+                        "data.output": int(first_token_time),
+                        "metadata": int(stream_closed_time or time.time_ns()),
+                    },
+                    output_text=accumulated_response,
+                    usage=token_usage,
+                )
+                span_processor(ret_val)
+
+        patch_instance_method(response, "__iter__", new_iter)
+
+    if to_wrap and hasattr(response, "__aiter__"):
+        original_iter = response.__aiter__
+
+        async def new_aiter(self):
+            nonlocal waiting_for_first_token, first_token_time, stream_closed_time, accumulated_response, token_usage
+
+            async for item in original_iter():
+                try:
+                    if (
+                        item.choices
+                        and item.choices[0].delta
+                        and item.choices[0].delta.content
+                    ):
+                        if waiting_for_first_token:
+                            waiting_for_first_token = False
+                            first_token_time = time.time_ns()
+
+                        accumulated_response += item.choices[0].delta.content
+                        # token_usage = item.usage
+                    elif item.object == "chat.completion.chunk" and item.usage:
+                        # Handle the case where the response is a chunk
+                        token_usage = item.usage
+                        stream_closed_time = time.time_ns()
+
+                except Exception as e:
+                    logger.warning(
+                        "Warning: Error occurred while processing item in new_aiter: %s",
+                        str(e),
+                    )
+                finally:
+                    accumulated_temp_list.append(item)
+                    yield item
+
+            if span_processor:
+                ret_val = SimpleNamespace(
+                    type="stream",
+                    timestamps={
+                        "data.input": int(stream_start_time),
+                        "data.output": int(first_token_time),
+                        "metadata": int(stream_closed_time or time.time_ns()),
+                    },
+                    output_text=accumulated_response,
+                    usage=token_usage,
+                )
+                span_processor(ret_val)
+
+        patch_instance_method(response, "__aiter__", new_aiter)
+

 INFERENCE = {
     "type": "inference",
+    "is_auto_close": lambda kwargs: kwargs.get("stream", False) is False,
+    "response_processor": process_stream,
     "attributes": [
         [
             {
                 "_comment": "provider type ,name , deployment , inference_endpoint",
                 "attribute": "type",
-                "accessor": lambda arguments: 'inference.' + (_helper.get_inference_type(arguments['instance'])) or 'openai'
+                "accessor": lambda arguments: "inference."
+                + (_helper.get_inference_type(arguments["instance"]))
+                or "openai",
             },
             {
                 "attribute": "provider_name",
-                "accessor": lambda arguments: _helper.extract_provider_name(arguments['instance'])
+                "accessor": lambda arguments: _helper.extract_provider_name(
+                    arguments["instance"]
+                ),
             },
             {
                 "attribute": "deployment",
-                "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["instance"].__dict__,
+                    [
+                        "engine",
+                        "azure_deployment",
+                        "deployment_name",
+                        "deployment_id",
+                        "deployment",
+                    ],
+                ),
             },
             {
                 "attribute": "inference_endpoint",
-                "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base', 'endpoint']) or _helper.extract_inference_endpoint(arguments['instance'])
-            }
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["instance"].__dict__,
+                    ["azure_endpoint", "api_base", "endpoint"],
+                )
+                or _helper.extract_inference_endpoint(arguments["instance"]),
+            },
         ],
         [
             {
                 "_comment": "LLM Model",
                 "attribute": "name",
-                "accessor": lambda arguments: resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["kwargs"],
+                    ["model", "model_name", "endpoint_name", "deployment_name"],
+                ),
             },
             {
                 "attribute": "type",
-                "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
-            }
-        ]
+                "accessor": lambda arguments: "model.llm."
+                + resolve_from_alias(
+                    arguments["kwargs"],
+                    ["model", "model_name", "endpoint_name", "deployment_name"],
+                ),
+            },
+        ],
     ],
     "events": [
-        {"name": "data.input",
-         "attributes": [
-
-             {
-                 "_comment": "this is instruction and user query to LLM",
-                 "attribute": "input",
-                 "accessor": lambda arguments: _helper.extract_messages(arguments['kwargs'])
-             }
-         ]
-         },
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "this is instruction and user query to LLM",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.extract_messages(
+                        arguments["kwargs"]
+                    ),
+                }
+            ],
+        },
         {
             "name": "data.output",
             "attributes": [
                 {
                     "_comment": "this is result from LLM",
                     "attribute": "response",
-                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+                    "accessor": lambda arguments: _helper.extract_assistant_message(
+                        arguments,
+                    ),
+                },
+                {
+                    "attribute": "status",
+                    "accessor": lambda arguments: get_status(arguments)
+                },
+                {
+                    "attribute": "status_code",
+                    "accessor": lambda arguments: get_exception_status_code(arguments)
                 }
-            ]
+            ],
         },
         {
             "name": "metadata",
             "attributes": [
                 {
                     "_comment": "this is metadata usage from LLM",
-                    "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'])
+                    "accessor": lambda arguments: _helper.update_span_from_llm_response(
+                        arguments["result"]
+                    ),
                 }
-            ]
-        }
-    ]
+            ],
+        },
+    ],
 }
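
Note: for streaming calls, is_auto_close keeps the span open (stream=True disables auto-close) and process_stream patches the response's __iter__/__aiter__ so that first-token time, accumulated delta text, and the final usage chunk are captured while the caller consumes the stream; a synthetic result is handed to the span processor only once the stream is exhausted. The core wrap-and-re-yield technique, as a generic sketch (names are illustrative, not the package API):

    import time

    def observe(chunks, on_done):
        """Re-yield each chunk unchanged while recording timing and content."""
        first_token_time = None
        accumulated = []
        for item in chunks:
            if first_token_time is None:
                first_token_time = time.time_ns()  # time-to-first-token marker
            accumulated.append(item)
            yield item  # the consumer sees the stream as-is
        on_done("".join(accumulated), first_token_time)  # fires after full consumption

    # The summary callback runs only once the consumer drains the stream:
    for piece in observe(iter(["Hel", "lo"]), lambda text, t: print("done:", text)):
        pass  # prints "done: Hello" after the last chunk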

monocle_apptrace/instrumentation/metamodel/openai/methods.py

@@ -12,7 +12,7 @@ OPENAI_METHODS = [
         "object": "Completions",
         "method": "create",
         "wrapper_method": task_wrapper,
-        "span_handler": "non_framework_handler",
+        "span_handler": "openai_handler",
         "output_processor": INFERENCE
     },
     {
@@ -20,7 +20,7 @@ OPENAI_METHODS = [
         "object": "AsyncCompletions",
         "method": "create",
         "wrapper_method": atask_wrapper,
-        "span_handler": "non_framework_handler",
+        "span_handler": "openai_handler",
         "output_processor": INFERENCE
     },
     {
@@ -28,7 +28,7 @@ OPENAI_METHODS = [
         "object": "Embeddings",
         "method": "create",
         "wrapper_method": task_wrapper,
-        "span_handler": "non_framework_handler",
+        "span_handler": "openai_handler",
         "output_processor": RETRIEVAL
     },
     {
@@ -36,7 +36,7 @@ OPENAI_METHODS = [
         "object": "AsyncEmbeddings",
         "method": "create",
         "wrapper_method": atask_wrapper,
-        "span_handler": "non_framework_handler",
+        "span_handler": "openai_handler",
         "output_processor": RETRIEVAL
     },
     {
@@ -44,7 +44,7 @@ OPENAI_METHODS = [
         "object": "Responses",
         "method": "create",
         "wrapper_method": task_wrapper,
-        "span_handler": "non_framework_handler",
+        "span_handler": "openai_handler",
         "output_processor": INFERENCE
     },
     {
@@ -52,7 +52,7 @@ OPENAI_METHODS = [
         "object": "AsyncResponses",
         "method": "create",
         "wrapper_method": atask_wrapper,
-        "span_handler": "non_framework_handler",
+        "span_handler": "openai_handler",
         "output_processor": INFERENCE
     }


monocle_apptrace/instrumentation/metamodel/requests/_helper.py

@@ -30,7 +30,7 @@ def extract_response(result) -> str:
     return result.text if hasattr(result, 'text') else str(result)

 def extract_status(result) -> str:
-    return f"{result.status_code} {result.reason}"
+    return f"{result.status_code}"


 def request_pre_task_processor(kwargs):

monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py

@@ -1,4 +1,12 @@
 from monocle_apptrace.instrumentation.common.utils import MonocleSpanException
+from monocle_apptrace.instrumentation.common.utils import (
+    Option,
+    get_keys_as_tuple,
+    get_nested_value,
+    try_option,
+    get_exception_message,
+    get_exception_status_code
+)
 def capture_input(arguments):
     """
     Captures the input from Teams AI state.
@@ -51,8 +59,50 @@ def capture_prompt_info(arguments):
     except Exception as e:
         return f"Error capturing prompt: {str(e)}"

-def status_check(arguments):
-    if hasattr(arguments["result"], "error") and arguments["result"].error is not None:
-        error_msg:str = arguments["result"].error
-        error_code:str = arguments["result"].status if hasattr(arguments["result"], "status") else "unknown"
-        raise MonocleSpanException(f"Error: {error_code} - {error_msg}")
+def get_status_code(arguments):
+    if arguments["exception"] is not None:
+        return get_exception_status_code(arguments)
+    elif hasattr(arguments["result"], "status"):
+        return arguments["result"].status
+    else:
+        return 'success'
+
+def get_status(arguments):
+    if arguments["exception"] is not None:
+        return 'error'
+    elif get_status_code(arguments) == 'success':
+        return 'success'
+    else:
+        return 'error'
+
+def get_response(arguments) -> str:
+    status = get_status_code(arguments)
+    response:str = ""
+    if status == 'success':
+        if hasattr(arguments["result"], "message"):
+            response = arguments["result"].message.content
+        else:
+            response = str(arguments["result"])
+    else:
+        if arguments["exception"] is not None:
+            response = get_exception_message(arguments)
+        elif hasattr(arguments["result"], "error"):
+            response = arguments["result"].error
+    return response
+
+def check_status(arguments):
+    status = get_status_code(arguments)
+    if status != 'success':
+        raise MonocleSpanException(f"{status}")
+
+def extract_provider_name(instance):
+    provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
+    return provider_url.unwrap_or(None)
+
+
+def extract_inference_endpoint(instance):
+    inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
+    if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
+        inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
+    return inference_endpoint.unwrap_or(extract_provider_name(instance))
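
Note: the old status_check is replaced by composable helpers — get_status_code prefers the exception's status code, then result.status, else 'success'; get_status collapses that to 'success'/'error'; get_response picks the message content, the exception message, or the error field. A hedged illustration with stand-in objects (the arguments dict's "result"/"exception" shape follows the wrapper convention visible above):

    class FakeMessage:
        content = "hi"

    class FakeResult:
        status = "success"
        message = FakeMessage()

    arguments = {"result": FakeResult(), "exception": None}
    print(get_status_code(arguments))  # "success"
    print(get_status(arguments))       # "success"
    print(get_response(arguments))     # "hi"
    check_status(arguments)            # no-op here; raises MonocleSpanException on failure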