monocle-apptrace 0.3.0b6__py3-none-any.whl → 0.3.1b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of monocle-apptrace might be problematic. Click here for more details.

Files changed (38) hide show
  1. monocle_apptrace/__init__.py +1 -0
  2. monocle_apptrace/exporters/aws/s3_exporter.py +20 -6
  3. monocle_apptrace/exporters/aws/s3_exporter_opendal.py +22 -11
  4. monocle_apptrace/exporters/azure/blob_exporter.py +22 -8
  5. monocle_apptrace/exporters/azure/blob_exporter_opendal.py +23 -8
  6. monocle_apptrace/exporters/exporter_processor.py +128 -3
  7. monocle_apptrace/exporters/file_exporter.py +16 -0
  8. monocle_apptrace/exporters/monocle_exporters.py +10 -1
  9. monocle_apptrace/exporters/okahu/okahu_exporter.py +8 -6
  10. monocle_apptrace/instrumentation/__init__.py +1 -0
  11. monocle_apptrace/instrumentation/common/__init__.py +2 -0
  12. monocle_apptrace/instrumentation/common/constants.py +3 -0
  13. monocle_apptrace/instrumentation/common/instrumentor.py +86 -12
  14. monocle_apptrace/instrumentation/common/span_handler.py +11 -4
  15. monocle_apptrace/instrumentation/common/utils.py +46 -17
  16. monocle_apptrace/instrumentation/common/wrapper.py +6 -4
  17. monocle_apptrace/instrumentation/common/wrapper_method.py +3 -1
  18. monocle_apptrace/instrumentation/metamodel/anthropic/__init__.py +0 -0
  19. monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +64 -0
  20. monocle_apptrace/instrumentation/metamodel/anthropic/entities/__init__.py +0 -0
  21. monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +72 -0
  22. monocle_apptrace/instrumentation/metamodel/anthropic/methods.py +22 -0
  23. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +2 -2
  24. monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +2 -1
  25. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +9 -4
  26. monocle_apptrace/instrumentation/metamodel/openai/methods.py +16 -0
  27. monocle_apptrace/instrumentation/metamodel/teamsai/__init__.py +0 -0
  28. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +58 -0
  29. monocle_apptrace/instrumentation/metamodel/teamsai/entities/__init__.py +0 -0
  30. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/__init__.py +0 -0
  31. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +80 -0
  32. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +70 -0
  33. monocle_apptrace/instrumentation/metamodel/teamsai/methods.py +26 -0
  34. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1b1.dist-info}/METADATA +2 -1
  35. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1b1.dist-info}/RECORD +38 -26
  36. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1b1.dist-info}/WHEEL +0 -0
  37. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1b1.dist-info}/licenses/LICENSE +0 -0
  38. {monocle_apptrace-0.3.0b6.dist-info → monocle_apptrace-0.3.1b1.dist-info}/licenses/NOTICE +0 -0
@@ -17,13 +17,13 @@ from opentelemetry.trace import get_tracer
17
17
  from wrapt import wrap_function_wrapper
18
18
  from opentelemetry.trace.propagation import set_span_in_context, _SPAN_KEY
19
19
  from monocle_apptrace.exporters.monocle_exporters import get_monocle_exporter
20
- from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
20
+ from monocle_apptrace.instrumentation.common.span_handler import SpanHandler, NonFrameworkSpanHandler
21
21
  from monocle_apptrace.instrumentation.common.wrapper_method import (
22
22
  DEFAULT_METHODS_LIST,
23
23
  WrapperMethod,
24
24
  MONOCLE_SPAN_HANDLERS
25
25
  )
26
- from monocle_apptrace.instrumentation.common.wrapper import scope_wrapper, ascope_wrapper
26
+ from monocle_apptrace.instrumentation.common.wrapper import scope_wrapper, ascope_wrapper, wrapper_processor
27
27
  from monocle_apptrace.instrumentation.common.utils import (
28
28
  set_scope, remove_scope, http_route_handler, load_scopes, async_wrapper, http_async_route_handler
29
29
  )
@@ -65,13 +65,11 @@ class MonocleInstrumentor(BaseInstrumentor):
65
65
  def instrumented_endpoint_invoke(to_wrap,wrapped, span_name, instance,fn):
66
66
  @wraps(fn)
67
67
  def with_instrumentation(*args, **kwargs):
68
- handler = SpanHandler()
69
- with tracer.start_as_current_span(span_name) as span:
70
- response = fn(*args, **kwargs)
71
- handler.hydrate_span(to_wrap, wrapped=wrapped, instance=instance, args=args, kwargs=kwargs,
72
- result=response, span=span)
73
- return response
74
-
68
+ async_task = inspect.iscoroutinefunction(fn)
69
+ boto_method_to_wrap = to_wrap.copy()
70
+ boto_method_to_wrap['skip_span'] = False
71
+ return wrapper_processor(async_task, tracer, NonFrameworkSpanHandler(),
72
+ boto_method_to_wrap, fn, instance, args, kwargs)
75
73
  return with_instrumentation
76
74
  return instrumented_endpoint_invoke
77
75
 
@@ -158,6 +156,24 @@ def setup_monocle_telemetry(
158
156
  span_handlers: Dict[str,SpanHandler] = None,
159
157
  wrapper_methods: List[Union[dict,WrapperMethod]] = None,
160
158
  union_with_default_methods: bool = True) -> None:
159
+ """
160
+ Set up Monocle telemetry for the application.
161
+
162
+ Parameters
163
+ ----------
164
+ workflow_name : str
165
+ The name of the workflow to be used as the service name in telemetry.
166
+ span_processors : List[SpanProcessor], optional
167
+ Custom span processors to use instead of the default ones. If None,
168
+ BatchSpanProcessors with Monocle exporters will be used.
169
+ span_handlers : Dict[str, SpanHandler], optional
170
+ Dictionary of span handlers to be used by the instrumentor, mapping handler names to handler objects.
171
+ wrapper_methods : List[Union[dict, WrapperMethod]], optional
172
+ Custom wrapper methods for instrumentation. If None, default methods will be used.
173
+ union_with_default_methods : bool, default=True
174
+ If True, combine the provided wrapper_methods with the default methods.
175
+ If False, only use the provided wrapper_methods.
176
+ """
161
177
  resource = Resource(attributes={
162
178
  SERVICE_NAME: workflow_name
163
179
  })
@@ -196,6 +212,16 @@ def set_context_properties(properties: dict) -> None:
196
212
  attach(set_value(SESSION_PROPERTIES_KEY, properties))
197
213
 
198
214
  def start_trace():
215
+ """
216
+ Starts a new trace. All the spans created after this call will be part of the same trace.
217
+ Returns:
218
+ Token: A token representing the attached context for the workflow span.
219
+ This token is to be used later to stop the current trace.
220
+ Returns None if tracing fails.
221
+
222
+ Raises:
223
+ Exception: The function catches all exceptions internally and logs a warning.
224
+ """
199
225
  try:
200
226
  tracer = get_tracer(instrumenting_module_name= MONOCLE_INSTRUMENTOR, tracer_provider= get_tracer_provider())
201
227
  span = tracer.start_span(name = "workflow")
@@ -209,6 +235,14 @@ def start_trace():
209
235
  return None
210
236
 
211
237
  def stop_trace(token) -> None:
238
+ """
239
+ Stop the active trace and detach workflow type if token is provided. All the spans created after this will not be part of the trace.
240
+ Args:
241
+ token: The token that was returned when the trace was started. Used to detach
242
+ workflow type. Can be None in which case only the span is ended.
243
+ Returns:
244
+ None
245
+ """
212
246
  try:
213
247
  _parent_span_context = get_current()
214
248
  if _parent_span_context is not None:
@@ -229,32 +263,67 @@ def is_valid_trace_id_uuid(traceId: str) -> bool:
229
263
  return False
230
264
 
231
265
  def start_scope(scope_name: str, scope_value:str = None) -> object:
266
+ """
267
+ Start a new scope with the given name and an optional value. If no value is provided, a random UUID will be generated.
268
+ All the spans, across traces created after this call will have the scope attached until the scope is stopped.
269
+ Args:
270
+ scope_name: The name of the scope.
271
+ scope_value: Optional value of the scope. If None, a random UUID will be generated.
272
+ Returns:
273
+ Token: A token representing the attached context for the scope. This token is to be used later to stop the current scope.
274
+ """
232
275
  return set_scope(scope_name, scope_value)
233
276
 
234
277
  def stop_scope(token:object) -> None:
278
+ """
279
+ Stop the active scope. All the spans created after this will not have the scope attached.
280
+ Args:
281
+ token: The token that was returned when the scope was started.
282
+ Returns:
283
+ None
284
+ """
235
285
  remove_scope(token)
236
286
  return
237
287
 
288
+ @contextmanager
289
+ def monocle_trace():
290
+ """
291
+ Context manager to start and stop a trace. All the spans created within the encapsulated code will have the same trace ID.
292
+ """
293
+ token = start_trace()
294
+ try:
295
+ yield
296
+ finally:
297
+ stop_trace(token)
298
+
238
299
  @contextmanager
239
300
  def monocle_trace_scope(scope_name: str, scope_value:str = None):
301
+ """
302
+ Context manager to start and stop a scope. All the spans, across traces created within the encapsulated code will have the scope attached.
303
+ Args:
304
+ scope_name: The name of the scope.
305
+ scope_value: Optional value of the scope. If None, a random UUID will be generated."""
240
306
  token = start_scope(scope_name, scope_value)
241
307
  try:
242
308
  yield
243
309
  finally:
244
310
  stop_scope(token)
245
311
 
246
- def monocle_trace_scope_method(scope_name: str):
312
+ def monocle_trace_scope_method(scope_name: str, scope_value:str=None):
313
+ """
314
+ Decorator to start and stop a scope for a method. All the spans, across traces created in the method will have the scope attached.
315
+ """
247
316
  def decorator(func):
248
317
  if inspect.iscoroutinefunction(func):
249
318
  @wraps(func)
250
319
  async def wrapper(*args, **kwargs):
251
- result = async_wrapper(func, scope_name, None, *args, **kwargs)
320
+ result = async_wrapper(func, scope_name, scope_value, None, *args, **kwargs)
252
321
  return result
253
322
  return wrapper
254
323
  else:
255
324
  @wraps(func)
256
325
  def wrapper(*args, **kwargs):
257
- token = start_scope(scope_name)
326
+ token = start_scope(scope_name, scope_value)
258
327
  try:
259
328
  result = func(*args, **kwargs)
260
329
  return result
@@ -264,6 +333,10 @@ def monocle_trace_scope_method(scope_name: str):
264
333
  return decorator
265
334
 
266
335
  def monocle_trace_http_route(func):
336
+ """
337
+ Decorator to continue traces and manage scopes for an HTTP route. It will also initiate new scopes from the HTTP headers if configured in ``monocle_scopes.json``.
338
+ All the spans, across traces created in the route will have the scope attached.
339
+ """
267
340
  if inspect.iscoroutinefunction(func):
268
341
  @wraps(func)
269
342
  async def wrapper(*args, **kwargs):
@@ -286,3 +359,4 @@ class FixedIdGenerator(id_generator.IdGenerator):
286
359
 
287
360
  def generate_trace_id(self) -> int:
288
361
  return self.trace_id
362
+
@@ -3,14 +3,14 @@ import os
3
3
  from importlib.metadata import version
4
4
  from opentelemetry.context import get_value, set_value, attach, detach
5
5
  from opentelemetry.sdk.trace import Span
6
-
6
+ from opentelemetry.trace.status import Status, StatusCode
7
7
  from monocle_apptrace.instrumentation.common.constants import (
8
8
  QUERY,
9
9
  service_name_map,
10
10
  service_type_map,
11
11
  MONOCLE_SDK_VERSION
12
12
  )
13
- from monocle_apptrace.instrumentation.common.utils import set_attribute, get_scopes
13
+ from monocle_apptrace.instrumentation.common.utils import set_attribute, get_scopes, MonocleSpanException
14
14
  from monocle_apptrace.instrumentation.common.constants import WORKFLOW_TYPE_KEY, WORKFLOW_TYPE_GENERIC
15
15
 
16
16
  logger = logging.getLogger(__name__)
@@ -64,9 +64,12 @@ class SpanHandler:
64
64
  """ Set attributes of workflow if this is a root span"""
65
65
  SpanHandler.set_workflow_attributes(to_wrap, span)
66
66
  SpanHandler.set_app_hosting_identifier_attribute(span)
67
+ span.set_status(StatusCode.OK)
67
68
 
68
- def post_task_processing(self, to_wrap, wrapped, instance, args, kwargs, result, span):
69
- pass
69
+
70
+ def post_task_processing(self, to_wrap, wrapped, instance, args, kwargs, result, span:Span):
71
+ if span.status.status_code == StatusCode.UNSET:
72
+ span.set_status(StatusCode.OK)
70
73
 
71
74
  def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span):
72
75
  self.hydrate_attributes(to_wrap, wrapped, instance, args, kwargs, result, span)
@@ -95,6 +98,8 @@ class SpanHandler:
95
98
  result = accessor(arguments)
96
99
  if result and isinstance(result, (str, list)):
97
100
  span.set_attribute(attribute_name, result)
101
+ except MonocleSpanException as e:
102
+ span.set_status(StatusCode.ERROR, e.message)
98
103
  except Exception as e:
99
104
  logger.debug(f"Error processing accessor: {e}")
100
105
  else:
@@ -131,6 +136,8 @@ class SpanHandler:
131
136
  event_attributes[attribute_key] = accessor(arguments)
132
137
  else:
133
138
  event_attributes.update(accessor(arguments))
139
+ except MonocleSpanException as e:
140
+ span.set_status(StatusCode.ERROR, e.message)
134
141
  except Exception as e:
135
142
  logger.debug(f"Error evaluating accessor for attribute '{attribute_key}': {e}")
136
143
  span.add_event(name=event_name, attributes=event_attributes)
@@ -21,6 +21,21 @@ embedding_model_context = {}
21
21
  scope_id_generator = id_generator.RandomIdGenerator()
22
22
  http_scopes:dict[str:str] = {}
23
23
 
24
+ class MonocleSpanException(Exception):
25
+ def __init__(self, err_message:str):
26
+ """
27
+ Monocle exeption to indicate error in span processing.
28
+ Parameters:
29
+ - err_message (str): Error message.
30
+ - status (str): Status code
31
+ """
32
+ super().__init__(err_message)
33
+ self.message = err_message
34
+
35
+ def __str__(self):
36
+ """String representation of the exception."""
37
+ return f"[Monocle Span Error: {self.message}]"
38
+
24
39
  def set_tracer_provider(tracer_provider: TracerProvider):
25
40
  global monocle_tracer_provider
26
41
  monocle_tracer_provider = tracer_provider
@@ -252,35 +267,49 @@ async def http_async_route_handler(func, *args, **kwargs):
252
267
  headers = kwargs['req'].headers
253
268
  else:
254
269
  headers = None
255
- return async_wrapper(func, None, headers, *args, **kwargs)
270
+ return async_wrapper(func, None, None, headers, *args, **kwargs)
256
271
 
257
- def run_async_with_scope(method, scope_name, headers, *args, **kwargs):
272
+ def run_async_with_scope(method, current_context, exceptions, *args, **kwargs):
258
273
  token = None
259
- if scope_name:
260
- token = set_scope(scope_name)
261
- elif headers:
262
- token = extract_http_headers(headers)
263
274
  try:
275
+ if current_context:
276
+ token = attach(current_context)
264
277
  return asyncio.run(method(*args, **kwargs))
278
+ except Exception as e:
279
+ exceptions['exception'] = e
280
+ raise e
265
281
  finally:
266
282
  if token:
267
- remove_scope(token)
283
+ detach(token)
268
284
 
269
- def async_wrapper(method, scope_name=None, headers=None, *args, **kwargs):
285
+ def async_wrapper(method, scope_name=None, scope_value=None, headers=None, *args, **kwargs):
270
286
  try:
271
287
  run_loop = asyncio.get_running_loop()
272
288
  except RuntimeError:
273
289
  run_loop = None
274
290
 
275
- if run_loop and run_loop.is_running():
276
- results = []
277
- thread = threading.Thread(target=lambda: results.append(run_async_with_scope(method, scope_name, headers, *args, **kwargs)))
278
- thread.start()
279
- thread.join()
280
- return_value = results[0] if len(results) > 0 else None
281
- return return_value
282
- else:
283
- return run_async_with_scope(method, scope_name, headers, *args, **kwargs)
291
+ token = None
292
+ exceptions = {}
293
+ if scope_name:
294
+ token = set_scope(scope_name, scope_value)
295
+ elif headers:
296
+ token = extract_http_headers(headers)
297
+ current_context = get_current()
298
+ try:
299
+ if run_loop and run_loop.is_running():
300
+ results = []
301
+ thread = threading.Thread(target=lambda: results.append(run_async_with_scope(method, current_context, exceptions, *args, **kwargs)))
302
+ thread.start()
303
+ thread.join()
304
+ if 'exception' in exceptions:
305
+ raise exceptions['exception']
306
+ return_value = results[0] if len(results) > 0 else None
307
+ return return_value
308
+ else:
309
+ return run_async_with_scope(method, None, exceptions, *args, **kwargs)
310
+ finally:
311
+ if token:
312
+ remove_scope(token)
284
313
 
285
314
  class Option(Generic[T]):
286
315
  def __init__(self, value: Optional[T]):
@@ -31,10 +31,11 @@ def wrapper_processor(async_task: bool, tracer: Tracer, handler: SpanHandler, to
31
31
  try:
32
32
  handler.pre_tracing(to_wrap, wrapped, instance, args, kwargs)
33
33
  skip_scan:bool = to_wrap.get('skip_span') or handler.skip_span(to_wrap, wrapped, instance, args, kwargs)
34
- token = SpanHandler.attach_workflow_type(to_wrap=to_wrap)
34
+ if not to_wrap.get('skip_span'):
35
+ token = SpanHandler.attach_workflow_type(to_wrap=to_wrap)
35
36
  if skip_scan:
36
37
  if async_task:
37
- return_value = async_wrapper(wrapped, None, None, *args, **kwargs)
38
+ return_value = async_wrapper(wrapped, None, None, None, *args, **kwargs)
38
39
  else:
39
40
  return_value = wrapped(*args, **kwargs)
40
41
  else:
@@ -58,7 +59,7 @@ def span_processor(name: str, async_task: bool, tracer: Tracer, handler: SpanHan
58
59
  else:
59
60
  handler.pre_task_processing(to_wrap, wrapped, instance, args, kwargs, span)
60
61
  if async_task:
61
- return_value = async_wrapper(wrapped, None, None, *args, **kwargs)
62
+ return_value = async_wrapper(wrapped, None, None, None, *args, **kwargs)
62
63
  else:
63
64
  return_value = wrapped(*args, **kwargs)
64
65
  handler.hydrate_span(to_wrap, wrapped, instance, args, kwargs, return_value, span)
@@ -86,5 +87,6 @@ def scope_wrapper(tracer: Tracer, handler: SpanHandler, to_wrap, wrapped, instan
86
87
  @with_tracer_wrapper
87
88
  async def ascope_wrapper(tracer: Tracer, handler: SpanHandler, to_wrap, wrapped, instance, args, kwargs):
88
89
  scope_name = to_wrap.get('scope_name', None)
89
- return_value = async_wrapper(wrapped, scope_name, None, *args, **kwargs)
90
+ scope_value = to_wrap.get('scope_value', None)
91
+ return_value = async_wrapper(wrapped, scope_name, scope_value, None, *args, **kwargs)
90
92
  return return_value
@@ -15,6 +15,8 @@ from monocle_apptrace.instrumentation.metamodel.flask.methods import (FLASK_METH
15
15
  from monocle_apptrace.instrumentation.metamodel.flask._helper import FlaskSpanHandler
16
16
  from monocle_apptrace.instrumentation.metamodel.requests.methods import (REQUESTS_METHODS, )
17
17
  from monocle_apptrace.instrumentation.metamodel.requests._helper import RequestSpanHandler
18
+ from monocle_apptrace.instrumentation.metamodel.teamsai.methods import (TEAMAI_METHODS, )
19
+ from monocle_apptrace.instrumentation.metamodel.anthropic.methods import (ANTHROPIC_METHODS, )
18
20
 
19
21
  class WrapperMethod:
20
22
  def __init__(
@@ -61,7 +63,7 @@ class WrapperMethod:
61
63
  def get_span_handler(self) -> SpanHandler:
62
64
  return self.span_handler()
63
65
 
64
- DEFAULT_METHODS_LIST = LANGCHAIN_METHODS + LLAMAINDEX_METHODS + HAYSTACK_METHODS + BOTOCORE_METHODS + FLASK_METHODS + REQUESTS_METHODS + LANGGRAPH_METHODS + OPENAI_METHODS
66
+ DEFAULT_METHODS_LIST = LANGCHAIN_METHODS + LLAMAINDEX_METHODS + HAYSTACK_METHODS + BOTOCORE_METHODS + FLASK_METHODS + REQUESTS_METHODS + LANGGRAPH_METHODS + OPENAI_METHODS + TEAMAI_METHODS + ANTHROPIC_METHODS
65
67
 
66
68
  MONOCLE_SPAN_HANDLERS: Dict[str, SpanHandler] = {
67
69
  "default": SpanHandler(),
@@ -0,0 +1,64 @@
1
+ """
2
+ This module provides utility functions for extracting system, user,
3
+ and assistant messages from various input formats.
4
+ """
5
+
6
+ import logging
7
+ from monocle_apptrace.instrumentation.common.utils import (
8
+ Option,
9
+ get_keys_as_tuple,
10
+ get_nested_value,
11
+ try_option,
12
+ )
13
+
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+ def extract_provider_name(instance):
18
+ provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
19
+ return provider_url.unwrap_or(None)
20
+
21
+ def extract_inference_endpoint(instance):
22
+ inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
23
+ if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
24
+ inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
25
+
26
+ return inference_endpoint.unwrap_or(extract_provider_name(instance))
27
+
28
+ def extract_messages(kwargs):
29
+ """Extract system and user messages"""
30
+ try:
31
+ messages = []
32
+ if 'messages' in kwargs and len(kwargs['messages']) >0:
33
+ for msg in kwargs['messages']:
34
+ if msg.get('content') and msg.get('role'):
35
+ messages.append({msg['role']: msg['content']})
36
+
37
+ return [str(message) for message in messages]
38
+ except Exception as e:
39
+ logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
40
+ return []
41
+
42
+
43
+ def extract_assistant_message(response):
44
+ try:
45
+ if response is not None and hasattr(response,"content") and len(response.content) >0:
46
+ if hasattr(response.content[0],"text"):
47
+ return response.content[0].text
48
+ except (IndexError, AttributeError) as e:
49
+ logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
50
+ return None
51
+
52
+ def update_span_from_llm_response(response):
53
+ meta_dict = {}
54
+ if response is not None and hasattr(response, "usage"):
55
+ if hasattr(response, "usage") and response.usage is not None:
56
+ token_usage = response.usage
57
+ else:
58
+ response_metadata = response.response_metadata
59
+ token_usage = response_metadata.get("token_usage")
60
+ if token_usage is not None:
61
+ meta_dict.update({"completion_tokens": getattr(response.usage, "output_tokens", 0)})
62
+ meta_dict.update({"prompt_tokens": getattr(response.usage, "input_tokens", 0)})
63
+ meta_dict.update({"total_tokens": getattr(response.usage, "input_tokens", 0)+getattr(response.usage, "output_tokens", 0)})
64
+ return meta_dict
@@ -0,0 +1,72 @@
1
+ from monocle_apptrace.instrumentation.metamodel.anthropic import (
2
+ _helper,
3
+ )
4
+ from monocle_apptrace.instrumentation.common.utils import resolve_from_alias, get_llm_type
5
+
6
+ INFERENCE = {
7
+ "type": "inference",
8
+ "attributes": [
9
+ [
10
+ {
11
+ "_comment": "provider type ,name , deployment , inference_endpoint",
12
+ "attribute": "type",
13
+ "accessor": lambda arguments: 'inference.' + (get_llm_type(arguments['instance']) or 'generic')
14
+
15
+ },
16
+ {
17
+ "attribute": "provider_name",
18
+ "accessor": lambda arguments: _helper.extract_provider_name(arguments['instance'])
19
+ },
20
+ {
21
+ "attribute": "deployment",
22
+ "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])
23
+ },
24
+ {
25
+ "attribute": "inference_endpoint",
26
+ "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base', 'endpoint']) or _helper.extract_inference_endpoint(arguments['instance'])
27
+ }
28
+ ],
29
+ [
30
+ {
31
+ "_comment": "LLM Model",
32
+ "attribute": "name",
33
+ "accessor": lambda arguments: resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
34
+ },
35
+ {
36
+ "attribute": "type",
37
+ "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
38
+ }
39
+ ]
40
+ ],
41
+ "events": [
42
+ {"name": "data.input",
43
+ "attributes": [
44
+
45
+ {
46
+ "_comment": "this is instruction and user query to LLM",
47
+ "attribute": "input",
48
+ "accessor": lambda arguments: _helper.extract_messages(arguments['kwargs'])
49
+ }
50
+ ]
51
+ },
52
+ {
53
+ "name": "data.output",
54
+ "attributes": [
55
+ {
56
+ "_comment": "this is result from LLM",
57
+ "attribute": "response",
58
+ "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
59
+ }
60
+ ]
61
+ },
62
+ {
63
+ "name": "metadata",
64
+ "attributes": [
65
+ {
66
+ "_comment": "this is metadata usage from LLM",
67
+ "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'])
68
+ }
69
+ ]
70
+ }
71
+ ]
72
+ }
@@ -0,0 +1,22 @@
1
+ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
2
+ from monocle_apptrace.instrumentation.metamodel.anthropic.entities.inference import (
3
+ INFERENCE,
4
+ )
5
+
6
+ ANTHROPIC_METHODS = [
7
+ {
8
+ "package": "anthropic.resources.messages.messages",
9
+ "object": "Messages",
10
+ "method": "create",
11
+ "wrapper_method": task_wrapper,
12
+ "output_processor": INFERENCE
13
+ },
14
+ {
15
+ "package": "anthropic.resources.messages.messages",
16
+ "object": "AsyncMessages",
17
+ "method": "create",
18
+ "wrapper_method": atask_wrapper,
19
+ "output_processor": INFERENCE
20
+ },
21
+
22
+ ]
@@ -1,7 +1,7 @@
1
1
  from monocle_apptrace.instrumentation.metamodel.botocore import (
2
2
  _helper,
3
3
  )
4
-
4
+ from monocle_apptrace.instrumentation.common.utils import get_llm_type
5
5
  INFERENCE = {
6
6
  "type": "inference",
7
7
  "attributes": [
@@ -9,7 +9,7 @@ INFERENCE = {
9
9
  {
10
10
  "_comment": "provider type , inference_endpoint",
11
11
  "attribute": "type",
12
- "accessor": lambda arguments: 'inference.aws_sagemaker'
12
+ "accessor": lambda arguments: 'inference.'+(get_llm_type(arguments['instance']) or 'generic')
13
13
  },
14
14
  {
15
15
  "attribute": "inference_endpoint",
@@ -1,3 +1,4 @@
1
+ from opentelemetry.context import get_value, set_value, attach, detach
1
2
  from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
2
3
 
3
4
  class BotoCoreSpanHandler(SpanHandler):
@@ -22,4 +23,4 @@ class BotoCoreSpanHandler(SpanHandler):
22
23
  def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value):
23
24
  self._botocore_processor(to_wrap=to_wrap, wrapped=wrapped, instance=instance, return_value=return_value, args=args,
24
25
  kwargs=kwargs)
25
- return super().pre_tracing(to_wrap, wrapped, instance, args, kwargs)
26
+ return super().post_tracing(to_wrap, wrapped, instance, args, kwargs,return_value)
@@ -19,6 +19,10 @@ def extract_messages(kwargs):
19
19
  """Extract system and user messages"""
20
20
  try:
21
21
  messages = []
22
+ if 'instructions' in kwargs:
23
+ messages.append({'instructions': kwargs.get('instructions', {})})
24
+ if 'input' in kwargs:
25
+ messages.append({'input': kwargs.get('input', {})})
22
26
  if 'messages' in kwargs and len(kwargs['messages']) >0:
23
27
  for msg in kwargs['messages']:
24
28
  if msg.get('content') and msg.get('role'):
@@ -32,6 +36,8 @@ def extract_messages(kwargs):
32
36
 
33
37
  def extract_assistant_message(response):
34
38
  try:
39
+ if hasattr(response,"output_text") and len(response.output_text):
40
+ return response.output_text
35
41
  if response is not None and hasattr(response,"choices") and len(response.choices) >0:
36
42
  if hasattr(response.choices[0],"message"):
37
43
  return response.choices[0].message.content
@@ -85,10 +91,9 @@ def update_span_from_llm_response(response):
85
91
  response_metadata = response.response_metadata
86
92
  token_usage = response_metadata.get("token_usage")
87
93
  if token_usage is not None:
88
- meta_dict.update(
89
- {"completion_tokens": getattr(response.usage, "completion_tokens", None)})
90
- meta_dict.update({"prompt_tokens": getattr(response.usage, "prompt_tokens", None)})
91
- meta_dict.update({"total_tokens": getattr(response.usage, "total_tokens", None)})
94
+ meta_dict.update({"completion_tokens": getattr(token_usage,"completion_tokens",None) or getattr(token_usage,"output_tokens",None)})
95
+ meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens", None) or getattr(token_usage, "input_tokens", None)})
96
+ meta_dict.update({"total_tokens": getattr(token_usage, "total_tokens", None)})
92
97
  return meta_dict
93
98
 
94
99
  def extract_vector_input(vector_input: dict):
@@ -40,6 +40,22 @@ OPENAI_METHODS = [
40
40
  "span_name": "openai_embeddings",
41
41
  "span_handler": "non_framework_handler",
42
42
  "output_processor": RETRIEVAL
43
+ },
44
+ {
45
+ "package": "openai.resources.responses",
46
+ "object": "Responses",
47
+ "method": "create",
48
+ "wrapper_method": task_wrapper,
49
+ "span_handler": "non_framework_handler",
50
+ "output_processor": INFERENCE
51
+ },
52
+ {
53
+ "package": "openai.resources.responses",
54
+ "object": "AsyncResponses",
55
+ "method": "create",
56
+ "wrapper_method": atask_wrapper,
57
+ "span_handler": "non_framework_handler",
58
+ "output_processor": INFERENCE
43
59
  }
44
60
 
45
61
  ]