monocle-apptrace 0.3.0b1__py3-none-any.whl → 0.3.0b3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of monocle-apptrace might be problematic.
Files changed (74)
  1. monocle_apptrace/exporters/aws/s3_exporter.py +1 -1
  2. monocle_apptrace/exporters/aws/s3_exporter_opendal.py +126 -0
  3. monocle_apptrace/exporters/azure/blob_exporter_opendal.py +147 -0
  4. monocle_apptrace/exporters/monocle_exporters.py +38 -20
  5. monocle_apptrace/instrumentation/__init__.py +0 -0
  6. monocle_apptrace/instrumentation/common/__init__.py +0 -0
  7. monocle_apptrace/{constants.py → instrumentation/common/constants.py} +13 -0
  8. monocle_apptrace/instrumentation/common/instrumentor.py +208 -0
  9. monocle_apptrace/instrumentation/common/span_handler.py +154 -0
  10. monocle_apptrace/instrumentation/common/utils.py +171 -0
  11. monocle_apptrace/instrumentation/common/wrapper.py +69 -0
  12. monocle_apptrace/instrumentation/common/wrapper_method.py +45 -0
  13. monocle_apptrace/instrumentation/metamodel/__init__.py +0 -0
  14. monocle_apptrace/instrumentation/metamodel/botocore/__init__.py +0 -0
  15. monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +126 -0
  16. monocle_apptrace/instrumentation/metamodel/botocore/entities/__init__.py +0 -0
  17. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +65 -0
  18. monocle_apptrace/instrumentation/metamodel/botocore/methods.py +16 -0
  19. monocle_apptrace/instrumentation/metamodel/haystack/__init__.py +0 -0
  20. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +127 -0
  21. monocle_apptrace/instrumentation/metamodel/haystack/entities/__init__.py +0 -0
  22. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +76 -0
  23. monocle_apptrace/instrumentation/metamodel/haystack/entities/retrieval.py +61 -0
  24. monocle_apptrace/instrumentation/metamodel/haystack/methods.py +42 -0
  25. monocle_apptrace/instrumentation/metamodel/langchain/__init__.py +0 -0
  26. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +121 -0
  27. monocle_apptrace/instrumentation/metamodel/langchain/entities/__init__.py +0 -0
  28. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +71 -0
  29. monocle_apptrace/instrumentation/metamodel/langchain/entities/retrieval.py +58 -0
  30. monocle_apptrace/instrumentation/metamodel/langchain/methods.py +105 -0
  31. monocle_apptrace/instrumentation/metamodel/llamaindex/__init__.py +0 -0
  32. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +154 -0
  33. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/__init__.py +0 -0
  34. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +71 -0
  35. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/retrieval.py +57 -0
  36. monocle_apptrace/{metamodel/maps/llamaindex_methods.json → instrumentation/metamodel/llamaindex/methods.py} +28 -31
  37. {monocle_apptrace-0.3.0b1.dist-info → monocle_apptrace-0.3.0b3.dist-info}/METADATA +17 -2
  38. monocle_apptrace-0.3.0b3.dist-info/RECORD +48 -0
  39. {monocle_apptrace-0.3.0b1.dist-info → monocle_apptrace-0.3.0b3.dist-info}/WHEEL +1 -1
  40. monocle_apptrace/botocore/__init__.py +0 -9
  41. monocle_apptrace/haystack/__init__.py +0 -9
  42. monocle_apptrace/haystack/wrap_pipeline.py +0 -63
  43. monocle_apptrace/instrumentor.py +0 -121
  44. monocle_apptrace/langchain/__init__.py +0 -9
  45. monocle_apptrace/llamaindex/__init__.py +0 -16
  46. monocle_apptrace/message_processing.py +0 -80
  47. monocle_apptrace/metamodel/README.md +0 -47
  48. monocle_apptrace/metamodel/entities/README.md +0 -77
  49. monocle_apptrace/metamodel/entities/app_hosting_types.json +0 -29
  50. monocle_apptrace/metamodel/entities/entities.json +0 -49
  51. monocle_apptrace/metamodel/entities/inference_types.json +0 -33
  52. monocle_apptrace/metamodel/entities/model_types.json +0 -41
  53. monocle_apptrace/metamodel/entities/vector_store_types.json +0 -25
  54. monocle_apptrace/metamodel/entities/workflow_types.json +0 -22
  55. monocle_apptrace/metamodel/maps/attributes/inference/botocore_entities.json +0 -27
  56. monocle_apptrace/metamodel/maps/attributes/inference/haystack_entities.json +0 -57
  57. monocle_apptrace/metamodel/maps/attributes/inference/langchain_entities.json +0 -57
  58. monocle_apptrace/metamodel/maps/attributes/inference/llamaindex_entities.json +0 -57
  59. monocle_apptrace/metamodel/maps/attributes/retrieval/haystack_entities.json +0 -31
  60. monocle_apptrace/metamodel/maps/attributes/retrieval/langchain_entities.json +0 -31
  61. monocle_apptrace/metamodel/maps/attributes/retrieval/llamaindex_entities.json +0 -31
  62. monocle_apptrace/metamodel/maps/botocore_methods.json +0 -13
  63. monocle_apptrace/metamodel/maps/haystack_methods.json +0 -45
  64. monocle_apptrace/metamodel/maps/langchain_methods.json +0 -129
  65. monocle_apptrace/metamodel/spans/README.md +0 -121
  66. monocle_apptrace/metamodel/spans/span_example.json +0 -140
  67. monocle_apptrace/metamodel/spans/span_format.json +0 -55
  68. monocle_apptrace/metamodel/spans/span_types.json +0 -16
  69. monocle_apptrace/utils.py +0 -252
  70. monocle_apptrace/wrap_common.py +0 -511
  71. monocle_apptrace/wrapper.py +0 -27
  72. monocle_apptrace-0.3.0b1.dist-info/RECORD +0 -48
  73. {monocle_apptrace-0.3.0b1.dist-info → monocle_apptrace-0.3.0b3.dist-info}/licenses/LICENSE +0 -0
  74. {monocle_apptrace-0.3.0b1.dist-info → monocle_apptrace-0.3.0b3.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/metamodel/haystack/_helper.py
@@ -0,0 +1,127 @@
+ import logging
+ from monocle_apptrace.instrumentation.common.utils import (
+     Option,
+     get_keys_as_tuple,
+     get_nested_value,
+     try_option,
+ )
+ logger = logging.getLogger(__name__)
+
+
+ def extract_messages(kwargs):
+     try:
+         messages = []
+         if isinstance(kwargs, dict):
+             if 'system_prompt' in kwargs and kwargs['system_prompt']:
+                 system_message = kwargs['system_prompt']
+                 messages.append({"system": system_message})
+             if 'prompt' in kwargs and kwargs['prompt']:
+                 user_message = extract_question_from_prompt(kwargs['prompt'])
+                 messages.append({"user": user_message})
+         return [str(message) for message in messages]
+     except Exception as e:
+         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+         return []
+
+
+ def extract_question_from_prompt(content):
+     try:
+         question_prefix = "Question:"
+         answer_prefix = "Answer:"
+
+         question_start = content.find(question_prefix)
+         if question_start == -1:
+             return None  # Return None if "Question:" is not found
+
+         question_start += len(question_prefix)
+         answer_start = content.find(answer_prefix, question_start)
+         if answer_start == -1:
+             question = content[question_start:].strip()
+         else:
+             question = content[question_start:answer_start].strip()
+
+         return question
+     except Exception as e:
+         logger.warning("Warning: Error occurred in extract_question_from_prompt: %s", str(e))
+         return ""
+
+
+ def extract_assistant_message(response):
+     try:
+         if "replies" in response:
+             reply = response["replies"][0]
+             if hasattr(reply, 'content'):
+                 return [reply.content]
+             return [reply]
+     except Exception as e:
+         logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
+         return []
+
+
+ def get_vectorstore_deployment(my_map):
+     if isinstance(my_map, dict):
+         if '_client_settings' in my_map:
+             client = my_map['_client_settings'].__dict__
+             host, port = get_keys_as_tuple(client, 'host', 'port')
+             if host:
+                 return f"{host}:{port}" if port else host
+         keys_to_check = ['client', '_client']
+         host = get_host_from_map(my_map, keys_to_check)
+         if host:
+             return host
+     else:
+         if hasattr(my_map, 'client') and '_endpoint' in my_map.client.__dict__:
+             return my_map.client.__dict__['_endpoint']
+         host, port = get_keys_as_tuple(my_map.__dict__, 'host', 'port')
+         if host:
+             return f"{host}:{port}" if port else host
+     return None
+
+
+ def get_host_from_map(my_map, keys_to_check):
+     for key in keys_to_check:
+         seed_connections = get_nested_value(my_map, [key, 'transport', 'seed_connections'])
+         if seed_connections and 'host' in seed_connections[0].__dict__:
+             return seed_connections[0].__dict__['host']
+     return None
+
+
+ def resolve_from_alias(my_map, alias):
+     """Find an alias that is not None from a list of aliases."""
+     for i in alias:
+         if i in my_map.keys():
+             return my_map[i]
+     return None
+
+ def extract_inference_endpoint(instance):
+     inference_endpoint: Option[str] = try_option(getattr, instance.client, 'base_url').map(str)
+     if inference_endpoint.is_none():
+         inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+     return inference_endpoint.unwrap_or(None)
+
+ def extract_embeding_model(instance):
+     pipeline = try_option(getattr, instance, '__haystack_added_to_pipeline__')
+     return pipeline.map(lambda p: try_option(getattr, p, 'get_component').map(
+         lambda g: try_option(getattr, g('text_embedder'), 'model').unwrap_or(None)).unwrap_or(None)).unwrap_or(None)
+
+ def update_span_from_llm_response(response, instance):
+     meta_dict = {}
+     if response is not None and isinstance(response, dict) and "meta" in response:
+         token_usage = response["meta"][0]["usage"]
+         if token_usage is not None:
+             temperature = instance.__dict__.get("temperature", None)
+             meta_dict.update({"temperature": temperature})
+             meta_dict.update(
+                 {"completion_tokens": token_usage.get("completion_tokens") or token_usage.get("output_tokens")})
+             meta_dict.update({"prompt_tokens": token_usage.get("prompt_tokens") or token_usage.get("input_tokens")})
+             meta_dict.update({"total_tokens": token_usage.get("total_tokens")})
+     return meta_dict
+
+
+ def update_output_span_events(results):
+     output_arg_text = " ".join([doc.content for doc in results['documents']])
+     if len(output_arg_text) > 100:
+         output_arg_text = output_arg_text[:100] + "..."
+     return output_arg_text
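The helpers above encode a simple prompt convention: the user question sits between "Question:" and "Answer:" markers inside the prompt string. A minimal standalone sketch of that convention (the kwargs values are hypothetical, not taken from the diff):

# Hypothetical input, shaped like the kwargs Haystack passes to a generator's run():
kwargs = {
    "system_prompt": "You are a helpful assistant.",
    "prompt": "Context: ...\nQuestion: What does Monocle trace?\nAnswer:",
}

# extract_question_from_prompt slices out the text between "Question:" and
# "Answer:", so extract_messages(kwargs) would return roughly:
#   ["{'system': 'You are a helpful assistant.'}",
#    "{'user': 'What does Monocle trace?'}"]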
monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py
@@ -0,0 +1,76 @@
+ from monocle_apptrace.instrumentation.metamodel.haystack import (
+     _helper,
+ )
+
+ INFERENCE = {
+     "type": "inference",
+     "attributes": [
+         [
+             {
+                 "_comment": "provider type, name, deployment, inference_endpoint",
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'inference.azure_oai'
+             },
+             {
+                 "attribute": "provider_name",
+                 "accessor": lambda arguments: arguments['kwargs']['provider_name']
+             },
+             {
+                 "attribute": "deployment",
+                 "accessor": lambda arguments: _helper.resolve_from_alias(
+                     arguments['instance'].__dict__,
+                     ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])
+             },
+             {
+                 "attribute": "inference_endpoint",
+                 "accessor": lambda arguments: _helper.resolve_from_alias(arguments['instance'].__dict__, ['api_base_url']) or _helper.extract_inference_endpoint(arguments['instance'])
+             }
+         ],
+         [
+             {
+                 "_comment": "LLM Model",
+                 "attribute": "name",
+                 "accessor": lambda arguments: _helper.resolve_from_alias(
+                     arguments['instance'].__dict__, ['model', 'model_name'])
+             },
+             {
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'model.llm.' + _helper.resolve_from_alias(
+                     arguments['instance'].__dict__, ['model', 'model_name'])
+             }
+         ]
+     ],
+     "events": [
+         {
+             "name": "data.input",
+             "attributes": [
+                 {
+                     "_comment": "this is instruction and user query to LLM",
+                     "attribute": "input",
+                     "accessor": lambda arguments: _helper.extract_messages(arguments['kwargs'])
+                 }
+             ]
+         },
+         {
+             "name": "data.output",
+             "attributes": [
+                 {
+                     "_comment": "this is response from LLM",
+                     "attribute": "response",
+                     "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+                 }
+             ]
+         },
+         {
+             "name": "metadata",
+             "attributes": [
+                 {
+                     "_comment": "this is metadata usage from LLM",
+                     "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'], arguments['instance'])
+                 }
+             ]
+         }
+     ]
+ }
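Each accessor above is a lambda that receives a single `arguments` dict. A rough sketch of how a span handler could evaluate one attribute group (an assumption about the wiring; the release's actual logic lives in instrumentation/common/span_handler.py):

def evaluate_attribute_group(group, arguments):
    # arguments is expected to carry 'instance', 'args', 'kwargs', and 'result',
    # matching the keys the accessors above index into.
    values = {}
    for spec in group:
        try:
            values[spec["attribute"]] = spec["accessor"](arguments)
        except Exception:
            values[spec["attribute"]] = None  # accessors are best-effort
    return values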
monocle_apptrace/instrumentation/metamodel/haystack/entities/retrieval.py
@@ -0,0 +1,61 @@
+ from monocle_apptrace.instrumentation.metamodel.haystack import (_helper, )
+ from monocle_apptrace.instrumentation.common.utils import get_attribute
+
+ RETRIEVAL = {
+     "type": "retrieval",
+     "attributes": [
+         [
+             {
+                 "_comment": "vector store name and type",
+                 "attribute": "name",
+                 "accessor": lambda arguments: _helper.resolve_from_alias(
+                     arguments['instance'].__dict__,
+                     ['document_store', '_document_store']).__class__.__name__
+             },
+             {
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'vectorstore.' + _helper.resolve_from_alias(
+                     arguments['instance'].__dict__, ['document_store', '_document_store']).__class__.__name__
+             },
+             {
+                 "attribute": "deployment",
+                 "accessor": lambda arguments: _helper.get_vectorstore_deployment(
+                     _helper.resolve_from_alias(arguments['instance'].__dict__,
+                                                ['document_store', '_document_store']).__dict__)
+             }
+         ],
+         [
+             {
+                 "_comment": "embedding model name and type",
+                 "attribute": "name",
+                 "accessor": lambda arguments: _helper.extract_embeding_model(arguments['instance'])
+             },
+             {
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'model.embedding.' + _helper.extract_embeding_model(arguments['instance'])
+             }
+         ]
+     ],
+     "events": [
+         {
+             "name": "data.input",
+             "attributes": [
+                 {
+                     "_comment": "this is instruction and user query to LLM",
+                     "attribute": "input",
+                     "accessor": lambda arguments: get_attribute("input")
+                 }
+             ]
+         },
+         {
+             "name": "data.output",
+             "attributes": [
+                 {
+                     "_comment": "this is result from LLM",
+                     "attribute": "response",
+                     "accessor": lambda arguments: _helper.update_output_span_events(arguments['result'])
+                 }
+             ]
+         }
+     ]
+ }
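Note that the retrieval input accessor calls get_attribute("input") rather than reading the call arguments: it reads a value stashed earlier in the trace context. A hedged sketch of that pattern (the dict-backed store is an illustrative assumption, not the package's implementation):

_trace_context = {}

def set_attribute(key, value):
    # Stash a value early in the request, e.g. the user query at pipeline entry.
    _trace_context[key] = value

def get_attribute(key):
    # Read it back later, e.g. inside the retriever span.
    return _trace_context.get(key)

set_attribute("input", "What does Monocle trace?")
assert get_attribute("input") == "What does Monocle trace?"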
monocle_apptrace/instrumentation/metamodel/haystack/methods.py
@@ -0,0 +1,42 @@
+ from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+ from monocle_apptrace.instrumentation.metamodel.haystack.entities.inference import INFERENCE
+ from monocle_apptrace.instrumentation.metamodel.haystack.entities.retrieval import RETRIEVAL
+
+ HAYSTACK_METHODS = [
+     {
+         "package": "haystack.components.retrievers.in_memory",
+         "object": "InMemoryEmbeddingRetriever",
+         "method": "run",
+         "span_name": "haystack.retriever",
+         "wrapper_method": task_wrapper,
+         "output_processor": RETRIEVAL
+     },
+     {
+         "package": "haystack_integrations.components.retrievers.opensearch",
+         "object": "OpenSearchEmbeddingRetriever",
+         "method": "run",
+         "span_name": "haystack.retriever",
+         "wrapper_method": task_wrapper,
+         "output_processor": RETRIEVAL
+     },
+     {
+         "package": "haystack.components.generators.openai",
+         "object": "OpenAIGenerator",
+         "method": "run",
+         "wrapper_method": task_wrapper,
+         "output_processor": INFERENCE
+     },
+     {
+         "package": "haystack.components.generators.chat.openai",
+         "object": "OpenAIChatGenerator",
+         "method": "run",
+         "wrapper_method": task_wrapper,
+         "output_processor": INFERENCE
+     },
+     {
+         "package": "haystack.core.pipeline.pipeline",
+         "object": "Pipeline",
+         "method": "run",
+         "wrapper_method": task_wrapper
+     }
+ ]
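Each entry names a package, class, and method to patch, plus the wrapper and an optional output processor. A minimal sketch of how one entry could be applied (illustrative only; the release's actual instrumentor is in instrumentation/common/instrumentor.py):

from importlib import import_module

def apply_method_config(config):
    module = import_module(config["package"])      # e.g. "haystack.core.pipeline.pipeline"
    target = getattr(module, config["object"])     # e.g. Pipeline
    original = getattr(target, config["method"])   # e.g. Pipeline.run

    def patched(self, *args, **kwargs):
        # A real wrapper such as task_wrapper would start a span named
        # config.get("span_name") and feed the output_processor's accessors.
        return original(self, *args, **kwargs)

    setattr(target, config["method"], patched)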
monocle_apptrace/instrumentation/metamodel/langchain/_helper.py
@@ -0,0 +1,121 @@
+ """
+ This module provides utility functions for extracting system, user,
+ and assistant messages from various input formats.
+ """
+
+ import logging
+ from monocle_apptrace.instrumentation.common.utils import (
+     Option,
+     get_keys_as_tuple,
+     get_nested_value,
+     try_option,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ def extract_messages(args):
+     """Extract system and user messages."""
+     try:
+         messages = []
+         if args and isinstance(args, (list, tuple)) and len(args) > 0:
+             if hasattr(args[0], "messages") and isinstance(args[0].messages, list):
+                 for msg in args[0].messages:
+                     if hasattr(msg, 'content') and hasattr(msg, 'type'):
+                         messages.append({msg.type: msg.content})
+         return [str(d) for d in messages]
+     except Exception as e:
+         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+         return []
+
+
+ def extract_assistant_message(response):
+     try:
+         if isinstance(response, str):
+             return [response]
+         if hasattr(response, "content"):
+             return [response.content]
+         if hasattr(response, "message") and hasattr(response.message, "content"):
+             return [response.message.content]
+     except Exception as e:
+         logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
+         return []
+
+
+ def extract_provider_name(instance):
+     provider_url: Option[str] = try_option(getattr, instance.client._client.base_url, 'host')
+     return provider_url.unwrap_or(None)
+
+
+ def extract_inference_endpoint(instance):
+     inference_endpoint: Option[str] = try_option(getattr, instance.client._client, 'base_url').map(str)
+     if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
+         inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+     return inference_endpoint.unwrap_or(extract_provider_name(instance))
+
+
+ def extract_vectorstore_deployment(my_map):
+     if isinstance(my_map, dict):
+         if '_client_settings' in my_map:
+             client = my_map['_client_settings'].__dict__
+             host, port = get_keys_as_tuple(client, 'host', 'port')
+             if host:
+                 return f"{host}:{port}" if port else host
+         keys_to_check = ['client', '_client']
+         host = __get_host_from_map(my_map, keys_to_check)
+         if host:
+             return host
+     else:
+         if hasattr(my_map, 'client') and '_endpoint' in my_map.client.__dict__:
+             return my_map.client.__dict__['_endpoint']
+         host, port = get_keys_as_tuple(my_map.__dict__, 'host', 'port')
+         if host:
+             return f"{host}:{port}" if port else host
+     return None
+
+ def __get_host_from_map(my_map, keys_to_check):
+     for key in keys_to_check:
+         seed_connections = get_nested_value(my_map, [key, 'transport', 'seed_connections'])
+         if seed_connections and 'host' in seed_connections[0].__dict__:
+             return seed_connections[0].__dict__['host']
+     return None
+
+ def resolve_from_alias(my_map, alias):
+     """Find an alias that is not None from a list of aliases."""
+     for i in alias:
+         if i in my_map.keys():
+             return my_map[i]
+     return None
+
+
+ def update_input_span_events(args):
+     return args[0] if len(args) > 0 else ""
+
+
+ def update_output_span_events(results):
+     output_arg_text = " ".join([doc.page_content for doc in results if hasattr(doc, 'page_content')])
+     if len(output_arg_text) > 100:
+         output_arg_text = output_arg_text[:100] + "..."
+     return output_arg_text
+
+
+ def update_span_from_llm_response(response, instance):
+     meta_dict = {}
+     if response is not None and hasattr(response, "response_metadata"):
+         if hasattr(response, "usage_metadata") and response.usage_metadata is not None:
+             token_usage = response.usage_metadata
+         else:
+             response_metadata = response.response_metadata
+             token_usage = response_metadata.get("token_usage")
+         if token_usage is not None:
+             temperature = instance.__dict__.get("temperature", None)
+             meta_dict.update({"temperature": temperature})
+             meta_dict.update(
+                 {"completion_tokens": token_usage.get("completion_tokens") or token_usage.get("output_tokens")})
+             meta_dict.update({"prompt_tokens": token_usage.get("prompt_tokens") or token_usage.get("input_tokens")})
+             meta_dict.update({"total_tokens": token_usage.get("total_tokens")})
+     return meta_dict
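Several helpers above lean on Option and try_option from instrumentation/common/utils.py to chase attribute chains without try/except noise. A minimal self-contained sketch of those combinators, mirroring how they are used here (an approximation, not the package's code):

class Option:
    def __init__(self, value=None):
        self._value = value

    def is_none(self):
        return self._value is None

    def map(self, fn):
        # Apply fn only when a value is present.
        return Option(fn(self._value)) if self._value is not None else self

    def unwrap_or(self, default):
        return self._value if self._value is not None else default

def try_option(fn, *args):
    # Call fn, converting any exception into an empty Option.
    try:
        return Option(fn(*args))
    except Exception:
        return Option(None)

# e.g. try_option(getattr, client, 'base_url').map(str).unwrap_or(None)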
monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py
@@ -0,0 +1,71 @@
+ from monocle_apptrace.instrumentation.metamodel.langchain import (
+     _helper,
+ )
+ from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
+
+ INFERENCE = {
+     "type": "inference",
+     "attributes": [
+         [
+             {
+                 "_comment": "provider type, name, deployment, inference_endpoint",
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'inference.azure_oai'
+             },
+             {
+                 "attribute": "provider_name",
+                 "accessor": lambda arguments: _helper.extract_provider_name(arguments['instance'])
+             },
+             {
+                 "attribute": "deployment",
+                 "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])
+             },
+             {
+                 "attribute": "inference_endpoint",
+                 "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base', 'endpoint']) or _helper.extract_inference_endpoint(arguments['instance'])
+             }
+         ],
+         [
+             {
+                 "_comment": "LLM Model",
+                 "attribute": "name",
+                 "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+             },
+             {
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+             }
+         ]
+     ],
+     "events": [
+         {
+             "name": "data.input",
+             "attributes": [
+                 {
+                     "_comment": "this is instruction and user query to LLM",
+                     "attribute": "input",
+                     "accessor": lambda arguments: _helper.extract_messages(arguments['args'])
+                 }
+             ]
+         },
+         {
+             "name": "data.output",
+             "attributes": [
+                 {
+                     "_comment": "this is result from LLM",
+                     "attribute": "response",
+                     "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+                 }
+             ]
+         },
+         {
+             "name": "metadata",
+             "attributes": [
+                 {
+                     "_comment": "this is metadata usage from LLM",
+                     "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'], arguments['instance'])
+                 }
+             ]
+         }
+     ]
+ }
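A worked example of the deployment accessor above, using hypothetical values for the instance __dict__:

instance_dict = {"azure_deployment": "gpt-4o-prod", "model_name": "gpt-4o"}
aliases = ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment']

# resolve_from_alias scans the aliases in order and returns the first key
# present in the map; the same logic inlined:
deployment = next((instance_dict[k] for k in aliases if k in instance_dict), None)
assert deployment == "gpt-4o-prod"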
monocle_apptrace/instrumentation/metamodel/langchain/entities/retrieval.py
@@ -0,0 +1,58 @@
+ from monocle_apptrace.instrumentation.metamodel.langchain import (
+     _helper,
+ )
+
+ RETRIEVAL = {
+     "type": "retrieval",
+     "attributes": [
+         [
+             {
+                 "_comment": "vector store name and type",
+                 "attribute": "name",
+                 "accessor": lambda arguments: type(arguments['instance'].vectorstore).__name__
+             },
+             {
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'vectorstore.' + type(arguments['instance'].vectorstore).__name__
+             },
+             {
+                 "attribute": "deployment",
+                 "accessor": lambda arguments: _helper.extract_vectorstore_deployment(
+                     arguments['instance'].vectorstore.__dict__)
+             }
+         ],
+         [
+             {
+                 "_comment": "embedding model name and type",
+                 "attribute": "name",
+                 "accessor": lambda arguments: _helper.resolve_from_alias(arguments['instance'].vectorstore.embeddings.__dict__, ['endpoint_name', 'model_id', 'model'])
+             },
+             {
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'model.embedding.' + _helper.resolve_from_alias(arguments['instance'].vectorstore.embeddings.__dict__, ['endpoint_name', 'model_id', 'model'])
+             }
+         ]
+     ],
+     "events": [
+         {
+             "name": "data.input",
+             "attributes": [
+                 {
+                     "_comment": "this is instruction and user query to LLM",
+                     "attribute": "input",
+                     "accessor": lambda arguments: _helper.update_input_span_events(arguments['args'])
+                 }
+             ]
+         },
+         {
+             "name": "data.output",
+             "attributes": [
+                 {
+                     "_comment": "this is result from LLM",
+                     "attribute": "response",
+                     "accessor": lambda arguments: _helper.update_output_span_events(arguments['result'])
+                 }
+             ]
+         }
+     ]
+ }
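The response accessor truncates retrieved content to a short preview. A standalone sketch of that behavior with a stand-in Document type (hypothetical data):

class Doc:
    def __init__(self, page_content):
        self.page_content = page_content

results = [Doc("monocle " * 40)]  # about 320 characters of content
text = " ".join(d.page_content for d in results if hasattr(d, "page_content"))
preview = text[:100] + "..." if len(text) > 100 else text
assert len(preview) == 103  # 100 characters plus the ellipsis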
monocle_apptrace/instrumentation/metamodel/langchain/methods.py
@@ -0,0 +1,105 @@
+ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
+ from monocle_apptrace.instrumentation.metamodel.langchain.entities.inference import (
+     INFERENCE,
+ )
+ from monocle_apptrace.instrumentation.metamodel.langchain.entities.retrieval import (
+     RETRIEVAL,
+ )
+
+ LANGCHAIN_METHODS = [
+     {
+         "package": "langchain.prompts.base",
+         "object": "BasePromptTemplate",
+         "method": "invoke",
+         "wrapper_method": task_wrapper
+     },
+     {
+         "package": "langchain.prompts.base",
+         "object": "BasePromptTemplate",
+         "method": "ainvoke",
+         "wrapper_method": atask_wrapper
+     },
+     {
+         "package": "langchain.chat_models.base",
+         "object": "BaseChatModel",
+         "method": "invoke",
+         "wrapper_method": task_wrapper,
+         "output_processor": INFERENCE
+     },
+     {
+         "package": "langchain.chat_models.base",
+         "object": "BaseChatModel",
+         "method": "ainvoke",
+         "wrapper_method": atask_wrapper,
+         "output_processor": INFERENCE
+     },
+     {
+         "package": "langchain_core.language_models.llms",
+         "object": "LLM",
+         "method": "_generate",
+         "wrapper_method": task_wrapper,
+         "output_processor": INFERENCE
+     },
+     {
+         "package": "langchain_core.language_models.llms",
+         "object": "LLM",
+         "method": "_agenerate",
+         "wrapper_method": atask_wrapper,
+         "output_processor": INFERENCE
+     },
+     {
+         "package": "langchain_core.language_models.llms",
+         "object": "BaseLLM",
+         "method": "invoke",
+         "wrapper_method": task_wrapper,
+         "output_processor": INFERENCE
+     },
+     {
+         "package": "langchain_core.language_models.llms",
+         "object": "BaseLLM",
+         "method": "ainvoke",
+         "wrapper_method": atask_wrapper,
+         "output_processor": INFERENCE
+     },
+     {
+         "package": "langchain_core.retrievers",
+         "object": "BaseRetriever",
+         "method": "invoke",
+         "wrapper_method": task_wrapper,
+         "output_processor": RETRIEVAL
+     },
+     {
+         "package": "langchain_core.retrievers",
+         "object": "BaseRetriever",
+         "method": "ainvoke",
+         "wrapper_method": atask_wrapper,
+         "output_processor": RETRIEVAL
+     },
+     {
+         "package": "langchain.schema",
+         "object": "BaseOutputParser",
+         "method": "invoke",
+         "wrapper_method": task_wrapper
+     },
+     {
+         "package": "langchain.schema",
+         "object": "BaseOutputParser",
+         "method": "ainvoke",
+         "wrapper_method": atask_wrapper
+     },
+     {
+         "package": "langchain.schema.runnable",
+         "object": "RunnableSequence",
+         "method": "invoke",
+         "span_name": "langchain.workflow",
+         "wrapper_method": task_wrapper
+     },
+     {
+         "package": "langchain.schema.runnable",
+         "object": "RunnableSequence",
+         "method": "ainvoke",
+         "span_name": "langchain.workflow",
+         "wrapper_method": atask_wrapper
+     }
+ ]
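Taken together, these Python method maps replace the 0.3.0b1 JSON files (langchain_methods.json and friends) removed elsewhere in this diff. A hedged usage sketch; setup_monocle_telemetry comes from the new instrumentation/common/instrumentor.py added in this release, and the workflow name is illustrative:

from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry

setup_monocle_telemetry(workflow_name="my-langchain-app")

# After setup, calls such as BaseChatModel.invoke and BaseRetriever.invoke
# are wrapped (task_wrapper / atask_wrapper) and emit inference and
# retrieval spans built from the entities defined above.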