monocle-apptrace 0.1.1__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of monocle-apptrace might be problematic; consult the registry's advisory page for more details.

Files changed (88)
  1. monocle_apptrace/__init__.py +1 -0
  2. monocle_apptrace/__main__.py +19 -0
  3. monocle_apptrace/exporters/aws/s3_exporter.py +181 -0
  4. monocle_apptrace/exporters/aws/s3_exporter_opendal.py +137 -0
  5. monocle_apptrace/exporters/azure/blob_exporter.py +146 -0
  6. monocle_apptrace/exporters/azure/blob_exporter_opendal.py +162 -0
  7. monocle_apptrace/exporters/base_exporter.py +48 -0
  8. monocle_apptrace/exporters/exporter_processor.py +144 -0
  9. monocle_apptrace/exporters/file_exporter.py +16 -0
  10. monocle_apptrace/exporters/monocle_exporters.py +55 -0
  11. monocle_apptrace/exporters/okahu/okahu_exporter.py +117 -0
  12. monocle_apptrace/instrumentation/__init__.py +1 -0
  13. monocle_apptrace/instrumentation/common/__init__.py +2 -0
  14. monocle_apptrace/instrumentation/common/constants.py +70 -0
  15. monocle_apptrace/instrumentation/common/instrumentor.py +362 -0
  16. monocle_apptrace/instrumentation/common/span_handler.py +220 -0
  17. monocle_apptrace/instrumentation/common/utils.py +356 -0
  18. monocle_apptrace/instrumentation/common/wrapper.py +92 -0
  19. monocle_apptrace/instrumentation/common/wrapper_method.py +72 -0
  20. monocle_apptrace/instrumentation/metamodel/__init__.py +0 -0
  21. monocle_apptrace/instrumentation/metamodel/botocore/__init__.py +0 -0
  22. monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +95 -0
  23. monocle_apptrace/instrumentation/metamodel/botocore/entities/__init__.py +0 -0
  24. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +65 -0
  25. monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +26 -0
  26. monocle_apptrace/instrumentation/metamodel/botocore/methods.py +16 -0
  27. monocle_apptrace/instrumentation/metamodel/flask/__init__.py +0 -0
  28. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +29 -0
  29. monocle_apptrace/instrumentation/metamodel/flask/methods.py +13 -0
  30. monocle_apptrace/instrumentation/metamodel/haystack/__init__.py +0 -0
  31. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +127 -0
  32. monocle_apptrace/instrumentation/metamodel/haystack/entities/__init__.py +0 -0
  33. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +76 -0
  34. monocle_apptrace/instrumentation/metamodel/haystack/entities/retrieval.py +61 -0
  35. monocle_apptrace/instrumentation/metamodel/haystack/methods.py +43 -0
  36. monocle_apptrace/instrumentation/metamodel/langchain/__init__.py +0 -0
  37. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +127 -0
  38. monocle_apptrace/instrumentation/metamodel/langchain/entities/__init__.py +0 -0
  39. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +72 -0
  40. monocle_apptrace/instrumentation/metamodel/langchain/entities/retrieval.py +58 -0
  41. monocle_apptrace/{metamodel/maps/lang_chain_methods.json → instrumentation/metamodel/langchain/methods.py} +48 -43
  42. monocle_apptrace/instrumentation/metamodel/langgraph/__init__.py +0 -0
  43. monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +48 -0
  44. monocle_apptrace/instrumentation/metamodel/langgraph/entities/__init__.py +0 -0
  45. monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +56 -0
  46. monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +14 -0
  47. monocle_apptrace/instrumentation/metamodel/llamaindex/__init__.py +0 -0
  48. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +172 -0
  49. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/__init__.py +0 -0
  50. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +47 -0
  51. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +73 -0
  52. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/retrieval.py +57 -0
  53. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +101 -0
  54. monocle_apptrace/instrumentation/metamodel/openai/__init__.py +0 -0
  55. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +112 -0
  56. monocle_apptrace/instrumentation/metamodel/openai/entities/__init__.py +0 -0
  57. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +71 -0
  58. monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +43 -0
  59. monocle_apptrace/instrumentation/metamodel/openai/methods.py +45 -0
  60. monocle_apptrace/instrumentation/metamodel/requests/__init__.py +4 -0
  61. monocle_apptrace/instrumentation/metamodel/requests/_helper.py +31 -0
  62. monocle_apptrace/instrumentation/metamodel/requests/methods.py +12 -0
  63. {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.3.0.dist-info}/METADATA +23 -2
  64. monocle_apptrace-0.3.0.dist-info/RECORD +68 -0
  65. {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.3.0.dist-info}/WHEEL +1 -1
  66. monocle_apptrace/constants.py +0 -22
  67. monocle_apptrace/haystack/__init__.py +0 -9
  68. monocle_apptrace/haystack/wrap_node.py +0 -27
  69. monocle_apptrace/haystack/wrap_openai.py +0 -44
  70. monocle_apptrace/haystack/wrap_pipeline.py +0 -62
  71. monocle_apptrace/instrumentor.py +0 -124
  72. monocle_apptrace/langchain/__init__.py +0 -6
  73. monocle_apptrace/llamaindex/__init__.py +0 -15
  74. monocle_apptrace/metamodel/README.md +0 -47
  75. monocle_apptrace/metamodel/entities/README.md +0 -54
  76. monocle_apptrace/metamodel/entities/entity_types.json +0 -157
  77. monocle_apptrace/metamodel/entities/entity_types.py +0 -51
  78. monocle_apptrace/metamodel/maps/haystack_methods.json +0 -25
  79. monocle_apptrace/metamodel/maps/llama_index_methods.json +0 -70
  80. monocle_apptrace/metamodel/spans/README.md +0 -121
  81. monocle_apptrace/metamodel/spans/span_example.json +0 -140
  82. monocle_apptrace/metamodel/spans/span_format.json +0 -55
  83. monocle_apptrace/utils.py +0 -93
  84. monocle_apptrace/wrap_common.py +0 -311
  85. monocle_apptrace/wrapper.py +0 -24
  86. monocle_apptrace-0.1.1.dist-info/RECORD +0 -29
  87. {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.3.0.dist-info}/licenses/LICENSE +0 -0
  88. {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.3.0.dist-info}/licenses/NOTICE +0 -0
@@ -1,106 +1,111 @@
1
- {
2
- "wrapper_methods" : [
1
+ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
2
+ from monocle_apptrace.instrumentation.metamodel.langchain.entities.inference import (
3
+ INFERENCE,
4
+ )
5
+ from monocle_apptrace.instrumentation.metamodel.langchain.entities.retrieval import (
6
+ RETRIEVAL,
7
+ )
8
+
9
+ LANGCHAIN_METHODS = [
3
10
  {
4
11
  "package": "langchain.prompts.base",
5
12
  "object": "BasePromptTemplate",
6
13
  "method": "invoke",
7
- "wrapper_package": "wrap_common",
8
- "wrapper_method": "task_wrapper"
14
+ "wrapper_method": task_wrapper,
15
+ "span_type": "workflow"
9
16
  },
10
17
  {
11
18
  "package": "langchain.prompts.base",
12
19
  "object": "BasePromptTemplate",
13
20
  "method": "ainvoke",
14
- "wrapper_package": "wrap_common",
15
- "wrapper_method": "atask_wrapper"
21
+ "wrapper_method": atask_wrapper,
22
+ "span_type": "workflow"
16
23
  },
17
24
  {
18
25
  "package": "langchain.chat_models.base",
19
26
  "object": "BaseChatModel",
20
27
  "method": "invoke",
21
- "wrapper_package": "wrap_common",
22
- "wrapper_method": "llm_wrapper"
28
+ "wrapper_method": task_wrapper,
29
+ "output_processor": INFERENCE
23
30
  },
24
31
  {
25
32
  "package": "langchain.chat_models.base",
26
33
  "object": "BaseChatModel",
27
34
  "method": "ainvoke",
28
- "wrapper_package": "wrap_common",
29
- "wrapper_method": "allm_wrapper"
35
+ "wrapper_method": atask_wrapper,
36
+ "output_processor": INFERENCE
30
37
  },
31
38
  {
32
39
  "package": "langchain_core.language_models.llms",
33
40
  "object": "LLM",
34
41
  "method": "_generate",
35
- "wrapper_package": "wrap_common",
36
- "wrapper_method": "llm_wrapper"
42
+ "wrapper_method": task_wrapper,
43
+ "output_processor": INFERENCE
37
44
  },
38
45
  {
39
46
  "package": "langchain_core.language_models.llms",
40
47
  "object": "LLM",
41
48
  "method": "_agenerate",
42
- "wrapper_package": "wrap_common",
43
- "wrapper_method": "llm_wrapper"
49
+ "wrapper_method": atask_wrapper,
50
+ "output_processor": INFERENCE
51
+ },
52
+ {
53
+ "package": "langchain_core.language_models.llms",
54
+ "object": "BaseLLM",
55
+ "method": "invoke",
56
+ "wrapper_method": task_wrapper,
57
+ "output_processor": INFERENCE
58
+ },
59
+ {
60
+ "package": "langchain_core.language_models.llms",
61
+ "object": "BaseLLM",
62
+ "method": "ainvoke",
63
+ "wrapper_method": atask_wrapper,
64
+ "output_processor": INFERENCE
44
65
  },
45
66
  {
46
67
  "package": "langchain_core.retrievers",
47
68
  "object": "BaseRetriever",
48
69
  "method": "invoke",
49
- "wrapper_package": "wrap_common",
50
- "wrapper_method": "task_wrapper"
70
+ "wrapper_method": task_wrapper,
71
+ "output_processor": RETRIEVAL
72
+
51
73
  },
52
74
  {
53
75
  "package": "langchain_core.retrievers",
54
76
  "object": "BaseRetriever",
55
77
  "method": "ainvoke",
56
- "wrapper_package": "wrap_common",
57
- "wrapper_method": "atask_wrapper"
78
+ "wrapper_method": atask_wrapper,
79
+ "output_processor": RETRIEVAL
58
80
  },
59
81
  {
60
82
  "package": "langchain.schema",
61
83
  "object": "BaseOutputParser",
62
84
  "method": "invoke",
63
- "wrapper_package": "wrap_common",
64
- "wrapper_method": "task_wrapper"
85
+ "wrapper_method": task_wrapper,
86
+ "span_type": "workflow"
65
87
  },
66
88
  {
67
89
  "package": "langchain.schema",
68
90
  "object": "BaseOutputParser",
69
91
  "method": "ainvoke",
70
- "wrapper_package": "wrap_common",
71
- "wrapper_method": "atask_wrapper"
92
+ "wrapper_method": atask_wrapper,
93
+ "span_type": "workflow"
72
94
  },
73
95
  {
74
96
  "package": "langchain.schema.runnable",
75
97
  "object": "RunnableSequence",
76
98
  "method": "invoke",
77
99
  "span_name": "langchain.workflow",
78
- "wrapper_package": "wrap_common",
79
- "wrapper_method": "task_wrapper"
100
+ "wrapper_method": task_wrapper,
101
+ "span_type": "workflow"
80
102
  },
81
103
  {
82
104
  "package": "langchain.schema.runnable",
83
105
  "object": "RunnableSequence",
84
106
  "method": "ainvoke",
85
107
  "span_name": "langchain.workflow",
86
- "wrapper_package": "wrap_common",
87
- "wrapper_method": "atask_wrapper"
88
- },
89
- {
90
- "package": "langchain.schema.runnable",
91
- "object": "RunnableParallel",
92
- "method": "invoke",
93
- "span_name": "langchain.workflow",
94
- "wrapper_package": "wrap_common",
95
- "wrapper_method": "task_wrapper"
96
- },
97
- {
98
- "package": "langchain.schema.runnable",
99
- "object": "RunnableParallel",
100
- "method": "ainvoke",
101
- "span_name": "langchain.workflow",
102
- "wrapper_package": "wrap_common",
103
- "wrapper_method": "atask_wrapper"
108
+ "wrapper_method": atask_wrapper,
109
+ "span_type": "workflow"
104
110
  }
105
111
  ]
106
- }
@@ -0,0 +1,48 @@
1
+ from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
2
+ import logging
3
+ logger = logging.getLogger(__name__)
4
+
5
def handle_openai_response(response):
    """Return the text of the last message in an OpenAI-style response.

    Falls back to "" when the response has no 'messages' key or when any
    error occurs while reading it.
    """
    try:
        if 'messages' in response:
            last_message = response["messages"][-1]
            return str(last_message.content)
    except Exception as e:
        logging.getLogger(__name__).warning(
            "Warning: Error occurred in handle_openai_response: %s", str(e))
    return ""
13
+
14
def agent_instructions(arguments):
    """Resolve the agent's instructions.

    When the instructions attribute is callable it is invoked with the
    call's context_variables; otherwise the attribute is returned as-is.
    """
    kwargs = arguments['kwargs']
    instructions = kwargs['agent'].instructions
    if callable(instructions):
        return instructions(kwargs['context_variables'])
    return instructions
19
+
20
def extract_input(arguments):
    """Return the content of the first human message in the result history.

    Returns None implicitly when no human message is present.
    """
    for message in arguments['result']['messages']:
        # HumanMessage detection is duck-typed on .content/.type so we do not
        # have to import langchain message classes here.
        if hasattr(message, 'content') and hasattr(message, 'type') and message.type == "human":
            return message.content
25
+
26
def get_inference_endpoint(arguments):
    """Resolve the model client's endpoint URL from its known attribute aliases."""
    client_state = arguments['instance'].client.__dict__
    endpoint = resolve_from_alias(client_state, ['azure_endpoint', 'api_base', '_base_url'])
    return str(endpoint)
29
+
30
def tools(instance):
    """Return the tool names bound to the graph's 'tools' node.

    Returns None implicitly when the node or its bound tools are absent.
    """
    if hasattr(instance, 'nodes') and 'tools' in instance.nodes:
        tools_node = instance.nodes['tools']
        bound = getattr(tools_node, 'bound', None)
        if bound is not None and hasattr(bound, 'tools_by_name'):
            return list(bound.tools_by_name.keys())
35
+
36
+
37
def update_span_from_llm_response(response):
    """Build a metadata dict of token counts from the last message's metadata.

    Reads completion/prompt/total token counts from the last message's
    response_metadata["token_usage"]. Returns an empty dict when the
    response is None, has no messages, or carries no usage information.
    """
    meta_dict = {}
    token_usage = None
    if response is not None and "messages" in response:
        last_message = response["messages"][-1]
        # getattr/.get avoid AttributeError/KeyError when a provider omits
        # response_metadata or token_usage entirely.
        metadata = getattr(last_message, "response_metadata", None)
        if metadata is not None:
            token_usage = metadata.get("token_usage")
    if token_usage is not None:
        meta_dict.update({"completion_tokens": token_usage.get('completion_tokens')})
        meta_dict.update({"prompt_tokens": token_usage.get('prompt_tokens')})
        meta_dict.update({"total_tokens": token_usage.get('total_tokens')})
    return meta_dict
@@ -0,0 +1,56 @@
1
from monocle_apptrace.instrumentation.metamodel.langgraph import (
    _helper
)

# Span-building spec for a LangGraph agent invocation: attribute accessors plus
# input/output/metadata events, each resolved lazily against the wrapped call.
INFERENCE = {
    "type": "agent",
    "attributes": [
        [
            {
                "_comment": "agent type",
                "attribute": "type",
                "accessor": lambda arguments: 'agent.oai'
            },
            {
                "_comment": "name of the agent",
                "attribute": "name",
                "accessor": lambda arguments: arguments['instance'].name
            },
            {
                "_comment": "agent tools",
                "attribute": "tools",
                "accessor": lambda arguments: _helper.tools(arguments['instance'])
            }
        ]
    ],
    "events": [
        {
            "name": "data.input",
            "attributes": [
                {
                    "_comment": "this is LLM input",
                    "attribute": "query",
                    "accessor": lambda arguments: _helper.extract_input(arguments)
                }
            ]
        },
        {
            "name": "data.output",
            "attributes": [
                {
                    "_comment": "this is response from LLM",
                    "attribute": "response",
                    "accessor": lambda arguments: _helper.handle_openai_response(arguments['result'])
                }
            ]
        },
        {
            "name": "metadata",
            "attributes": [
                {
                    "_comment": "this is metadata usage from LLM",
                    "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'])
                }
            ]
        }
    ]
}
@@ -0,0 +1,14 @@
1
from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
from monocle_apptrace.instrumentation.metamodel.langgraph.entities.inference import (
    INFERENCE,
)

# Instrumentation map for LangGraph: wrap CompiledStateGraph.invoke and emit an
# agent span using the INFERENCE output processor.
LANGGRAPH_METHODS = [
    {
        "package": "langgraph.graph.state",
        "object": "CompiledStateGraph",
        "method": "invoke",
        "span_name": "langgraph.graph.invoke",
        "wrapper_method": task_wrapper,
        "output_processor": INFERENCE
    }
]
@@ -0,0 +1,172 @@
1
+ """
2
+ This module provides utility functions for extracting system, user,
3
+ and assistant messages from various input formats.
4
+ """
5
+
6
+ import logging
7
+ from urllib.parse import urlparse
8
+ from opentelemetry.sdk.trace import Span
9
+ from monocle_apptrace.instrumentation.common.utils import (
10
+ Option,
11
+ get_keys_as_tuple,
12
+ get_nested_value,
13
+ try_option,
14
+ )
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
def extract_tools(instance):
    """Collect the tool names recorded in the agent's current task sources.

    Returns [] when the agent state, task, or sources are unavailable.
    """
    if not hasattr(instance, 'state') or not hasattr(instance.state, 'task_dict'):
        return []
    try:
        task = next(iter(instance.state.task_dict.values())).task
    except (AttributeError, StopIteration):
        return []

    names = []
    if hasattr(task, 'extra_state') and 'sources' in task.extra_state:
        for source in task.extra_state['sources']:
            # Skip sources with an empty/None tool name, as the original did.
            if source.tool_name:
                names.append(source.tool_name)
    return names
34
+
35
+
36
def extract_messages(args):
    """Extract system and user messages"""
    try:
        extracted = []

        def add_message(msg):
            # Only objects that look like chat messages (content + role) count.
            if hasattr(msg, 'content') and hasattr(msg, 'role'):
                role = getattr(msg.role, 'value', msg.role)
                if role == "system":
                    text = msg.content
                else:
                    text = extract_query_from_content(msg.content)
                extracted.append({role: text})

        if isinstance(args, (list, tuple)) and args:
            for item in args[0]:
                add_message(item)
        if isinstance(args, dict):
            for item in args.get("messages", []):
                add_message(item)
        if args and isinstance(args, tuple):
            # NOTE(review): mirrors the original — the raw first positional
            # argument is appended in addition to any parsed messages.
            extracted.append(args[0])

        return [str(entry) for entry in extracted]
    except Exception as e:
        logging.getLogger(__name__).warning("Error in extract_messages: %s", str(e))
        return []
62
+
63
def extract_assistant_message(response):
    """Return the assistant's reply as a one-element list, or [] if not found.

    Handles plain strings, objects with .content, objects with
    .message.content, and objects with a string .response attribute.
    """
    try:
        if isinstance(response, str):
            return [response]
        if hasattr(response, "content"):
            return [response.content]
        message = getattr(response, "message", None)
        if message is not None and hasattr(message, "content"):
            return [message.content]
        if hasattr(response, "response") and isinstance(response.response, str):
            return [response.response]
    except Exception as e:
        logging.getLogger(__name__).warning(
            "Warning: Error occurred in extract_assistant_message: %s", str(e))
    return []
76
+
77
+
78
def extract_query_from_content(content):
    """Return the text between "Query:" and "Answer:" markers.

    If "Query:" is absent the content is returned unchanged; if "Answer:"
    is absent everything after "Query:" is returned. "" on error.
    """
    try:
        query_marker, answer_marker = "Query:", "Answer:"
        start = content.find(query_marker)
        if start == -1:
            return content
        start += len(query_marker)
        end = content.find(answer_marker, start)
        segment = content[start:] if end == -1 else content[start:end]
        return segment.strip()
    except Exception as e:
        logging.getLogger(__name__).warning(
            "Warning: Error occurred in extract_query_from_content: %s", str(e))
        return ""
96
+
97
+
98
def extract_provider_name(instance):
    """Return the hostname of the instance's api_base URL, wrapped as an Option."""
    return try_option(getattr, instance, 'api_base').and_then(
        lambda url: urlparse(url).hostname)
101
+
102
+
103
def extract_inference_endpoint(instance):
    """Prefer the SDK-configured server_url; fall back to the provider hostname."""
    configured = try_option(getattr, instance._client.sdk_configuration, 'server_url').map(str)
    return configured.unwrap_or(extract_provider_name(instance))
106
+
107
+
108
def extract_vectorstore_deployment(my_map):
    """Best-effort extraction of the vector store's endpoint.

    Accepts either the store's __dict__ (dict) or the store object itself,
    and returns "host[:port]", an explicit _endpoint, or None.
    """
    if isinstance(my_map, dict):
        if '_client_settings' in my_map:
            settings = my_map['_client_settings'].__dict__
            host, port = get_keys_as_tuple(settings, 'host', 'port')
            if host:
                return f"{host}:{port}" if port else host
        # Fall back to transport seed connections under client/_client.
        host = __get_host_from_map(my_map, ['client', '_client'])
        if host:
            return host
    else:
        if hasattr(my_map, 'client') and '_endpoint' in my_map.client.__dict__:
            return my_map.client.__dict__['_endpoint']
        host, port = get_keys_as_tuple(my_map.__dict__, 'host', 'port')
        if host:
            return f"{host}:{port}" if port else host
    return None
126
+
127
+
128
def __get_host_from_map(my_map, keys_to_check):
    """Return the first seed-connection host found under any of keys_to_check."""
    for candidate in keys_to_check:
        connections = get_nested_value(my_map, [candidate, 'transport', 'seed_connections'])
        if connections and 'host' in connections[0].__dict__:
            return connections[0].__dict__['host']
    return None
134
+
135
+
136
def resolve_from_alias(my_map, alias):
    """Return the value of the first alias present in my_map.

    Aliases are checked in order; None is returned when none match.
    """
    for name in alias:
        # Membership test directly on the mapping (no .keys() needed).
        if name in my_map:
            return my_map[name]
    return None
143
+
144
+
145
def update_input_span_events(args):
    """Return the query string from the first positional argument.

    "" for an empty tuple; None implicitly for non-tuple input.
    """
    if isinstance(args, tuple):
        return args[0].query_str if args else ""
148
+
149
+
150
def update_output_span_events(results):
    """Return the first result's text, truncated to 100 chars with an ellipsis.

    None implicitly for empty or non-list input.
    """
    if isinstance(results, list) and len(results) > 0:
        text = results[0].text
        # Keep span events small: cap the recorded output at 100 characters.
        return text if len(text) <= 100 else text[:100] + "..."
156
+
157
+
158
def update_span_from_llm_response(response, instance):
    """Build metadata (temperature + token counts) from response.raw usage.

    response.raw may be a dict or an SDK object; likewise its usage payload
    may be a dict or an object, so both access styles are supported. Token
    counts are only recorded when truthy. Returns {} when unavailable.
    """
    meta_dict = {}
    raw = getattr(response, "raw", None) if response is not None else None
    if raw is not None:
        token_usage = raw.get("usage") if isinstance(raw, dict) else getattr(raw, "usage", None)
        if token_usage is not None:
            meta_dict.update({"temperature": instance.__dict__.get("temperature", None)})

            def read_count(field):
                # Fix: a dict usage payload previously fell through getattr()
                # and silently dropped all token counts.
                if isinstance(token_usage, dict):
                    return token_usage.get(field)
                return getattr(token_usage, field, None)

            for field in ("completion_tokens", "prompt_tokens", "total_tokens"):
                value = read_count(field)
                if value:
                    meta_dict.update({field: value})
    return meta_dict
@@ -0,0 +1,47 @@
1
from monocle_apptrace.instrumentation.metamodel.llamaindex import (
    _helper,
)

# Span-building spec for a LlamaIndex agent run: name/type/tools attributes and
# input/output events, resolved lazily from the wrapped call's arguments.
AGENT = {
    "type": "agent",
    "attributes": [
        [
            {
                "_comment": "Agent name, type and Tools.",
                "attribute": "name",
                "accessor": lambda arguments: arguments['instance'].__class__.__name__
            },
            {
                "attribute": "type",
                "accessor": lambda arguments: 'Agent.oai'
            },
            {
                "attribute": "tools",
                "accessor": lambda arguments: _helper.extract_tools(arguments['instance'])
            }
        ]
    ],
    "events": [
        {
            "name": "data.input",
            "attributes": [
                {
                    "_comment": "this is instruction and user query to LLM",
                    "attribute": "input",
                    "accessor": lambda arguments: _helper.extract_messages(arguments['args'])
                }
            ]
        },
        {
            "name": "data.output",
            "attributes": [
                {
                    "_comment": "this is response from LLM",
                    "attribute": "response",
                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
                }
            ]
        }
    ]
}
@@ -0,0 +1,73 @@
1
from monocle_apptrace.instrumentation.metamodel.llamaindex import (
    _helper,
)
from monocle_apptrace.instrumentation.common.utils import resolve_from_alias, get_llm_type

# Span-building spec for a LlamaIndex LLM inference: provider/model attribute
# groups plus input/output/metadata events, resolved lazily per call.
INFERENCE = {
    "type": "inference",
    "attributes": [
        [
            {
                "_comment": "provider type ,name , deployment , inference_endpoint",
                "attribute": "type",
                "accessor": lambda arguments: 'inference.' + (get_llm_type(arguments['instance']) or 'generic')
            },
            {
                "attribute": "provider_name",
                "accessor": lambda arguments: arguments['kwargs'].get('provider_name') or _helper.extract_provider_name(arguments['instance'])
            },
            {
                "attribute": "deployment",
                "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])
            },
            {
                "attribute": "inference_endpoint",
                "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or _helper.extract_inference_endpoint(arguments['instance'])
            }
        ],
        [
            {
                "_comment": "LLM Model",
                "attribute": "name",
                "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])
            },
            {
                "attribute": "type",
                "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])
            }
        ]
    ],
    "events": [
        {
            "name": "data.input",
            "attributes": [
                {
                    "_comment": "this is instruction and user query to LLM",
                    "attribute": "input",
                    "accessor": lambda arguments: _helper.extract_messages(arguments['args'])
                }
            ]
        },
        {
            "name": "data.output",
            "attributes": [
                {
                    "_comment": "this is response from LLM",
                    "attribute": "response",
                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
                }
            ]
        },
        {
            "name": "metadata",
            "attributes": [
                {
                    "_comment": "this is metadata usage from LLM",
                    "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'], arguments['instance'])
                }
            ]
        }
    ]
}
@@ -0,0 +1,57 @@
1
from monocle_apptrace.instrumentation.metamodel.llamaindex import (
    _helper,
)

# Span-building spec for a LlamaIndex retrieval: vector-store and embedding
# attribute groups plus input/output events, resolved lazily per call.
RETRIEVAL = {
    "type": "retrieval",
    "attributes": [
        [
            {
                "_comment": "vector store name and type",
                "attribute": "name",
                "accessor": lambda arguments: type(arguments['instance']._vector_store).__name__
            },
            {
                "attribute": "type",
                "accessor": lambda arguments: 'vectorstore.' + type(arguments['instance']._vector_store).__name__
            },
            {
                "attribute": "deployment",
                "accessor": lambda arguments: _helper.extract_vectorstore_deployment(arguments['instance']._vector_store)
            }
        ],
        [
            {
                "_comment": "embedding model name and type",
                "attribute": "name",
                "accessor": lambda arguments: arguments['instance']._embed_model.model_name
            },
            {
                "attribute": "type",
                "accessor": lambda arguments: 'model.embedding.' + arguments['instance']._embed_model.model_name
            }
        ]
    ],
    "events": [
        {
            "name": "data.input",
            "attributes": [
                {
                    "_comment": "this is instruction and user query to LLM",
                    "attribute": "input",
                    "accessor": lambda arguments: _helper.update_input_span_events(arguments['args'])
                }
            ]
        },
        {
            "name": "data.output",
            "attributes": [
                {
                    "_comment": "this is result from LLM",
                    "attribute": "response",
                    "accessor": lambda arguments: _helper.update_output_span_events(arguments['result'])
                }
            ]
        }
    ]
}