monocle-apptrace 0.5.3__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of monocle-apptrace might be problematic.
- monocle_apptrace/exporters/file_exporter.py +7 -1
- monocle_apptrace/instrumentation/common/instrumentor.py +1 -1
- monocle_apptrace/instrumentation/common/span_handler.py +2 -1
- monocle_apptrace/instrumentation/common/wrapper_method.py +3 -1
- monocle_apptrace/instrumentation/metamodel/adk/_helper.py +6 -4
- monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +6 -1
- monocle_apptrace/instrumentation/metamodel/agents/_helper.py +5 -5
- monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +7 -2
- monocle_apptrace/instrumentation/metamodel/finish_types.py +32 -1
- monocle_apptrace/instrumentation/metamodel/hugging_face/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/hugging_face/_helper.py +138 -0
- monocle_apptrace/instrumentation/metamodel/hugging_face/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/hugging_face/entities/inference.py +97 -0
- monocle_apptrace/instrumentation/metamodel/hugging_face/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +4 -2
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +7 -2
- monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +6 -5
- monocle_apptrace/instrumentation/metamodel/mistral/_helper.py +98 -49
- monocle_apptrace/instrumentation/metamodel/mistral/entities/inference.py +14 -5
- monocle_apptrace/instrumentation/metamodel/mistral/entities/retrieval.py +41 -0
- monocle_apptrace/instrumentation/metamodel/mistral/methods.py +17 -0
- {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.0.dist-info}/METADATA +9 -76
- {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.0.dist-info}/RECORD +26 -23
- monocle_apptrace/README.md +0 -101
- monocle_apptrace/mcp_server.py +0 -94
- monocle_apptrace-0.5.3.dist-info/licenses/NOTICE +0 -4
- {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.0.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.0.dist-info}/entry_points.txt +0 -0
- {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.0.dist-info}/licenses/LICENSE +0 -0
monocle_apptrace/instrumentation/metamodel/mistral/_helper.py:

```diff
--- a/monocle_apptrace/instrumentation/metamodel/mistral/_helper.py
+++ b/monocle_apptrace/instrumentation/metamodel/mistral/_helper.py
@@ -15,16 +15,43 @@ from monocle_apptrace.instrumentation.common.utils import (
     try_option,
     get_exception_message,
 )
-from monocle_apptrace.instrumentation.metamodel.finish_types import
+from monocle_apptrace.instrumentation.metamodel.finish_types import map_mistral_finish_reason_to_finish_type
 from monocle_apptrace.instrumentation.common.constants import AGENT_PREFIX_KEY, INFERENCE_AGENT_DELEGATION, INFERENCE_TURN_END, INFERENCE_TOOL_CALL
 
-
 logger = logging.getLogger(__name__)
 
+
 def extract_provider_name(instance):
     provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
     return provider_url.unwrap_or(None)
 
+def update_input_span_events(kwargs):
+    """Extract embedding input for spans"""
+    if "inputs" in kwargs and isinstance(kwargs["inputs"], list):
+        # Join multiple strings into one
+        return " | ".join(kwargs["inputs"])
+    elif "inputs" in kwargs and isinstance(kwargs["inputs"], str):
+        return kwargs["inputs"]
+    return ""
+
+def update_output_span_events(results):
+    """Extract embedding output for spans"""
+    try:
+        if hasattr(results, "data") and isinstance(results.data, list):
+            embeddings = results.data
+            # just return the indices, not full vectors
+            embedding_summaries = [
+                f"index={e.index}, dim={len(e.embedding)}"
+                for e in embeddings
+            ]
+            output = "\n".join(embedding_summaries)
+            if len(output) > 200:
+                output = output[:200] + "..."
+            return output
+    except Exception as e:
+        logger.warning("Error in update_output_span_events: %s", str(e))
+    return ""
+
 def extract_inference_endpoint(instance):
     inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
     if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
@@ -32,16 +59,18 @@ def extract_inference_endpoint(instance):
 
     return inference_endpoint.unwrap_or(extract_provider_name(instance))
 
-
+
+def dummy_method(arguments):
     pass
 
+
 def extract_messages(kwargs):
     """Extract system and user messages"""
     try:
         messages = []
         if "system" in kwargs and isinstance(kwargs["system"], str):
             messages.append({"system": kwargs["system"]})
-        if 'messages' in kwargs and
+        if 'messages' in kwargs and kwargs['messages']:
             for msg in kwargs['messages']:
                 if msg.get('content') and msg.get('role'):
                     messages.append({msg['role']: msg['content']})
@@ -50,6 +79,7 @@ def extract_messages(kwargs):
         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
         return []
 
+
 def get_exception_status_code(arguments):
     exc = arguments.get("exception")
     if exc is not None and hasattr(exc, "status_code"):
@@ -73,7 +103,7 @@ def extract_assistant_message(arguments):
     Returns a JSON string like {"assistant": "<text>"}.
     """
     try:
-        result = arguments.get("result")
+        result = arguments.get("result") if isinstance(arguments, dict) else arguments
         if result is None:
             return ""
 
@@ -86,9 +116,10 @@ def extract_assistant_message(arguments):
         if isinstance(result, list):
             content = []
             for chunk in result:
-
-
-
+                if hasattr(chunk, "data") and hasattr(chunk.data, "choices") and chunk.data.choices:
+                    choice = chunk.data.choices[0]
+                    if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
+                        content.append(choice.delta.content or "")
             return get_json_dumps({"assistant": "".join(content)})
 
         return ""
```
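The two new embedding helpers only inspect plain dict keys and the `.data` / `.index` / `.embedding` attributes of the result, so their behavior can be exercised without a live Mistral client. A minimal sketch, where the `Fake*` classes are hypothetical stand-ins for an embeddings response:

```python
from dataclasses import dataclass, field
from typing import List

from monocle_apptrace.instrumentation.metamodel.mistral._helper import (
    update_input_span_events,
    update_output_span_events,
)

@dataclass
class FakeEmbedding:
    # Mimics one entry of an embeddings response: .index and .embedding
    index: int
    embedding: List[float] = field(default_factory=lambda: [0.0] * 1024)

@dataclass
class FakeResponse:
    # Mimics the response container: a .data list
    data: List[FakeEmbedding] = field(default_factory=list)

# List inputs are joined with " | "; a bare string passes through.
assert update_input_span_events({"inputs": ["hello", "world"]}) == "hello | world"
assert update_input_span_events({"inputs": "hello"}) == "hello"
assert update_input_span_events({}) == ""

# Output is summarized as index/dimension pairs, truncated to 200 chars.
print(update_output_span_events(FakeResponse(data=[FakeEmbedding(0), FakeEmbedding(1)])))
# index=0, dim=1024
# index=1, dim=1024
```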
monocle_apptrace/instrumentation/metamodel/mistral/_helper.py (continued):

```diff
@@ -98,77 +129,95 @@ def extract_assistant_message(arguments):
         return ""
 
 
-
-def update_span_from_llm_response(response):
+'''def update_span_from_llm_response(response):
     meta_dict = {}
     if response is not None and hasattr(response, "usage"):
-
-        token_usage = response.usage
-    else:
-        response_metadata = response.response_metadata
-        token_usage = response_metadata.get("token_usage")
+        token_usage = getattr(response, "usage", None) or getattr(response, "response_metadata", {}).get("token_usage")
     if token_usage is not None:
         meta_dict.update({"completion_tokens": getattr(response.usage, "output_tokens", 0)})
         meta_dict.update({"prompt_tokens": getattr(response.usage, "input_tokens", 0)})
-        meta_dict.update({"total_tokens": getattr(response.usage, "input_tokens", 0)+getattr(response.usage, "output_tokens", 0)})
-    return meta_dict
+        meta_dict.update({"total_tokens": getattr(response.usage, "input_tokens", 0) + getattr(response.usage, "output_tokens", 0)})
+    return meta_dict'''
+
+def update_span_from_llm_response(result, include_token_counts=False):
+    tokens = {
+        "completion_tokens": getattr(result, "completion_tokens", 0),
+        "prompt_tokens": getattr(result, "prompt_tokens", 0),
+        "total_tokens": getattr(result, "total_tokens", 0),
+    } if include_token_counts else {}
+    # Add other metadata fields like finish_reason, etc.
+    return {**tokens, "inference_sub_type": "turn_end"}
+
 
 def extract_finish_reason(arguments):
-    """
+    """
+    Extract stop_reason from a Mistral response or stream chunks.
+    Works for both streaming (list of chunks) and full responses.
+    """
     try:
-        # Arguments may be a dict with 'result' or just the response object
         response = arguments.get("result") if isinstance(arguments, dict) else arguments
-        if response is
+        if response is None:
+            return None
+
+        # Handle full response: single object with stop_reason
+        if hasattr(response, "stop_reason") and response.stop_reason:
             return response.stop_reason
+
+        # Handle streaming: list of chunks, last chunk may have finish_reason
+        if isinstance(response, list):
+            for chunk in reversed(response):
+                if hasattr(chunk, "data") and hasattr(chunk.data, "choices") and chunk.data.choices:
+                    fr = getattr(chunk.data.choices[0], "finish_reason", None)
+                    if fr is not None:
+                        return fr
+
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
         return None
+
     return None
 
+
 def map_finish_reason_to_finish_type(finish_reason):
-    """Map
-    return
+    """Map Mistral stop_reason to finish_type, similar to OpenAI mapping."""
+    return map_mistral_finish_reason_to_finish_type(finish_reason)
+
 
 def agent_inference_type(arguments):
-    """Extract agent inference type from
+    """Extract agent inference type from Mistral response"""
     try:
         status = get_status_code(arguments)
-        if status
-        response = arguments
-
+        if status in ('success', 'completed'):
+            response = arguments.get("result")
+            if response is None:
+                return INFERENCE_TURN_END
+
             # Check if stop_reason indicates tool use
-
-
-
-
-
-            if
-
-
-
-                if agent_prefix and tool_name.startswith(agent_prefix):
-                    return INFERENCE_AGENT_DELEGATION
-            # If we found tool use but no agent delegation, it's a regular tool call
-            return INFERENCE_TOOL_CALL
-
+            stop_reason = getattr(response, "stop_reason", None)
+            if stop_reason == "tool_use" and hasattr(response, "content") and response.content:
+                agent_prefix = get_value(AGENT_PREFIX_KEY)
+                for content_block in response.content:
+                    if getattr(content_block, "type", None) == "tool_use" and hasattr(content_block, "name"):
+                        if agent_prefix and content_block.name.startswith(agent_prefix):
+                            return INFERENCE_AGENT_DELEGATION
+                return INFERENCE_TOOL_CALL
+
             # Fallback: check the extracted message for tool content
             assistant_message = extract_assistant_message(arguments)
             if assistant_message:
                 try:
                     message = json.loads(assistant_message)
-
-
-
-
-                    if agent_prefix and agent_prefix in assistant_content:
-                        return INFERENCE_AGENT_DELEGATION
+                    assistant_content = message.get("assistant", "") if isinstance(message, dict) else ""
+                    agent_prefix = get_value(AGENT_PREFIX_KEY)
+                    if agent_prefix and agent_prefix in assistant_content:
+                        return INFERENCE_AGENT_DELEGATION
                 except (json.JSONDecodeError, TypeError):
-                    # If JSON parsing fails, fall back to string analysis
                     agent_prefix = get_value(AGENT_PREFIX_KEY)
                     if agent_prefix and agent_prefix in assistant_message:
                         return INFERENCE_AGENT_DELEGATION
-
+
         return INFERENCE_TURN_END
+
     except Exception as e:
         logger.warning("Warning: Error occurred in agent_inference_type: %s", str(e))
-        return INFERENCE_TURN_END
+        return INFERENCE_TURN_END
```
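The new `extract_finish_reason` covers both response shapes. A small sketch with stand-in objects (only the attribute shapes the helper actually probes are assumed here):

```python
from types import SimpleNamespace

from monocle_apptrace.instrumentation.metamodel.mistral._helper import extract_finish_reason

def make_chunk(finish_reason=None):
    # Shape probed by the helper: chunk.data.choices[0].finish_reason
    choice = SimpleNamespace(finish_reason=finish_reason,
                             delta=SimpleNamespace(content="..."))
    return SimpleNamespace(data=SimpleNamespace(choices=[choice]))

# Complete response: stop_reason is read directly.
full = SimpleNamespace(stop_reason="end_turn")
assert extract_finish_reason({"result": full}) == "end_turn"

# Streaming response: chunks are scanned from the end for a finish_reason.
chunks = [make_chunk(), make_chunk(), make_chunk("stop")]
assert extract_finish_reason({"result": chunks}) == "stop"
```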
monocle_apptrace/instrumentation/metamodel/mistral/entities/inference.py:

```diff
--- a/monocle_apptrace/instrumentation/metamodel/mistral/entities/inference.py
+++ b/monocle_apptrace/instrumentation/metamodel/mistral/entities/inference.py
@@ -10,7 +10,6 @@ MISTRAL_INFERENCE = {
             "_comment": "provider type ,name , deployment , inference_endpoint",
             "attribute": "type",
             "accessor": lambda arguments: 'inference.mistral'
-
         },
         {
             "attribute": "provider_name",
@@ -52,9 +51,16 @@ MISTRAL_INFERENCE = {
                 "accessor": lambda arguments: get_error_message(arguments)
             },
             {
-                "_comment": "this is result from LLM",
+                "_comment": "this is result from LLM, works for streaming and non-streaming",
                 "attribute": "response",
-                "accessor": lambda arguments:
+                "accessor": lambda arguments: (
+                    # Handle streaming: combine chunks if result is iterable and doesn't have 'choices'
+                    _helper.extract_assistant_message(
+                        {"result": list(arguments["result"])}
+                        if hasattr(arguments.get("result"), "__iter__") and not hasattr(arguments.get("result"), "choices")
+                        else arguments
+                    )
+                )
             }
         ]
     },
@@ -62,8 +68,11 @@ MISTRAL_INFERENCE = {
         "name": "metadata",
         "attributes": [
             {
-                "_comment": "this is metadata usage from LLM",
-                "accessor": lambda arguments: _helper.update_span_from_llm_response(
+                "_comment": "this is metadata usage from LLM, includes token counts",
+                "accessor": lambda arguments: _helper.update_span_from_llm_response(
+                    arguments.get("result"),
+                    include_token_counts=True  # new flag for streaming handling
+                )
             },
             {
                 "_comment": "finish reason from Anthropic response",
```
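The reworked `response` accessor decides between the two result shapes with two `hasattr` probes. A sketch of that branch with hypothetical stand-in objects (only the probes mirror the accessor's actual test):

```python
from types import SimpleNamespace

def is_streaming(result):
    # Stream results are iterables of chunks with no top-level `choices`;
    # a complete chat response exposes `choices` directly.
    return hasattr(result, "__iter__") and not hasattr(result, "choices")

complete = SimpleNamespace(choices=[])  # complete-response shape
stream = iter(())                       # e.g. a chunk generator
assert not is_streaming(complete)
assert is_streaming(stream)
```

When the streaming branch is taken, the accessor materializes the iterator with `list(...)` before handing it to `_helper.extract_assistant_message`, so chunk content is read from the collected list rather than the live stream.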
monocle_apptrace/instrumentation/metamodel/mistral/entities/retrieval.py (new file):

```diff
--- /dev/null
+++ b/monocle_apptrace/instrumentation/metamodel/mistral/entities/retrieval.py
@@ -0,0 +1,41 @@
+from monocle_apptrace.instrumentation.metamodel.mistral import _helper
+from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
+
+MISTRAL_RETRIEVAL = {
+    "type": "embedding",
+    "attributes": [
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": lambda arguments: resolve_from_alias(arguments['kwargs'], ['model'])
+            },
+            {
+                "attribute": "type",
+                "accessor": lambda arguments: 'model.embedding.' + resolve_from_alias(arguments['kwargs'], ['model'])
+            }
+        ]
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "embedding input",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.update_input_span_events(arguments["kwargs"])
+                }
+            ]
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "embedding output summary",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.update_output_span_events(arguments["result"])
+                }
+            ]
+        }
+    ]
+}
```
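Each accessor in this processor receives a dict carrying the wrapped call's `kwargs` and `result`. A sketch of what the span attributes and the `data.input` event resolve to for a hypothetical embeddings call (the `data.output` side needs a real response object, so it is omitted here):

```python
from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
from monocle_apptrace.instrumentation.metamodel.mistral import _helper

# Hypothetical arguments dict, shaped the way the wrapper passes it in.
arguments = {"kwargs": {"model": "mistral-embed", "inputs": ["hello", "world"]},
             "result": None}

print(resolve_from_alias(arguments["kwargs"], ["model"]))   # name attribute: mistral-embed
print("model.embedding." + resolve_from_alias(arguments["kwargs"], ["model"]))  # type attribute
print(_helper.update_input_span_events(arguments["kwargs"]))  # data.input event: hello | world
```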
monocle_apptrace/instrumentation/metamodel/mistral/methods.py:

```diff
--- a/monocle_apptrace/instrumentation/metamodel/mistral/methods.py
+++ b/monocle_apptrace/instrumentation/metamodel/mistral/methods.py
@@ -1,5 +1,6 @@
 from monocle_apptrace.instrumentation.common.wrapper import task_wrapper, atask_wrapper
 from monocle_apptrace.instrumentation.metamodel.mistral.entities.inference import MISTRAL_INFERENCE
+from monocle_apptrace.instrumentation.metamodel.mistral.entities.retrieval import MISTRAL_RETRIEVAL
 
 MISTRAL_METHODS = [
     {
@@ -33,6 +34,22 @@ MISTRAL_METHODS = [
         "span_handler": "non_framework_handler",
         "wrapper_method": atask_wrapper,
         "output_processor": MISTRAL_INFERENCE,
+    },
+    {
+        "package": "mistralai.embeddings",  # where Embeddings is defined
+        "object": "Embeddings",  # sync embeddings client
+        "method": "create",  # sync create
+        "span_handler": "non_framework_handler",
+        "wrapper_method": task_wrapper,
+        "output_processor": MISTRAL_RETRIEVAL
+    },
+    {
+        "package": "mistralai.embeddings",  # where Embeddings is defined
+        "object": "AsyncEmbeddings",  # async embeddings client
+        "method": "create",  # async create
+        "span_handler": "non_framework_handler",
+        "wrapper_method": atask_wrapper,
+        "output_processor": MISTRAL_RETRIEVAL
     }
 ]
 
```
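With these entries registered, a plain embeddings call is traced automatically. A rough end-to-end sketch, assuming monocle's usual `setup_monocle_telemetry` entry point and the mistralai 1.x `Mistral` client, neither of which is part of this diff:

```python
import os
from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry
from mistralai import Mistral

# Enable monocle tracing before the instrumented client is used.
setup_monocle_telemetry(workflow_name="mistral-embedding-demo")

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
response = client.embeddings.create(model="mistral-embed",
                                    inputs=["What does GenAI tracing capture?"])
# The resulting span's data.input / data.output events are produced by
# update_input_span_events / update_output_span_events from _helper.py.
```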
{monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.0.dist-info}/METADATA:

````diff
--- a/monocle_apptrace-0.5.3.dist-info/METADATA
+++ b/monocle_apptrace-0.6.0.dist-info/METADATA
@@ -1,31 +1,30 @@
 Metadata-Version: 2.4
 Name: monocle_apptrace
-Version: 0.
+Version: 0.6.0
 Summary: package with monocle genAI tracing
 Project-URL: Homepage, https://github.com/monocle2ai/monocle
 Project-URL: Issues, https://github.com/monocle2ai/monocle/issues
 Author-email: "Okahu Inc." <okahu-pypi@okahu.ai>
 License: Apache-2.0
 License-File: LICENSE
-License-File: NOTICE
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.8
-Requires-Dist: click==8.2.1
-Requires-Dist: mcp>=1.13.1
 Requires-Dist: opentelemetry-api>=1.21.0
 Requires-Dist: opentelemetry-instrumentation
 Requires-Dist: opentelemetry-sdk>=1.21.0
-Requires-Dist: pydantic>=2.11.7
 Requires-Dist: requests
 Requires-Dist: wrapt>=1.14.0
+Provides-Extra: ai-test
+Requires-Dist: bert-score; extra == 'ai-test'
+Requires-Dist: transformers; extra == 'ai-test'
 Provides-Extra: aws
 Requires-Dist: boto3==1.37.24; extra == 'aws'
 Provides-Extra: azure
 Requires-Dist: azure-storage-blob==12.22.0; extra == 'azure'
 Provides-Extra: dev
-Requires-Dist: a2a-sdk==0.
+Requires-Dist: a2a-sdk==0.3.6; extra == 'dev'
 Requires-Dist: anthropic-haystack; extra == 'dev'
 Requires-Dist: anthropic==0.57.1; extra == 'dev'
 Requires-Dist: azure-storage-blob==12.22.0; extra == 'dev'
@@ -40,12 +39,13 @@ Requires-Dist: google-adk==1.10.0; extra == 'dev'
 Requires-Dist: google-generativeai==0.8.5; extra == 'dev'
 Requires-Dist: haystack-ai==2.3.0; extra == 'dev'
 Requires-Dist: httpx==0.28.1; extra == 'dev'
+Requires-Dist: huggingface-hub==0.35.3; extra == 'dev'
 Requires-Dist: instructorembedding==1.0.1; extra == 'dev'
 Requires-Dist: langchain-anthropic==0.3.13; extra == 'dev'
 Requires-Dist: langchain-aws==0.2.23; extra == 'dev'
 Requires-Dist: langchain-chroma==0.2.4; extra == 'dev'
 Requires-Dist: langchain-community==0.3.24; extra == 'dev'
-Requires-Dist: langchain-google-genai==2.
+Requires-Dist: langchain-google-genai==2.0.10; extra == 'dev'
 Requires-Dist: langchain-mcp-adapters==0.1.8; extra == 'dev'
 Requires-Dist: langchain-mistralai==0.2.10; extra == 'dev'
 Requires-Dist: langchain-openai==0.3.18; extra == 'dev'
@@ -64,6 +64,7 @@ Requires-Dist: llama-index-vector-stores-opensearch==0.6.0; extra == 'dev'
 Requires-Dist: llama-index==0.13.0; extra == 'dev'
 Requires-Dist: mcp==1.12.1; extra == 'dev'
 Requires-Dist: mistral-haystack==0.0.2; extra == 'dev'
+Requires-Dist: mistralai==1.9.9; extra == 'dev'
 Requires-Dist: numpy==1.26.4; extra == 'dev'
 Requires-Dist: openai-agents==0.2.6; extra == 'dev'
 Requires-Dist: opendal==0.45.14; extra == 'dev'
@@ -80,42 +81,12 @@ Requires-Dist: types-requests==2.31.0.20240106; extra == 'dev'
 Requires-Dist: uvicorn==0.35.0; extra == 'dev'
 Description-Content-Type: text/markdown
 
-# Monocle
+# Monocle Apptrace
 
 **Monocle** helps developers and platform engineers building or managing GenAI apps monitor these in prod by making it easy to instrument their code to capture traces that are compliant with open-source cloud-native observability ecosystem.
 
 **Monocle** is a community-driven OSS framework for tracing GenAI app code governed as a [Linux Foundation AI & Data project](https://lfaidata.foundation/projects/monocle/).
 
-## Why Monocle
-
-Monocle is built for:
-- **app developers** to trace their app code in any environment without lots of custom code decoration
-- **platform engineers** to instrument apps in prod through wrapping instead of asking app devs to recode
-- **GenAI component providers** to add observability features to their products
-- **enterprises** to consume traces from GenAI apps in their existing open-source observability stack
-
-Benefits:
-- Monocle provides an implementation + package, not just a spec
-- No expertise in OpenTelemetry spec required
-- No bespoke implementation of that spec required
-- No last-mile GenAI domain specific code required to instrument your app
-- Monocle provides consistency
-- Connect traces across app code executions, model inference or data retrievals
-- No cleansing of telemetry data across GenAI component providers required
-- Works the same in personal lab dev or org cloud prod environments
-- Send traces to location that fits your scale, budget and observability stack
-- Monocle is fully open source and community driven
-- No vendor lock-in
-- Implementation is transparent
-- You can freely use or customize it to fit your needs
-
-## What Monocle provides
-
-- Easy to [use](#use-monocle) code instrumentation
-- OpenTelemetry compatible format for [spans](src/monocle_apptrace/metamodel/spans/span_format.json).
-- Community-curated and extensible [metamodel](src/monocle_apptrace/metamodel/README.md) for consisent tracing of GenAI components.
-- Export to local and cloud storage
-
 ## Use Monocle
 
 - Get the Monocle package
@@ -137,42 +108,4 @@ Benefits:
 See [Monocle user guide](Monocle_User_Guide.md) for more details.
 
 
-## Use Monocle MCP
-
-First install monocle-apptrace: pip install monocle-apptrace
-
-Open bash and run the following command to run the monocle mcp server with stdio:
-monocle_apptrace
-
-If you are using VS Code you can add following entry to your .vscode/mcp.json
-
-```json
-"monocle-mcp-server": {
-    "type": "stdio",
-    "command": "uvx",
-    "args": [
-        "monocle_apptrace"
-    ],
-    "env": {}
-}
-```
-
-## Roadmap
-
-Goal of Monocle is to support tracing for apps written in *any language* with *any LLM orchestration or agentic framework* and built using models, vectors, agents or other components served up by *any cloud or model inference provider*.
-
-Current version supports:
-- Language: (🟢) Python , (🔜) [Typescript](https://github.com/monocle2ai/monocle-typescript)
-- LLM-frameworks: (🟢) Langchain, (🟢) Llamaindex, (🟢) Haystack, (🔜) Flask
-- LLM inference providers: (🟢) OpenAI, (🟢) Azure OpenAI, (🟢) Nvidia Triton, (🔜) AWS Bedrock, (🔜) Google Vertex, (🔜) Azure ML, (🔜) Hugging Face
-- Vector stores: (🟢) FAISS, (🔜) OpenSearch, (🔜) Milvus
-- Exporter: (🟢) stdout, (🟢) file, (🔜) Azure Blob Storage, (🔜) AWS S3, (🔜) Google Cloud Storage
-
-
-## Get involved
-### Provide feedback
-- Submit issues and enhancements requests via Github issues
-
-### Contribute
-- Monocle is community based open source project. We welcome your contributions. Please refer to the CONTRIBUTING and CODE_OF_CONDUCT for guidelines. The [contributor's guide](CONTRIBUTING.md) provides technical details of the project.
 
````
{monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.0.dist-info}/RECORD:

```diff
--- a/monocle_apptrace-0.5.3.dist-info/RECORD
+++ b/monocle_apptrace-0.6.0.dist-info/RECORD
@@ -1,10 +1,8 @@
-monocle_apptrace/README.md,sha256=T5NFC01bF8VR0oVnAX_n0bhsEtttwqfTxDNAe5Y_ivE,3765
 monocle_apptrace/__init__.py,sha256=XtoX7gHUSZgkY1nry8IFny8RslPhutZQUuEkqIrBzFQ,30
 monocle_apptrace/__main__.py,sha256=MLLPbC4YTp8O2wQrn8BROSZGvQpQd5brk_d1n_dWVWA,573
-monocle_apptrace/mcp_server.py,sha256=X5NFOE1QHkIktykGlRH-bzOnLsby5E9sTRAT-4BOQx0,5591
 monocle_apptrace/exporters/base_exporter.py,sha256=xm2MkDjuVZc-vmSXBMMsNMyIoy4z0O4g6wOAyuEnHwo,2062
 monocle_apptrace/exporters/exporter_processor.py,sha256=-spCIJ_UfJ0fax_jE-ii3ODQBwtnHZgYIGVNd91Q718,6298
-monocle_apptrace/exporters/file_exporter.py,sha256=
+monocle_apptrace/exporters/file_exporter.py,sha256=wgYuCJRCC_H4S7hkPh6K5w8ps51sUXkeldNbU_uicmw,8426
 monocle_apptrace/exporters/monocle_exporters.py,sha256=qo6S53dp2ko6EzMP-ICL2buqgmw8HZboy19j7iHp1Qk,2882
 monocle_apptrace/exporters/aws/s3_exporter.py,sha256=9GA2tiWOUBLtDdGIdLLyYJEdQ1jRC5SdxxVH3qqR8Qk,8260
 monocle_apptrace/exporters/aws/s3_exporter_opendal.py,sha256=0aEUxdMgJaDUwqjw0DqlCMr8kjl01KgwUt3_RRCVFds,5917
@@ -14,32 +12,32 @@ monocle_apptrace/exporters/okahu/okahu_exporter.py,sha256=wFkHd87nOXzFMRejrUiO6N
 monocle_apptrace/instrumentation/__init__.py,sha256=wCzg-Ivla7p2F01pM1fEEQMztzcZZB4vD5cZ9CsTigw,94
 monocle_apptrace/instrumentation/common/__init__.py,sha256=iVcdQectswd-J_h5n0n-PqAXsrmCRxdK8YeyqYGArC8,432
 monocle_apptrace/instrumentation/common/constants.py,sha256=dAbIKrfI97oYMKNj5gdPWvP-YEvnLcRbd6Qec2k2MYk,6281
-monocle_apptrace/instrumentation/common/instrumentor.py,sha256=
+monocle_apptrace/instrumentation/common/instrumentor.py,sha256=4n2USZsjfGSkK6FuNnx78PR3cMbxuSEI6UC86Asb92Q,10864
 monocle_apptrace/instrumentation/common/method_wrappers.py,sha256=jC3G_R2YVD0JWCzxx1zNzJbe_BsNhsveVMegJRXA3IQ,10152
 monocle_apptrace/instrumentation/common/scope_wrapper.py,sha256=Ysr4zmb71sZm3R-fNabctnNJHnmLVL9FE-4EmQo3HxA,3927
-monocle_apptrace/instrumentation/common/span_handler.py,sha256=
+monocle_apptrace/instrumentation/common/span_handler.py,sha256=j8hOrdc-bLZYC0ST1kOqn9OvA1KiRl9HAR77m1p5JQs,15282
 monocle_apptrace/instrumentation/common/tracing.md,sha256=6Lr8QGxEFHKhj-mMvLV3xjFnplKSs6HEdwl0McPK47M,7577
 monocle_apptrace/instrumentation/common/utils.py,sha256=hsF1Opoa7kJA9eEWNzYAU88u3JFYMsNQlUilL8fcqiE,15761
 monocle_apptrace/instrumentation/common/wrapper.py,sha256=vbt2650Z3YNcxIvrT3odZ1RHIIeAHrrvYQOqFNUGXHQ,20285
-monocle_apptrace/instrumentation/common/wrapper_method.py,sha256=
+monocle_apptrace/instrumentation/common/wrapper_method.py,sha256=wwsClwlwyhPjfMiz1PY_VJLFzje8_j1R1Xa9HKMovRQ,6816
 monocle_apptrace/instrumentation/metamodel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-monocle_apptrace/instrumentation/metamodel/finish_types.py,sha256=
+monocle_apptrace/instrumentation/metamodel/finish_types.py,sha256=Bkto3dHkabTjSsPXLnzPiOW1_xal5CGNrIfIfqdHYSk,20748
 monocle_apptrace/instrumentation/metamodel/a2a/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 monocle_apptrace/instrumentation/metamodel/a2a/_helper.py,sha256=BHuhF5OnEFEQR081lM4R538c83P--t1J5xHOREFDBpo,1281
 monocle_apptrace/instrumentation/metamodel/a2a/methods.py,sha256=_KiCczpRZWfTkzpwQ36kLp5t6Fw-8UEse2Yg4kaHwUk,675
 monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py,sha256=5v37Q5mqE0JzWsmC6x1ZvJB3Zpkyce3hKyfCHF7iD9Q,3715
 monocle_apptrace/instrumentation/metamodel/adk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-monocle_apptrace/instrumentation/metamodel/adk/_helper.py,sha256=
+monocle_apptrace/instrumentation/metamodel/adk/_helper.py,sha256=ljQutMjk5j6dPEx8QCGcJSOgM2XN4_Wq1L6ri3NbLMA,6454
 monocle_apptrace/instrumentation/metamodel/adk/methods.py,sha256=Wp6MJArvLotY2CIAf1PlwJzdcH8qBk7II1KPBRET7c0,917
-monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py,sha256=
+monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py,sha256=esRSIJRE91tzasEqMouXKsU8vj01Zf24a-7Uyxq0DKg,3905
 monocle_apptrace/instrumentation/metamodel/adk/entities/tool.py,sha256=EEZxYM9bdH7KCmO-jnTywXAiN45hel5eAqLyHKZ1BbU,2157
 monocle_apptrace/instrumentation/metamodel/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-monocle_apptrace/instrumentation/metamodel/agents/_helper.py,sha256=
+monocle_apptrace/instrumentation/metamodel/agents/_helper.py,sha256=5j6ZkR9Dh1WMUrC-zSH3M2OJvKd4ZsZs3gIR87Ws0lI,7595
 monocle_apptrace/instrumentation/metamodel/agents/agents_processor.py,sha256=P95dNBh18M74Bw-BklwcN3wRfyi4vC3Q9EOcR8QBheg,6194
 monocle_apptrace/instrumentation/metamodel/agents/methods.py,sha256=l7KwBLm_olUfZsN9UxUVc_spvSGLNqBJzKh3cyX40-o,1758
 monocle_apptrace/instrumentation/metamodel/agents/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py,sha256=
+monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py,sha256=UUbnGdCkF2CGRzDZdD3r3pv_-tXXb9W5shDEtcFq_3Q,6859
 monocle_apptrace/instrumentation/metamodel/aiohttp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py,sha256=6wTmycVbETB0fOc99No2rePVgACKR3J6HCUkaedV0o8,2539
 monocle_apptrace/instrumentation/metamodel/aiohttp/methods.py,sha256=rcfGoRMLJeu-X2O9fGv6nhhjUrBJALKOJ-axiedavMI,435
@@ -84,6 +82,11 @@ monocle_apptrace/instrumentation/metamodel/haystack/methods.py,sha256=fuICw7KVTA
 monocle_apptrace/instrumentation/metamodel/haystack/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py,sha256=pykGCW_ucEdPkp_yshSyHeK7p2WxJpnzxHEPt7TONp0,3880
 monocle_apptrace/instrumentation/metamodel/haystack/entities/retrieval.py,sha256=bWagT0us1sGFlvHEToYlVk4PPDxFimQC0l_BJmrjnxc,2439
+monocle_apptrace/instrumentation/metamodel/hugging_face/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/hugging_face/_helper.py,sha256=f5-U7Uga7UIO0VKbsH0PzxEWKwDugbEEebFdFqX03Sc,4812
+monocle_apptrace/instrumentation/metamodel/hugging_face/methods.py,sha256=U4kui4NiZ2drAvahira1__qVAUCT-OR0zZEn-rN_4nc,751
+monocle_apptrace/instrumentation/metamodel/hugging_face/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/hugging_face/entities/inference.py,sha256=FO9cV4teok2WCV95FOUPy6XtGHtycuzJ-AN8TbbD028,3654
 monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py,sha256=mKD-IbylOUugK9GGuGv-S53AUei_8swQ4Ak8A5iwUxw,3043
 monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py,sha256=-b5dfI5oZVdRmBjfrVJgQuN910p7SUOu9Tc1AUhkz3A,934
 monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py,sha256=nxnfCwPftoRdHfjuRNrILEFOvB1e8oXqHRfPn-qxyZY,716
@@ -95,11 +98,11 @@ monocle_apptrace/instrumentation/metamodel/langchain/entities/__init__.py,sha256
 monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py,sha256=Bqv7pDj-wQGtD5iJf4sG67aEHF4nJxHy7FfTEw4Ec5g,3646
 monocle_apptrace/instrumentation/metamodel/langchain/entities/retrieval.py,sha256=rRzp_oi_-yEKgCnQUxIS2ForJKtUQFGADYOou91sXU0,2121
 monocle_apptrace/instrumentation/metamodel/langgraph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py,sha256=
+monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py,sha256=HOciW1fomZrXg52K8wLbdiV6oUlvC1C1j8TTvDAx7RM,4085
 monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py,sha256=0JZKLwWcdXTvp7QoBhCV6CoplohMoH3jdZ0EtfUNi2s,3156
 monocle_apptrace/instrumentation/metamodel/langgraph/methods.py,sha256=xu3BkxjupktwdAPAvavOd2_ZhjllqfYQQ3s1RWrhWlE,1295
 monocle_apptrace/instrumentation/metamodel/langgraph/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py,sha256=
+monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py,sha256=6_EEYgFfyOWbLP4PmdXx13SLVlnizBBhzYLtnBDo9t0,5367
 monocle_apptrace/instrumentation/metamodel/litellm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 monocle_apptrace/instrumentation/metamodel/litellm/_helper.py,sha256=Yo0RtiJ4WKDRuC9VqUmXmdQmRLduOsVGHMNwswwdcLk,3433
 monocle_apptrace/instrumentation/metamodel/litellm/methods.py,sha256=D3rT7bQKzPRxGIs3GxwPmjmmti8ndF7_5Cmz8ojfSJQ,627
@@ -114,16 +117,17 @@ monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py,sha256=t
 monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py,sha256=sWXR1-Vp6QxQVm9yYrrb3N6i8vS4vuR7G1MkS-DFY9o,3401
 monocle_apptrace/instrumentation/metamodel/llamaindex/entities/retrieval.py,sha256=z9jWZW_UCYL0fKCUKXEiIzloZeYi14kGkOPqewO4If8,1952
 monocle_apptrace/instrumentation/metamodel/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-monocle_apptrace/instrumentation/metamodel/mcp/_helper.py,sha256=
+monocle_apptrace/instrumentation/metamodel/mcp/_helper.py,sha256=Pq_7Qv8aiaRcxskjiwFSMVHBPLcRNcIGRSwVLt7RDng,3977
 monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py,sha256=bCAEUYNudGcXhpS-U7GP6Zt917AhvxJWJpoykfjE044,377
 monocle_apptrace/instrumentation/metamodel/mcp/methods.py,sha256=rgd5lZG8Z8x4vGZ5JxZiPeAwBoaZp6wOuwO8uYzHRCs,685
 monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py,sha256=hd1K63T3DHInaTvcCHm8VO7IZsT5cV3todvOucWPL34,1783
 monocle_apptrace/instrumentation/metamodel/mistral/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-monocle_apptrace/instrumentation/metamodel/mistral/_helper.py,sha256=
-monocle_apptrace/instrumentation/metamodel/mistral/methods.py,sha256=
+monocle_apptrace/instrumentation/metamodel/mistral/_helper.py,sha256=u9L6GEwwOrEj6U_-wB9wtX_LMURnkZ_hBlEEFBDaWak,8965
+monocle_apptrace/instrumentation/metamodel/mistral/methods.py,sha256=FGaCINeeEi8fthnDzzf8sPHz1qUh83u67BlJWxul9Ik,2254
 monocle_apptrace/instrumentation/metamodel/mistral/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-monocle_apptrace/instrumentation/metamodel/mistral/entities/inference.py,sha256=
+monocle_apptrace/instrumentation/metamodel/mistral/entities/inference.py,sha256=Pz6lI2yZEZFcb4CvC0TF8rzZCRk7HU5Nwh3aAVUHSiY,3849
+monocle_apptrace/instrumentation/metamodel/mistral/entities/retrieval.py,sha256=M5nc4bPbln0wKxzdXwkZzNBQ2etieBSAoHtbECUbHhg,1327
 monocle_apptrace/instrumentation/metamodel/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 monocle_apptrace/instrumentation/metamodel/openai/_helper.py,sha256=Iy8bsvpMTXEj0Ay60aBZKT1u2h2fxllBCnj0zb7FLHc,14985
 monocle_apptrace/instrumentation/metamodel/openai/methods.py,sha256=jpqZyfiJbzMz1r3W3fwMCGiQsbiDSkhqgADJextGxFQ,1796
@@ -142,9 +146,8 @@ monocle_apptrace/instrumentation/metamodel/teamsai/entities/__init__.py,sha256=4
 monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py,sha256=FRR9iBdDBXfYscP-lkORMNKl_lllflZN6gMlC7m_94w,3206
 monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py,sha256=o9jrBIEqPDg3VfR6zexUCpkq3jlX0lQji8CKLUUK4Wk,3022
-monocle_apptrace-0.
-monocle_apptrace-0.
-monocle_apptrace-0.
-monocle_apptrace-0.
-monocle_apptrace-0.
-monocle_apptrace-0.5.3.dist-info/RECORD,,
+monocle_apptrace-0.6.0.dist-info/METADATA,sha256=MuWDvk_AW8zeODDSnclP5a5iN3qBn_IcJXaGL4wXvp8,4981
+monocle_apptrace-0.6.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+monocle_apptrace-0.6.0.dist-info/entry_points.txt,sha256=rxpPKb3klrgZEbSyOFQ2J6KRPO7ri9ES-zmC8Jtikx8,70
+monocle_apptrace-0.6.0.dist-info/licenses/LICENSE,sha256=ay9trLiP5I7ZsFXo6AqtkLYdRqe5S9r-DrPOvsNlZrg,9136
+monocle_apptrace-0.6.0.dist-info/RECORD,,
```