monocle-apptrace 0.5.2__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (47)
  1. monocle_apptrace/exporters/file_exporter.py +15 -2
  2. monocle_apptrace/instrumentation/common/instrumentor.py +1 -1
  3. monocle_apptrace/instrumentation/common/span_handler.py +8 -4
  4. monocle_apptrace/instrumentation/common/utils.py +10 -2
  5. monocle_apptrace/instrumentation/common/wrapper_method.py +5 -1
  6. monocle_apptrace/instrumentation/metamodel/adk/_helper.py +6 -4
  7. monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +12 -2
  8. monocle_apptrace/instrumentation/metamodel/adk/entities/tool.py +8 -3
  9. monocle_apptrace/instrumentation/metamodel/agents/_helper.py +5 -5
  10. monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +22 -5
  11. monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +22 -7
  12. monocle_apptrace/instrumentation/metamodel/aiohttp/entities/http.py +14 -3
  13. monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +21 -11
  14. monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +7 -2
  15. monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +19 -6
  16. monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +6 -2
  17. monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +19 -19
  18. monocle_apptrace/instrumentation/metamodel/finish_types.py +32 -1
  19. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +20 -6
  20. monocle_apptrace/instrumentation/metamodel/flask/entities/http.py +7 -2
  21. monocle_apptrace/instrumentation/metamodel/hugging_face/__init__.py +0 -0
  22. monocle_apptrace/instrumentation/metamodel/hugging_face/_helper.py +138 -0
  23. monocle_apptrace/instrumentation/metamodel/hugging_face/entities/__init__.py +0 -0
  24. monocle_apptrace/instrumentation/metamodel/hugging_face/entities/inference.py +97 -0
  25. monocle_apptrace/instrumentation/metamodel/hugging_face/methods.py +23 -0
  26. monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +25 -14
  27. monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +7 -2
  28. monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +4 -2
  29. monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +8 -3
  30. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +1 -1
  31. monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +6 -5
  32. monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +5 -0
  33. monocle_apptrace/instrumentation/metamodel/mistral/__init__.py +0 -0
  34. monocle_apptrace/instrumentation/metamodel/mistral/_helper.py +223 -0
  35. monocle_apptrace/instrumentation/metamodel/mistral/entities/__init__.py +0 -0
  36. monocle_apptrace/instrumentation/metamodel/mistral/entities/inference.py +94 -0
  37. monocle_apptrace/instrumentation/metamodel/mistral/entities/retrieval.py +41 -0
  38. monocle_apptrace/instrumentation/metamodel/mistral/methods.py +58 -0
  39. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +2 -2
  40. {monocle_apptrace-0.5.2.dist-info → monocle_apptrace-0.6.0.dist-info}/METADATA +9 -76
  41. {monocle_apptrace-0.5.2.dist-info → monocle_apptrace-0.6.0.dist-info}/RECORD +44 -36
  42. monocle_apptrace/README.md +0 -101
  43. monocle_apptrace/mcp_server.py +0 -94
  44. monocle_apptrace-0.5.2.dist-info/licenses/NOTICE +0 -4
  45. {monocle_apptrace-0.5.2.dist-info → monocle_apptrace-0.6.0.dist-info}/WHEEL +0 -0
  46. {monocle_apptrace-0.5.2.dist-info → monocle_apptrace-0.6.0.dist-info}/entry_points.txt +0 -0
  47. {monocle_apptrace-0.5.2.dist-info → monocle_apptrace-0.6.0.dist-info}/licenses/LICENSE +0 -0
monocle_apptrace/instrumentation/metamodel/mistral/_helper.py
@@ -0,0 +1,223 @@
+ """
+ This module provides utility functions for extracting system, user,
+ and assistant messages from various input formats.
+ """
+
+ import json
+ import logging
+ from opentelemetry.context import get_value
+ from monocle_apptrace.instrumentation.common.utils import (
+     Option,
+     get_json_dumps,
+     get_keys_as_tuple,
+     get_nested_value,
+     get_status_code,
+     try_option,
+     get_exception_message,
+ )
+ from monocle_apptrace.instrumentation.metamodel.finish_types import map_mistral_finish_reason_to_finish_type
+ from monocle_apptrace.instrumentation.common.constants import AGENT_PREFIX_KEY, INFERENCE_AGENT_DELEGATION, INFERENCE_TURN_END, INFERENCE_TOOL_CALL
+
+ logger = logging.getLogger(__name__)
+
+
+ def extract_provider_name(instance):
+     provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
+     return provider_url.unwrap_or(None)
+
+ def update_input_span_events(kwargs):
+     """Extract embedding input for spans"""
+     if "inputs" in kwargs and isinstance(kwargs["inputs"], list):
+         # Join multiple strings into one
+         return " | ".join(kwargs["inputs"])
+     elif "inputs" in kwargs and isinstance(kwargs["inputs"], str):
+         return kwargs["inputs"]
+     return ""
+
+ def update_output_span_events(results):
+     """Extract embedding output for spans"""
+     try:
+         if hasattr(results, "data") and isinstance(results.data, list):
+             embeddings = results.data
+             # just return the indices, not full vectors
+             embedding_summaries = [
+                 f"index={e.index}, dim={len(e.embedding)}"
+                 for e in embeddings
+             ]
+             output = "\n".join(embedding_summaries)
+             if len(output) > 200:
+                 output = output[:200] + "..."
+             return output
+     except Exception as e:
+         logger.warning("Error in update_output_span_events: %s", str(e))
+     return ""
+
+ def extract_inference_endpoint(instance):
+     inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
+     if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
+         inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
+     return inference_endpoint.unwrap_or(extract_provider_name(instance))
+
+
+ def dummy_method(arguments):
+     pass
+
+
+ def extract_messages(kwargs):
+     """Extract system and user messages"""
+     try:
+         messages = []
+         if "system" in kwargs and isinstance(kwargs["system"], str):
+             messages.append({"system": kwargs["system"]})
+         if 'messages' in kwargs and kwargs['messages']:
+             for msg in kwargs['messages']:
+                 if msg.get('content') and msg.get('role'):
+                     messages.append({msg['role']: msg['content']})
+         return [get_json_dumps(message) for message in messages]
+     except Exception as e:
+         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+         return []
+
+
+ def get_exception_status_code(arguments):
+     exc = arguments.get("exception")
+     if exc is not None and hasattr(exc, "status_code"):
+         if exc.status_code == 401:
+             return "unauthorized"
+         elif exc.status_code == 403:
+             return "forbidden"
+         elif exc.status_code == 404:
+             return "not_found"
+         else:
+             return str(exc.status_code)
+     elif exc is not None:
+         return "error"
+     else:
+         return "success"
+
+
+ def extract_assistant_message(arguments):
+     """
+     Extract the assistant message from a Mistral response or stream chunks.
+     Returns a JSON string like {"assistant": "<text>"}.
+     """
+     try:
+         result = arguments.get("result") if isinstance(arguments, dict) else arguments
+         if result is None:
+             return ""
+
+         # Handle full response
+         if hasattr(result, "choices") and result.choices:
+             msg_obj = result.choices[0].message
+             return get_json_dumps({msg_obj.role: msg_obj.content})
+
+         # Handle streaming: result might be a list of CompletionEvent chunks
+         if isinstance(result, list):
+             content = []
+             for chunk in result:
+                 if hasattr(chunk, "data") and hasattr(chunk.data, "choices") and chunk.data.choices:
+                     choice = chunk.data.choices[0]
+                     if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
+                         content.append(choice.delta.content or "")
+             return get_json_dumps({"assistant": "".join(content)})
+
+         return ""
+
+     except Exception as e:
+         logger.warning("Warning in extract_assistant_message: %s", str(e))
+         return ""
+
+
+ '''def update_span_from_llm_response(response):
+     meta_dict = {}
+     if response is not None and hasattr(response, "usage"):
+         token_usage = getattr(response, "usage", None) or getattr(response, "response_metadata", {}).get("token_usage")
+         if token_usage is not None:
+             meta_dict.update({"completion_tokens": getattr(response.usage, "output_tokens", 0)})
+             meta_dict.update({"prompt_tokens": getattr(response.usage, "input_tokens", 0)})
+             meta_dict.update({"total_tokens": getattr(response.usage, "input_tokens", 0) + getattr(response.usage, "output_tokens", 0)})
+     return meta_dict'''
+
+ def update_span_from_llm_response(result, include_token_counts=False):
+     tokens = {
+         "completion_tokens": getattr(result, "completion_tokens", 0),
+         "prompt_tokens": getattr(result, "prompt_tokens", 0),
+         "total_tokens": getattr(result, "total_tokens", 0),
+     } if include_token_counts else {}
+     # Add other metadata fields like finish_reason, etc.
+     return {**tokens, "inference_sub_type": "turn_end"}
+
+
+ def extract_finish_reason(arguments):
+     """
+     Extract stop_reason from a Mistral response or stream chunks.
+     Works for both streaming (list of chunks) and full responses.
+     """
+     try:
+         response = arguments.get("result") if isinstance(arguments, dict) else arguments
+         if response is None:
+             return None
+
+         # Handle full response: single object with stop_reason
+         if hasattr(response, "stop_reason") and response.stop_reason:
+             return response.stop_reason
+
+         # Handle streaming: list of chunks, last chunk may have finish_reason
+         if isinstance(response, list):
+             for chunk in reversed(response):
+                 if hasattr(chunk, "data") and hasattr(chunk.data, "choices") and chunk.data.choices:
+                     fr = getattr(chunk.data.choices[0], "finish_reason", None)
+                     if fr is not None:
+                         return fr
+
+     except Exception as e:
+         logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
+         return None
+
+     return None
+
+
+ def map_finish_reason_to_finish_type(finish_reason):
+     """Map Mistral stop_reason to finish_type, similar to OpenAI mapping."""
+     return map_mistral_finish_reason_to_finish_type(finish_reason)
+
+
+ def agent_inference_type(arguments):
+     """Extract agent inference type from Mistral response"""
+     try:
+         status = get_status_code(arguments)
+         if status in ('success', 'completed'):
+             response = arguments.get("result")
+             if response is None:
+                 return INFERENCE_TURN_END
+
+             # Check if stop_reason indicates tool use
+             stop_reason = getattr(response, "stop_reason", None)
+             if stop_reason == "tool_use" and hasattr(response, "content") and response.content:
+                 agent_prefix = get_value(AGENT_PREFIX_KEY)
+                 for content_block in response.content:
+                     if getattr(content_block, "type", None) == "tool_use" and hasattr(content_block, "name"):
+                         if agent_prefix and content_block.name.startswith(agent_prefix):
+                             return INFERENCE_AGENT_DELEGATION
+                 return INFERENCE_TOOL_CALL
+
+             # Fallback: check the extracted message for tool content
+             assistant_message = extract_assistant_message(arguments)
+             if assistant_message:
+                 try:
+                     message = json.loads(assistant_message)
+                     assistant_content = message.get("assistant", "") if isinstance(message, dict) else ""
+                     agent_prefix = get_value(AGENT_PREFIX_KEY)
+                     if agent_prefix and agent_prefix in assistant_content:
+                         return INFERENCE_AGENT_DELEGATION
+                 except (json.JSONDecodeError, TypeError):
+                     agent_prefix = get_value(AGENT_PREFIX_KEY)
+                     if agent_prefix and agent_prefix in assistant_message:
+                         return INFERENCE_AGENT_DELEGATION
+
+             return INFERENCE_TURN_END
+
+     except Exception as e:
+         logger.warning("Warning: Error occurred in agent_inference_type: %s", str(e))
+         return INFERENCE_TURN_END
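
A minimal sketch of how these helpers behave, assuming monocle-apptrace 0.6.0 is installed; the sample inputs are hypothetical, shaped to match what extract_messages and update_input_span_events check for above:

```python
# Illustrative sketch, assuming monocle-apptrace 0.6.0 is installed.
from monocle_apptrace.instrumentation.metamodel.mistral import _helper

# extract_messages picks up an optional "system" string plus role/content dicts.
chat_kwargs = {
    "system": "You are a helpful assistant.",
    "messages": [{"role": "user", "content": "What is Monocle?"}],
}
print(_helper.extract_messages(chat_kwargs))
# e.g. ['{"system": "You are a helpful assistant."}', '{"user": "What is Monocle?"}']

# update_input_span_events joins list-valued embedding inputs with " | ".
print(_helper.update_input_span_events({"inputs": ["first text", "second text"]}))
# -> "first text | second text"
```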
monocle_apptrace/instrumentation/metamodel/mistral/entities/inference.py
@@ -0,0 +1,94 @@
+ from monocle_apptrace.instrumentation.common.constants import SPAN_TYPES
+ from monocle_apptrace.instrumentation.metamodel.mistral import _helper
+ from monocle_apptrace.instrumentation.common.utils import get_error_message, resolve_from_alias
+
+ MISTRAL_INFERENCE = {
+     "type": SPAN_TYPES.INFERENCE,
+     "attributes": [
+         [
+             {
+                 "_comment": "provider type ,name , deployment , inference_endpoint",
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'inference.mistral'
+             },
+             {
+                 "attribute": "provider_name",
+                 "accessor": lambda arguments: "mistral"
+             },
+             {
+                 "attribute": "inference_endpoint",
+                 "accessor": lambda arguments: "https://api.mistral.ai"
+             }
+         ],
+         [
+             {
+                 "_comment": "LLM Model",
+                 "attribute": "name",
+                 "accessor": lambda arguments: resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+             },
+             {
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+             }
+         ]
+     ],
+     "events": [
+         {
+             "name": "data.input",
+             "attributes": [
+                 {
+                     "_comment": "this is instruction and user query to LLM",
+                     "attribute": "input",
+                     "accessor": lambda arguments: _helper.extract_messages(arguments['kwargs'])
+                 }
+             ]
+         },
+         {
+             "name": "data.output",
+             "attributes": [
+                 {
+                     "attribute": "error_code",
+                     "accessor": lambda arguments: get_error_message(arguments)
+                 },
+                 {
+                     "_comment": "this is result from LLM, works for streaming and non-streaming",
+                     "attribute": "response",
+                     "accessor": lambda arguments: (
+                         # Handle streaming: combine chunks if result is iterable and doesn't have 'choices'
+                         _helper.extract_assistant_message(
+                             {"result": list(arguments["result"])}
+                             if hasattr(arguments.get("result"), "__iter__") and not hasattr(arguments.get("result"), "choices")
+                             else arguments
+                         )
+                     )
+                 }
+             ]
+         },
+         {
+             "name": "metadata",
+             "attributes": [
+                 {
+                     "_comment": "this is metadata usage from LLM, includes token counts",
+                     "accessor": lambda arguments: _helper.update_span_from_llm_response(
+                         arguments.get("result"),
+                         include_token_counts=True  # new flag for streaming handling
+                     )
+                 },
+                 {
+                     "_comment": "finish reason from Anthropic response",
+                     "attribute": "finish_reason",
+                     "accessor": lambda arguments: _helper.extract_finish_reason(arguments)
+                 },
+                 {
+                     "_comment": "finish type mapped from finish reason",
+                     "attribute": "finish_type",
+                     "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(_helper.extract_finish_reason(arguments))
+                 },
+                 {
+                     "attribute": "inference_sub_type",
+                     "accessor": lambda arguments: _helper.agent_inference_type(arguments)
+                 }
+             ]
+         }
+     ]
+ }
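
The response accessor above drains a streaming result into a list before extraction. A small illustration of that branch, using hypothetical stand-ins shaped like the stream events extract_assistant_message expects:

```python
# Hypothetical stand-ins for mistralai stream events, shaped the way
# _helper.extract_assistant_message expects (chunk.data.choices[0].delta.content).
from types import SimpleNamespace

def chunk(text):
    delta = SimpleNamespace(content=text)
    choice = SimpleNamespace(delta=delta)
    return SimpleNamespace(data=SimpleNamespace(choices=[choice]))

stream_result = iter([chunk("Hel"), chunk("lo")])

# Mirrors the accessor: a result that is iterable but has no "choices"
# attribute is drained into a list before message extraction.
arguments = {"result": stream_result}
if hasattr(arguments.get("result"), "__iter__") and not hasattr(arguments.get("result"), "choices"):
    arguments = {"result": list(arguments["result"])}
# _helper.extract_assistant_message(arguments) -> '{"assistant": "Hello"}'
```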
monocle_apptrace/instrumentation/metamodel/mistral/entities/retrieval.py
@@ -0,0 +1,41 @@
+ from monocle_apptrace.instrumentation.metamodel.mistral import _helper
+ from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
+
+ MISTRAL_RETRIEVAL = {
+     "type": "embedding",
+     "attributes": [
+         [
+             {
+                 "_comment": "LLM Model",
+                 "attribute": "name",
+                 "accessor": lambda arguments: resolve_from_alias(arguments['kwargs'], ['model'])
+             },
+             {
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'model.embedding.' + resolve_from_alias(arguments['kwargs'], ['model'])
+             }
+         ]
+     ],
+     "events": [
+         {
+             "name": "data.input",
+             "attributes": [
+                 {
+                     "_comment": "embedding input",
+                     "attribute": "input",
+                     "accessor": lambda arguments: _helper.update_input_span_events(arguments["kwargs"])
+                 }
+             ]
+         },
+         {
+             "name": "data.output",
+             "attributes": [
+                 {
+                     "_comment": "embedding output summary",
+                     "attribute": "response",
+                     "accessor": lambda arguments: _helper.update_output_span_events(arguments["result"])
+                 }
+             ]
+         }
+     ]
+ }
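
For context, the call this processor summarizes would look roughly like the following, a sketch assuming the mistralai v1 SDK (the same package wrapped in methods.py below):

```python
# Sketch, assuming the mistralai v1 SDK; illustrative only.
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key
result = client.embeddings.create(
    model="mistral-embed",
    inputs=["first text", "second text"],  # joined as "first text | second text" in data.input
)
# update_output_span_events(result) would record per-vector summaries,
# e.g. "index=0, dim=1024", truncated at 200 characters.
```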
monocle_apptrace/instrumentation/metamodel/mistral/methods.py
@@ -0,0 +1,58 @@
+ from monocle_apptrace.instrumentation.common.wrapper import task_wrapper, atask_wrapper
+ from monocle_apptrace.instrumentation.metamodel.mistral.entities.inference import MISTRAL_INFERENCE
+ from monocle_apptrace.instrumentation.metamodel.mistral.entities.retrieval import MISTRAL_RETRIEVAL
+
+ MISTRAL_METHODS = [
+     {
+         "package": "mistralai.chat",  # where Chat is defined
+         "object": "Chat",  # class name
+         "method": "complete",  # the sync method
+         "span_handler": "non_framework_handler",
+         "wrapper_method": task_wrapper,
+         "output_processor": MISTRAL_INFERENCE
+     },
+     {
+         "package": "mistralai.chat",  # where Chat is defined
+         "object": "Chat",  # class name
+         "method": "complete_async",  # the async method
+         "span_handler": "non_framework_handler",
+         "wrapper_method": atask_wrapper,
+         "output_processor": MISTRAL_INFERENCE
+     },
+     {
+         "package": "mistralai.chat",
+         "object": "Chat",
+         "method": "stream",  # sync streaming
+         "span_handler": "non_framework_handler",
+         "wrapper_method": task_wrapper,
+         "output_processor": MISTRAL_INFERENCE,
+     },
+     {
+         "package": "mistralai.chat",
+         "object": "Chat",
+         "method": "stream_async",  # async streaming
+         "span_handler": "non_framework_handler",
+         "wrapper_method": atask_wrapper,
+         "output_processor": MISTRAL_INFERENCE,
+     },
+     {
+         "package": "mistralai.embeddings",  # where Embeddings is defined
+         "object": "Embeddings",  # sync embeddings client
+         "method": "create",  # sync create
+         "span_handler": "non_framework_handler",
+         "wrapper_method": task_wrapper,
+         "output_processor": MISTRAL_RETRIEVAL
+     },
+     {
+         "package": "mistralai.embeddings",  # where Embeddings is defined
+         "object": "AsyncEmbeddings",  # async embeddings client
+         "method": "create",  # async create
+         "span_handler": "non_framework_handler",
+         "wrapper_method": atask_wrapper,
+         "output_processor": MISTRAL_RETRIEVAL
+     }
+ ]
+
+
+
+
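
Taken together, these entries patch the mistralai SDK so that ordinary calls emit Monocle spans. A hypothetical end-to-end sketch, assuming monocle-apptrace 0.6.0, the mistralai v1 SDK, and setup_monocle_telemetry as the setup entry point in the instrumentor module listed above:

```python
# Hypothetical usage sketch -- not part of the diff. Assumes
# setup_monocle_telemetry (Monocle's setup call) and the mistralai v1 SDK.
import os

from mistralai import Mistral
from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry

setup_monocle_telemetry(workflow_name="mistral_demo")  # workflow name is arbitrary

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
response = client.chat.complete(  # patched via the "complete" entry above
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
```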
monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py
@@ -100,7 +100,7 @@ def status_check(arguments):
      if hasattr(arguments["result"], "error") and arguments["result"].error is not None:
          error_msg:str = arguments["result"].error
          error_code:str = arguments["result"].status if hasattr(arguments["result"], "status") else "unknown"
-         raise MonocleSpanException(f"Error: {error_code} - {error_msg}")
+         raise MonocleSpanException(f"Error: {error_code} - {error_msg}", error_code)

  def get_prompt_template(arguments):
      pass
@@ -152,7 +152,7 @@ def extract_status_code(arguments):
  def check_status(arguments):
      status = get_status_code(arguments)
      if status != 'success' and arguments['exception'] is None:
-         raise MonocleSpanException(f"{status}")
+         raise MonocleSpanException(f"{status}", status)

  def map_finish_reason_to_finish_type(finish_reason):
      """Map TeamsAI finish_reason to standardized finish_type."""
{monocle_apptrace-0.5.2.dist-info → monocle_apptrace-0.6.0.dist-info}/METADATA
@@ -1,31 +1,30 @@
  Metadata-Version: 2.4
  Name: monocle_apptrace
- Version: 0.5.2
+ Version: 0.6.0
  Summary: package with monocle genAI tracing
  Project-URL: Homepage, https://github.com/monocle2ai/monocle
  Project-URL: Issues, https://github.com/monocle2ai/monocle/issues
  Author-email: "Okahu Inc." <okahu-pypi@okahu.ai>
  License: Apache-2.0
  License-File: LICENSE
- License-File: NOTICE
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
  Requires-Python: >=3.8
- Requires-Dist: click==8.2.1
- Requires-Dist: mcp>=1.13.1
  Requires-Dist: opentelemetry-api>=1.21.0
  Requires-Dist: opentelemetry-instrumentation
  Requires-Dist: opentelemetry-sdk>=1.21.0
- Requires-Dist: pydantic>=2.11.7
  Requires-Dist: requests
  Requires-Dist: wrapt>=1.14.0
+ Provides-Extra: ai-test
+ Requires-Dist: bert-score; extra == 'ai-test'
+ Requires-Dist: transformers; extra == 'ai-test'
  Provides-Extra: aws
  Requires-Dist: boto3==1.37.24; extra == 'aws'
  Provides-Extra: azure
  Requires-Dist: azure-storage-blob==12.22.0; extra == 'azure'
  Provides-Extra: dev
- Requires-Dist: a2a-sdk==0.2.8; extra == 'dev'
+ Requires-Dist: a2a-sdk==0.3.6; extra == 'dev'
  Requires-Dist: anthropic-haystack; extra == 'dev'
  Requires-Dist: anthropic==0.57.1; extra == 'dev'
  Requires-Dist: azure-storage-blob==12.22.0; extra == 'dev'
@@ -40,12 +39,13 @@ Requires-Dist: google-adk==1.10.0; extra == 'dev'
  Requires-Dist: google-generativeai==0.8.5; extra == 'dev'
  Requires-Dist: haystack-ai==2.3.0; extra == 'dev'
  Requires-Dist: httpx==0.28.1; extra == 'dev'
+ Requires-Dist: huggingface-hub==0.35.3; extra == 'dev'
  Requires-Dist: instructorembedding==1.0.1; extra == 'dev'
  Requires-Dist: langchain-anthropic==0.3.13; extra == 'dev'
  Requires-Dist: langchain-aws==0.2.23; extra == 'dev'
  Requires-Dist: langchain-chroma==0.2.4; extra == 'dev'
  Requires-Dist: langchain-community==0.3.24; extra == 'dev'
- Requires-Dist: langchain-google-genai==2.1.8; extra == 'dev'
+ Requires-Dist: langchain-google-genai==2.0.10; extra == 'dev'
  Requires-Dist: langchain-mcp-adapters==0.1.8; extra == 'dev'
  Requires-Dist: langchain-mistralai==0.2.10; extra == 'dev'
  Requires-Dist: langchain-openai==0.3.18; extra == 'dev'
@@ -64,6 +64,7 @@ Requires-Dist: llama-index-vector-stores-opensearch==0.6.0; extra == 'dev'
  Requires-Dist: llama-index==0.13.0; extra == 'dev'
  Requires-Dist: mcp==1.12.1; extra == 'dev'
  Requires-Dist: mistral-haystack==0.0.2; extra == 'dev'
+ Requires-Dist: mistralai==1.9.9; extra == 'dev'
  Requires-Dist: numpy==1.26.4; extra == 'dev'
  Requires-Dist: openai-agents==0.2.6; extra == 'dev'
  Requires-Dist: opendal==0.45.14; extra == 'dev'
@@ -80,42 +81,12 @@ Requires-Dist: types-requests==2.31.0.20240106; extra == 'dev'
  Requires-Dist: uvicorn==0.35.0; extra == 'dev'
  Description-Content-Type: text/markdown

- # Monocle for tracing GenAI app code
+ # Monocle Apptrace

  **Monocle** helps developers and platform engineers building or managing GenAI apps monitor these in prod by making it easy to instrument their code to capture traces that are compliant with open-source cloud-native observability ecosystem.

  **Monocle** is a community-driven OSS framework for tracing GenAI app code governed as a [Linux Foundation AI & Data project](https://lfaidata.foundation/projects/monocle/).

- ## Why Monocle
-
- Monocle is built for:
- - **app developers** to trace their app code in any environment without lots of custom code decoration
- - **platform engineers** to instrument apps in prod through wrapping instead of asking app devs to recode
- - **GenAI component providers** to add observability features to their products
- - **enterprises** to consume traces from GenAI apps in their existing open-source observability stack
-
- Benefits:
- - Monocle provides an implementation + package, not just a spec
-   - No expertise in OpenTelemetry spec required
-   - No bespoke implementation of that spec required
-   - No last-mile GenAI domain specific code required to instrument your app
- - Monocle provides consistency
-   - Connect traces across app code executions, model inference or data retrievals
-   - No cleansing of telemetry data across GenAI component providers required
-   - Works the same in personal lab dev or org cloud prod environments
-   - Send traces to location that fits your scale, budget and observability stack
- - Monocle is fully open source and community driven
-   - No vendor lock-in
-   - Implementation is transparent
-   - You can freely use or customize it to fit your needs
-
- ## What Monocle provides
-
- - Easy to [use](#use-monocle) code instrumentation
- - OpenTelemetry compatible format for [spans](src/monocle_apptrace/metamodel/spans/span_format.json).
- - Community-curated and extensible [metamodel](src/monocle_apptrace/metamodel/README.md) for consisent tracing of GenAI components.
- - Export to local and cloud storage
-
  ## Use Monocle

  - Get the Monocle package
@@ -137,42 +108,4 @@ Benefits:
  See [Monocle user guide](Monocle_User_Guide.md) for more details.


- ## Use Monocle MCP
-
- First install monocle-apptrace: pip install monocle-apptrace
-
- Open bash and run the following command to run the monocle mcp server with stdio:
- monocle_apptrace
-
- If you are using VS Code you can add following entry to your .vscode/mcp.json
-
- ```json
- "monocle-mcp-server": {
-     "type": "stdio",
-     "command": "uvx",
-     "args": [
-         "monocle_apptrace"
-     ],
-     "env": {}
- }
- ```
-
- ## Roadmap
-
- Goal of Monocle is to support tracing for apps written in *any language* with *any LLM orchestration or agentic framework* and built using models, vectors, agents or other components served up by *any cloud or model inference provider*.
-
- Current version supports:
- - Language: (🟢) Python , (🔜) [Typescript](https://github.com/monocle2ai/monocle-typescript)
- - LLM-frameworks: (🟢) Langchain, (🟢) Llamaindex, (🟢) Haystack, (🔜) Flask
- - LLM inference providers: (🟢) OpenAI, (🟢) Azure OpenAI, (🟢) Nvidia Triton, (🔜) AWS Bedrock, (🔜) Google Vertex, (🔜) Azure ML, (🔜) Hugging Face
- - Vector stores: (🟢) FAISS, (🔜) OpenSearch, (🔜) Milvus
- - Exporter: (🟢) stdout, (🟢) file, (🔜) Azure Blob Storage, (🔜) AWS S3, (🔜) Google Cloud Storage
-
-
- ## Get involved
- ### Provide feedback
- - Submit issues and enhancements requests via Github issues
-
- ### Contribute
- - Monocle is community based open source project. We welcome your contributions. Please refer to the CONTRIBUTING and CODE_OF_CONDUCT for guidelines. The [contributor's guide](CONTRIBUTING.md) provides technical details of the project.
