monocle-apptrace 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monocle_apptrace/__init__.py +1 -0
- monocle_apptrace/__main__.py +19 -0
- monocle_apptrace/exporters/aws/s3_exporter.py +50 -27
- monocle_apptrace/exporters/aws/s3_exporter_opendal.py +137 -0
- monocle_apptrace/exporters/azure/blob_exporter.py +30 -12
- monocle_apptrace/exporters/azure/blob_exporter_opendal.py +162 -0
- monocle_apptrace/exporters/base_exporter.py +19 -18
- monocle_apptrace/exporters/exporter_processor.py +128 -3
- monocle_apptrace/exporters/file_exporter.py +16 -0
- monocle_apptrace/exporters/monocle_exporters.py +48 -20
- monocle_apptrace/exporters/okahu/okahu_exporter.py +8 -6
- monocle_apptrace/instrumentation/__init__.py +1 -0
- monocle_apptrace/instrumentation/common/__init__.py +2 -0
- monocle_apptrace/instrumentation/common/constants.py +70 -0
- monocle_apptrace/instrumentation/common/instrumentor.py +362 -0
- monocle_apptrace/instrumentation/common/span_handler.py +220 -0
- monocle_apptrace/instrumentation/common/utils.py +356 -0
- monocle_apptrace/instrumentation/common/wrapper.py +92 -0
- monocle_apptrace/instrumentation/common/wrapper_method.py +72 -0
- monocle_apptrace/instrumentation/metamodel/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/botocore/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +95 -0
- monocle_apptrace/instrumentation/metamodel/botocore/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +65 -0
- monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +26 -0
- monocle_apptrace/instrumentation/metamodel/botocore/methods.py +16 -0
- monocle_apptrace/instrumentation/metamodel/flask/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/flask/_helper.py +29 -0
- monocle_apptrace/instrumentation/metamodel/flask/methods.py +13 -0
- monocle_apptrace/instrumentation/metamodel/haystack/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +127 -0
- monocle_apptrace/instrumentation/metamodel/haystack/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +76 -0
- monocle_apptrace/instrumentation/metamodel/haystack/entities/retrieval.py +61 -0
- monocle_apptrace/instrumentation/metamodel/haystack/methods.py +43 -0
- monocle_apptrace/instrumentation/metamodel/langchain/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +127 -0
- monocle_apptrace/instrumentation/metamodel/langchain/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +72 -0
- monocle_apptrace/instrumentation/metamodel/langchain/entities/retrieval.py +58 -0
- monocle_apptrace/instrumentation/metamodel/langchain/methods.py +111 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +48 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +56 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +14 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +172 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +47 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +73 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/retrieval.py +57 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +101 -0
- monocle_apptrace/instrumentation/metamodel/openai/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/openai/_helper.py +112 -0
- monocle_apptrace/instrumentation/metamodel/openai/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +71 -0
- monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +43 -0
- monocle_apptrace/instrumentation/metamodel/openai/methods.py +45 -0
- monocle_apptrace/instrumentation/metamodel/requests/__init__.py +4 -0
- monocle_apptrace/instrumentation/metamodel/requests/_helper.py +31 -0
- monocle_apptrace/instrumentation/metamodel/requests/methods.py +12 -0
- {monocle_apptrace-0.2.0.dist-info → monocle_apptrace-0.3.0.dist-info}/METADATA +19 -2
- monocle_apptrace-0.3.0.dist-info/RECORD +68 -0
- {monocle_apptrace-0.2.0.dist-info → monocle_apptrace-0.3.0.dist-info}/WHEEL +1 -1
- monocle_apptrace/constants.py +0 -22
- monocle_apptrace/haystack/__init__.py +0 -9
- monocle_apptrace/haystack/wrap_node.py +0 -27
- monocle_apptrace/haystack/wrap_openai.py +0 -44
- monocle_apptrace/haystack/wrap_pipeline.py +0 -63
- monocle_apptrace/instrumentor.py +0 -121
- monocle_apptrace/langchain/__init__.py +0 -9
- monocle_apptrace/llamaindex/__init__.py +0 -16
- monocle_apptrace/metamodel/README.md +0 -47
- monocle_apptrace/metamodel/entities/README.md +0 -77
- monocle_apptrace/metamodel/entities/app_hosting_types.json +0 -29
- monocle_apptrace/metamodel/entities/entities.json +0 -49
- monocle_apptrace/metamodel/entities/inference_types.json +0 -33
- monocle_apptrace/metamodel/entities/model_types.json +0 -41
- monocle_apptrace/metamodel/entities/vector_store_types.json +0 -25
- monocle_apptrace/metamodel/entities/workflow_types.json +0 -22
- monocle_apptrace/metamodel/maps/attributes/inference/langchain_entities.json +0 -35
- monocle_apptrace/metamodel/maps/attributes/inference/llamaindex_entities.json +0 -35
- monocle_apptrace/metamodel/maps/attributes/retrieval/langchain_entities.json +0 -27
- monocle_apptrace/metamodel/maps/attributes/retrieval/llamaindex_entities.json +0 -27
- monocle_apptrace/metamodel/maps/haystack_methods.json +0 -25
- monocle_apptrace/metamodel/maps/langchain_methods.json +0 -129
- monocle_apptrace/metamodel/maps/llamaindex_methods.json +0 -74
- monocle_apptrace/metamodel/spans/README.md +0 -121
- monocle_apptrace/metamodel/spans/span_example.json +0 -140
- monocle_apptrace/metamodel/spans/span_format.json +0 -55
- monocle_apptrace/metamodel/spans/span_types.json +0 -16
- monocle_apptrace/utils.py +0 -172
- monocle_apptrace/wrap_common.py +0 -417
- monocle_apptrace/wrapper.py +0 -26
- monocle_apptrace-0.2.0.dist-info/RECORD +0 -44
- {monocle_apptrace-0.2.0.dist-info → monocle_apptrace-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.2.0.dist-info → monocle_apptrace-0.3.0.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py ADDED
@@ -0,0 +1,101 @@
+from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
+from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.inference import (
+    INFERENCE,
+)
+from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.agent import AGENT
+from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.retrieval import (
+    RETRIEVAL,
+)
+
+
+LLAMAINDEX_METHODS = [
+    {
+        "package": "llama_index.core.indices.base_retriever",
+        "object": "BaseRetriever",
+        "method": "retrieve",
+        "span_name": "llamaindex.retrieve",
+        "wrapper_method": task_wrapper,
+        "output_processor": RETRIEVAL
+    },
+    {
+        "package": "llama_index.core.indices.base_retriever",
+        "object": "BaseRetriever",
+        "method": "aretrieve",
+        "span_name": "llamaindex.retrieve",
+        "wrapper_method": atask_wrapper,
+        "output_processor": RETRIEVAL
+    },
+    {
+        "package": "llama_index.core.base.base_query_engine",
+        "object": "BaseQueryEngine",
+        "method": "query",
+        "span_name": "llamaindex.query",
+        "wrapper_method": task_wrapper,
+        "span_type": "workflow"
+    },
+    {
+        "package": "llama_index.core.base.base_query_engine",
+        "object": "BaseQueryEngine",
+        "method": "aquery",
+        "span_name": "llamaindex.query",
+        "wrapper_method": atask_wrapper,
+        "span_type": "workflow"
+    },
+    {
+        "package": "llama_index.core.llms.custom",
+        "object": "CustomLLM",
+        "method": "chat",
+        "span_name": "llamaindex.llmchat",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "llama_index.core.llms.custom",
+        "object": "CustomLLM",
+        "method": "achat",
+        "span_name": "llamaindex.llmchat",
+        "wrapper_method": atask_wrapper,
+        "output_processor": INFERENCE,
+
+    },
+    {
+        "package": "llama_index.llms.openai.base",
+        "object": "OpenAI",
+        "method": "chat",
+        "span_name": "llamaindex.openai",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "llama_index.llms.openai.base",
+        "object": "OpenAI",
+        "method": "achat",
+        "span_name": "llamaindex.openai",
+        "wrapper_method": atask_wrapper,
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "llama_index.llms.mistralai.base",
+        "object": "MistralAI",
+        "method": "chat",
+        "span_name": "llamaindex.mistralai",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "llama_index.llms.mistralai.base",
+        "object": "MistralAI",
+        "method": "achat",
+        "span_name": "llamaindex.mistralai",
+        "wrapper_method": atask_wrapper,
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "llama_index.core.agent",
+        "object": "ReActAgent",
+        "method": "chat",
+        "span_name": "react.agent",
+        "wrapper_method": task_wrapper,
+        "output_processor": AGENT
+    }
+]
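Each LLAMAINDEX_METHODS entry above names one LlamaIndex call site (package, object, method), the sync or async wrapper that opens the span, and the output processor that fills its attributes and events. The snippet below is only an illustrative way to read that registry; the actual monkey patching is done by the package's instrumentor, which is not part of this hunk.

# Illustrative sketch: list the call sites targeted by the registry above.
from monocle_apptrace.instrumentation.metamodel.llamaindex.methods import LLAMAINDEX_METHODS

for entry in LLAMAINDEX_METHODS:
    target = f"{entry['package']}.{entry['object']}.{entry['method']}"
    print(target, "->", entry.get("span_name"))
# llama_index.core.indices.base_retriever.BaseRetriever.retrieve -> llamaindex.retrieve
# llama_index.core.base.base_query_engine.BaseQueryEngine.query -> llamaindex.query
# ... (one line per entry)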
monocle_apptrace/instrumentation/metamodel/openai/_helper.py ADDED
@@ -0,0 +1,112 @@
+"""
+This module provides utility functions for extracting system, user,
+and assistant messages from various input formats.
+"""
+
+import logging
+from monocle_apptrace.instrumentation.common.utils import (
+    Option,
+    get_keys_as_tuple,
+    get_nested_value,
+    try_option,
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+def extract_messages(kwargs):
+    """Extract system and user messages"""
+    try:
+        messages = []
+        if 'messages' in kwargs and len(kwargs['messages']) >0:
+            for msg in kwargs['messages']:
+                if msg.get('content') and msg.get('role'):
+                    messages.append({msg['role']: msg['content']})
+
+        return [str(message) for message in messages]
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+        return []
+
+
+def extract_assistant_message(response):
+    try:
+        if response is not None and hasattr(response,"choices") and len(response.choices) >0:
+            if hasattr(response.choices[0],"message"):
+                return response.choices[0].message.content
+    except (IndexError, AttributeError) as e:
+        logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
+        return None
+
+def extract_provider_name(instance):
+    provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
+    return provider_url.unwrap_or(None)
+
+
+def extract_inference_endpoint(instance):
+    inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
+    if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
+        inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
+    return inference_endpoint.unwrap_or(extract_provider_name(instance))
+
+def resolve_from_alias(my_map, alias):
+    """Find a alias that is not none from list of aliases"""
+
+    for i in alias:
+        if i in my_map.keys():
+            return my_map[i]
+    return None
+
+
+def update_input_span_events(kwargs):
+    if 'input' in kwargs and isinstance(kwargs['input'], list):
+        query = ' '.join(kwargs['input'])
+        return query
+
+
+def update_output_span_events(results):
+    if hasattr(results,'data') and isinstance(results.data, list):
+        embeddings = results.data
+        embedding_strings = [f"index={e.index}, embedding={e.embedding}" for e in embeddings]
+        output = '\n'.join(embedding_strings)
+        if len(output) > 100:
+            output = output[:100] + "..."
+        return output
+
+
+def update_span_from_llm_response(response):
+    meta_dict = {}
+    if response is not None and hasattr(response, "usage"):
+        if hasattr(response, "usage") and response.usage is not None:
+            token_usage = response.usage
+        else:
+            response_metadata = response.response_metadata
+            token_usage = response_metadata.get("token_usage")
+        if token_usage is not None:
+            meta_dict.update(
+                {"completion_tokens": getattr(response.usage, "completion_tokens", None)})
+            meta_dict.update({"prompt_tokens": getattr(response.usage, "prompt_tokens", None)})
+            meta_dict.update({"total_tokens": getattr(response.usage, "total_tokens", None)})
+    return meta_dict
+
+def extract_vector_input(vector_input: dict):
+    if 'input' in vector_input:
+        return vector_input['input']
+    return ""
+
+def extract_vector_output(vector_output):
+    try:
+        if hasattr(vector_output, 'data') and len(vector_output.data) > 0:
+            return vector_output.data[0].embedding
+    except Exception as e:
+        pass
+    return ""
+
+def get_inference_type(instance):
+    inference_type: Option[str] = try_option(getattr, instance._client, '_api_version')
+    if inference_type.unwrap_or(None):
+        return 'azure_openai'
+    else:
+        return 'openai'
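The helpers above operate on plain kwargs and response objects, so their behaviour can be illustrated with hand-built inputs. The values below are invented for the example; only the two imported functions come from the module shown in this hunk.

# Illustrative inputs; message text and model name are made up.
from monocle_apptrace.instrumentation.metamodel.openai._helper import (
    extract_messages,
    resolve_from_alias,
)

kwargs = {
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What does Monocle trace?"},
    ]
}
print(extract_messages(kwargs))
# ["{'system': 'You are a helpful assistant.'}", "{'user': 'What does Monocle trace?'}"]

print(resolve_from_alias({"model": "gpt-4o-mini"}, ["model", "model_name"]))
# gpt-4o-mini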
monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py ADDED
@@ -0,0 +1,71 @@
+from monocle_apptrace.instrumentation.metamodel.openai import (
+    _helper,
+)
+from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
+
+INFERENCE = {
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type ,name , deployment , inference_endpoint",
+                "attribute": "type",
+                "accessor": lambda arguments: 'inference.' + (_helper.get_inference_type(arguments['instance'])) or 'openai'
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": lambda arguments: _helper.extract_provider_name(arguments['instance'])
+            },
+            {
+                "attribute": "deployment",
+                "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base', 'endpoint']) or _helper.extract_inference_endpoint(arguments['instance'])
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": lambda arguments: resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            },
+            {
+                "attribute": "type",
+                "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            }
+        ]
+    ],
+    "events": [
+        {"name": "data.input",
+         "attributes": [
+
+             {
+                 "_comment": "this is instruction and user query to LLM",
+                 "attribute": "input",
+                 "accessor": lambda arguments: _helper.extract_messages(arguments['kwargs'])
+             }
+         ]
+         },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+                }
+            ]
+        },
+        {
+            "name": "metadata",
+            "attributes": [
+                {
+                    "_comment": "this is metadata usage from LLM",
+                    "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'])
+                }
+            ]
+        }
+    ]
+}
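Judging from the lambdas above, every accessor receives a single arguments dict carrying the wrapped call's 'instance', 'kwargs' and 'result', and returns the attribute or event value. A small, hypothetical evaluation of the model-name group, the only group that needs nothing but kwargs, with an invented model value:

# Illustrative only: evaluate the "LLM Model" accessor group with fabricated kwargs.
from monocle_apptrace.instrumentation.metamodel.openai.entities.inference import INFERENCE

arguments = {"kwargs": {"model": "gpt-4o-mini"}}   # invented value
for attr in INFERENCE["attributes"][1]:            # second group: model name/type
    print(attr["attribute"], "=", attr["accessor"](arguments))
# name = gpt-4o-mini
# type = model.llm.gpt-4o-mini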
monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py ADDED
@@ -0,0 +1,43 @@
+from monocle_apptrace.instrumentation.metamodel.openai import (
+    _helper,
+)
+from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
+
+RETRIEVAL = {
+    "type": "retrieval",
+    "attributes": [
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": lambda arguments: resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            },
+            {
+                "attribute": "type",
+                "accessor": lambda arguments: 'model.embedding.' + resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            }
+        ]
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "this is instruction and user query to LLM",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.update_input_span_events(arguments['kwargs'])
+                }
+            ]
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.update_output_span_events(arguments['result'])
+                }
+            ]
+        }
+    ]
+}
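The data.output accessor above delegates to update_output_span_events, which renders each embedding as "index=..., embedding=..." and truncates anything longer than 100 characters. An illustrative call with a stand-in result object (the real one is the OpenAI SDK's embeddings response):

# Illustrative only: SimpleNamespace stands in for the SDK's embeddings response.
from types import SimpleNamespace
from monocle_apptrace.instrumentation.metamodel.openai._helper import update_output_span_events

fake_result = SimpleNamespace(data=[SimpleNamespace(index=0, embedding=[0.1] * 64)])
print(update_output_span_events(fake_result))
# index=0, embedding=[0.1, 0.1, 0.1, ...   (cut off at 100 characters and suffixed with "...")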
monocle_apptrace/instrumentation/metamodel/openai/methods.py ADDED
@@ -0,0 +1,45 @@
+from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
+from monocle_apptrace.instrumentation.metamodel.openai.entities.inference import (
+    INFERENCE,
+)
+from monocle_apptrace.instrumentation.metamodel.openai.entities.retrieval import (
+    RETRIEVAL,
+)
+
+OPENAI_METHODS = [
+    {
+        "package": "openai.resources.chat.completions",
+        "object": "Completions",
+        "method": "create",
+        "wrapper_method": task_wrapper,
+        "span_handler": "non_framework_handler",
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "openai.resources.chat.completions",
+        "object": "AsyncCompletions",
+        "method": "create",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "non_framework_handler",
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "openai.resources.embeddings",
+        "object": "Embeddings",
+        "method": "create",
+        "wrapper_method": task_wrapper,
+        "span_name": "openai_embeddings",
+        "span_handler": "non_framework_handler",
+        "output_processor": RETRIEVAL
+    },
+    {
+        "package": "openai.resources.embeddings",
+        "object": "AsyncEmbeddings",
+        "method": "create",
+        "wrapper_method": atask_wrapper,
+        "span_name": "openai_embeddings",
+        "span_handler": "non_framework_handler",
+        "output_processor": RETRIEVAL
+    }
+
+]
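The four entries above cover the sync and async chat-completion and embedding entry points of the OpenAI SDK, all routed through the non_framework_handler span handler. A quick, illustrative way to see which call maps to which output processor:

# Illustrative only: group the wrapped OpenAI call sites by their output processor.
from monocle_apptrace.instrumentation.metamodel.openai.methods import OPENAI_METHODS

for entry in OPENAI_METHODS:
    kind = "INFERENCE" if entry["output_processor"].get("type") == "inference" else "RETRIEVAL"
    print(f"{entry['object']}.{entry['method']} ({entry['span_handler']}) -> {kind}")
# Completions.create (non_framework_handler) -> INFERENCE
# AsyncCompletions.create (non_framework_handler) -> INFERENCE
# Embeddings.create (non_framework_handler) -> RETRIEVAL
# AsyncEmbeddings.create (non_framework_handler) -> RETRIEVAL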
monocle_apptrace/instrumentation/metamodel/requests/_helper.py ADDED
@@ -0,0 +1,31 @@
+import os
+from monocle_apptrace.instrumentation.metamodel.requests import allowed_urls
+from opentelemetry.propagate import inject
+from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+
+def request_pre_task_processor(kwargs):
+    # add traceparent to the request headers in kwargs
+    if 'headers' not in kwargs:
+        headers = {}
+    else:
+        headers = kwargs['headers'].copy()
+    inject(headers)
+    kwargs['headers'] = headers
+
+def request_skip_span(kwargs) -> bool:
+    # add traceparent to the request headers in kwargs
+    if 'url' in kwargs:
+        url:str = kwargs['url']
+        for allowed_url in allowed_urls:
+            if url.startswith(allowed_url.strip()):
+                return False
+    return True
+
+class RequestSpanHandler(SpanHandler):
+
+    def pre_task_processing(self, to_wrap, wrapped, instance, args,kwargs, span):
+        request_pre_task_processor(kwargs)
+        super().pre_task_processing(to_wrap, wrapped, instance, args,kwargs,span)
+
+    def skip_span(self, to_wrap, wrapped, instance, args, kwargs) -> bool:
+        return request_skip_span(kwargs)
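Per the code above, the handler injects W3C trace-context headers into the outgoing requests call and only records a span when the URL matches one of allowed_urls. A hedged illustration of the header side effect; the traceparent value shown is a placeholder and is only added when a span is actually active:

# Illustrative only: shows the in-place header update performed before the wrapped call.
from monocle_apptrace.instrumentation.metamodel.requests._helper import request_pre_task_processor

call_kwargs = {"headers": {"Accept": "application/json"}}
request_pre_task_processor(call_kwargs)
print(call_kwargs["headers"])
# {'Accept': 'application/json', 'traceparent': '00-<trace-id>-<span-id>-01'}  (when a span is active)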
monocle_apptrace/instrumentation/metamodel/requests/methods.py ADDED
@@ -0,0 +1,12 @@
+from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+
+REQUESTS_METHODS = [
+    {
+        "package": "requests.sessions",
+        "object": "Session",
+        "method": "request",
+        "span_name": "http_requests",
+        "wrapper_method": task_wrapper,
+        "span_handler":"request_handler",
+    }
+]
{monocle_apptrace-0.2.0.dist-info → monocle_apptrace-0.3.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: monocle_apptrace
-Version: 0.2.0
+Version: 0.3.0
 Summary: package with monocle genAI tracing
 Project-URL: Homepage, https://github.com/monocle2ai/monocle
 Project-URL: Issues, https://github.com/monocle2ai/monocle/issues
@@ -20,19 +20,36 @@ Requires-Dist: boto3==1.35.19; extra == 'aws'
 Provides-Extra: azure
 Requires-Dist: azure-storage-blob==12.22.0; extra == 'azure'
 Provides-Extra: dev
+Requires-Dist: azure-storage-blob==12.22.0; extra == 'dev'
+Requires-Dist: boto3==1.34.131; extra == 'dev'
+Requires-Dist: chromadb==0.4.22; extra == 'dev'
 Requires-Dist: datasets==2.20.0; extra == 'dev'
 Requires-Dist: faiss-cpu==1.8.0; extra == 'dev'
+Requires-Dist: flask; extra == 'dev'
+Requires-Dist: haystack-ai==2.3.0; extra == 'dev'
 Requires-Dist: instructorembedding==1.0.1; extra == 'dev'
+Requires-Dist: langchain-aws==0.1.10; extra == 'dev'
 Requires-Dist: langchain-chroma==0.1.1; extra == 'dev'
 Requires-Dist: langchain-community==0.2.5; extra == 'dev'
+Requires-Dist: langchain-mistralai==0.1.13; extra == 'dev'
 Requires-Dist: langchain-openai==0.1.8; extra == 'dev'
 Requires-Dist: langchain==0.2.5; extra == 'dev'
+Requires-Dist: langchainhub==0.1.21; extra == 'dev'
+Requires-Dist: langgraph==0.2.68; extra == 'dev'
 Requires-Dist: llama-index-embeddings-huggingface==0.2.0; extra == 'dev'
+Requires-Dist: llama-index-llms-azure-openai==0.1.9; extra == 'dev'
+Requires-Dist: llama-index-llms-mistralai==0.1.20; extra == 'dev'
 Requires-Dist: llama-index-vector-stores-chroma==0.1.9; extra == 'dev'
+Requires-Dist: llama-index-vector-stores-opensearch==0.1.10; extra == 'dev'
 Requires-Dist: llama-index==0.10.30; extra == 'dev'
+Requires-Dist: mistral-haystack==0.0.2; extra == 'dev'
 Requires-Dist: numpy==1.26.4; extra == 'dev'
+Requires-Dist: opendal==0.45.14; extra == 'dev'
+Requires-Dist: opensearch-haystack==1.2.0; extra == 'dev'
+Requires-Dist: opentelemetry-instrumentation-flask; extra == 'dev'
 Requires-Dist: parameterized==0.9.0; extra == 'dev'
 Requires-Dist: pytest==8.0.0; extra == 'dev'
+Requires-Dist: requests-aws4auth==1.2.3; extra == 'dev'
 Requires-Dist: sentence-transformers==2.6.1; extra == 'dev'
 Requires-Dist: types-requests==2.31.0.20240106; extra == 'dev'
 Description-Content-Type: text/markdown
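The dev extra above now also pulls in Flask, Haystack, LangGraph, MistralAI, OpenSearch and OpenDAL dependencies, matching the new metamodel and exporter modules in this release. As with any packaging extra, it is installed with a standard pip invocation such as pip install "monocle_apptrace[dev]"; the aws and azure extras for the cloud exporters are unchanged.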
monocle_apptrace-0.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,68 @@
+monocle_apptrace/README.md,sha256=T5NFC01bF8VR0oVnAX_n0bhsEtttwqfTxDNAe5Y_ivE,3765
+monocle_apptrace/__init__.py,sha256=XtoX7gHUSZgkY1nry8IFny8RslPhutZQUuEkqIrBzFQ,30
+monocle_apptrace/__main__.py,sha256=wBwV0fpwIuj9XSorPRP1MpkHHkZPM9Tg-lIFj1nokkU,609
+monocle_apptrace/exporters/base_exporter.py,sha256=Gov_QKp5fonVZ-YdNM2ynoPot7GCaSNmKbCHIP3bDlE,1680
+monocle_apptrace/exporters/exporter_processor.py,sha256=-spCIJ_UfJ0fax_jE-ii3ODQBwtnHZgYIGVNd91Q718,6298
+monocle_apptrace/exporters/file_exporter.py,sha256=BSEYUb9Z_dascR9i_FL_HxnxnxjyxtR_5teoSjIpZQc,3198
+monocle_apptrace/exporters/monocle_exporters.py,sha256=TKULSQDZLIrf76NMhxYfsnG3vV11B1l2liI1wEWGaLo,2759
+monocle_apptrace/exporters/aws/s3_exporter.py,sha256=fvUUuukFM6hIliGqP61WXlVMFbxlIQtMgT3iwjUYDTA,8187
+monocle_apptrace/exporters/aws/s3_exporter_opendal.py,sha256=0aEUxdMgJaDUwqjw0DqlCMr8kjl01KgwUt3_RRCVFds,5917
+monocle_apptrace/exporters/azure/blob_exporter.py,sha256=75G8rcISQ0sZCECN2G67-DGFkJGGu2clNyrcoxEm9H8,7371
+monocle_apptrace/exporters/azure/blob_exporter_opendal.py,sha256=wQUtciyFMD28tpWTiP0-kBjUuxy4LuQSo04aMuHwtb4,7140
+monocle_apptrace/exporters/okahu/okahu_exporter.py,sha256=qj7paDHbWbYudH18xanUuxmhOHHlYEVj1kpzK7f2OTY,4601
+monocle_apptrace/instrumentation/__init__.py,sha256=oa412OuokRm9Vf3XlCJLqpZjz9ZcuxAKxnEBvOK7u2M,21
+monocle_apptrace/instrumentation/common/__init__.py,sha256=_YD94HPvDvHcrkt9Ll11BaHNzJ4W56GUJ7GPjp_diyA,223
+monocle_apptrace/instrumentation/common/constants.py,sha256=SnZBXiv4g4h1FwpXU7yomddbrwyAjXQWC1HkE19EQOI,2640
+monocle_apptrace/instrumentation/common/instrumentor.py,sha256=v-ZriWJdHlSOWkwprlwDaxm6kOVKyqehZ3m_kbECm0k,15087
+monocle_apptrace/instrumentation/common/span_handler.py,sha256=WHvLc3TSqsrv62qJ_qclC57QT0bFoTCJ4hc-qe3SOYg,10229
+monocle_apptrace/instrumentation/common/utils.py,sha256=iGxvC8V-2uLbrhFG9u9NKOyHkbd1moIkg6ukujDT88Y,12023
+monocle_apptrace/instrumentation/common/wrapper.py,sha256=FNam-sz5gbTxa0Ym6-xyVhCA5HVAEObKDdQFubasIpU,4474
+monocle_apptrace/instrumentation/common/wrapper_method.py,sha256=7k_rHOzbFRfeW40CMfa78wwyPVfSgcXiyDsgezjDcaA,3188
+monocle_apptrace/instrumentation/metamodel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/botocore/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/botocore/_helper.py,sha256=JIYtaN57OXKO9zPuxMZzDycJbgHgAQaQUkwuCI_SzF8,3744
+monocle_apptrace/instrumentation/metamodel/botocore/methods.py,sha256=LzmjbZjDWy7Uozc0chNjWG6PZhLngh_KJe5L6rw5rqI,452
+monocle_apptrace/instrumentation/metamodel/botocore/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py,sha256=JfTRmrxgU6e-b3dBbunWt5ObY_Ry_ZBYJBwKJB5UlJ8,2255
+monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py,sha256=Vfbx4g7P3_9iXXCySuqc2FOU_CTP-OZy7PHc7D2qOls,1419
+monocle_apptrace/instrumentation/metamodel/flask/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/flask/_helper.py,sha256=AcQ5F6_IDmu9PXaeKKeiGIyq2I2YzA7wu1cvLzR-uyU,1175
+monocle_apptrace/instrumentation/metamodel/flask/methods.py,sha256=QkWHX4wKQf_GiJBHmiS9_JD2CiKMTCWMcig2dxAiKgU,340
+monocle_apptrace/instrumentation/metamodel/haystack/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/haystack/_helper.py,sha256=VgTrKn7rZMcv4OVdVEBI76G-5B0Rux4guiI6Nsso14s,4833
+monocle_apptrace/instrumentation/metamodel/haystack/methods.py,sha256=1XpEfU8-cczTiH2KbxGgSm-27V7xk1j5LxVciWfNuJo,1467
+monocle_apptrace/instrumentation/metamodel/haystack/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py,sha256=bCAp8qpw2GGt1RRZcrucOGqP_Z9gkN8iCCQh6Mlf_Z0,3022
+monocle_apptrace/instrumentation/metamodel/haystack/entities/retrieval.py,sha256=nq3lsk2qFxXqwrAHsBt8zrh4ZVGAJABkPtylrjUCCqc,2357
+monocle_apptrace/instrumentation/metamodel/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/langchain/_helper.py,sha256=g88Hz4n25ALJnjYFhdbdoIlSFUJUkN-8gho8ru7txEQ,4910
+monocle_apptrace/instrumentation/metamodel/langchain/methods.py,sha256=hlLR43KXwiwYshvgoBrlqMOemFifhpgeR7smTb4zkCc,3225
+monocle_apptrace/instrumentation/metamodel/langchain/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py,sha256=2CNHloheb4LG7rPEUIF3E3M1cuc8CWVZf9J6l_hvK1E,2764
+monocle_apptrace/instrumentation/metamodel/langchain/entities/retrieval.py,sha256=r4UqTCT5vOfkbz9lwoTRoiMkUUJtPMwqOYbqo53A6K8,2039
+monocle_apptrace/instrumentation/metamodel/langgraph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py,sha256=-XmAbhkgqwaunFZa-BP0zWZ3e-uD-ihSszbn5Cz75yc,2043
+monocle_apptrace/instrumentation/metamodel/langgraph/methods.py,sha256=gnrKhcEPoy_qjyZWEkKZAUGTjRHvE3rqm3b4hQZoWMQ,453
+monocle_apptrace/instrumentation/metamodel/langgraph/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py,sha256=OaPeQ8pkyEP5j6ad537MTPp0BdDI7nabxf60u66Dzbk,1659
+monocle_apptrace/instrumentation/metamodel/llamaindex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py,sha256=5nqG-bSW3-ZEADZcwlHXIhhGZoTZu2a5Sc3Lo_AByeo,6199
+monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py,sha256=3Lr7C3GPQMScLX7gQTrPxU7hs8TTIYFTXApAGyB2yjU,3137
+monocle_apptrace/instrumentation/metamodel/llamaindex/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py,sha256=g7IEwFMLjYvxljX7iHoYSPJW6k-wC7Z3i_y2qlNEZcs,1338
+monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py,sha256=Hich1AoEHnCUvh0MIISNOjbH9t71eex_IsY_4j3JN5U,2727
+monocle_apptrace/instrumentation/metamodel/llamaindex/entities/retrieval.py,sha256=QBF1nrqog5KHh925jiY2V-kejL6iVLKUowZmqUDoiJ4,1870
+monocle_apptrace/instrumentation/metamodel/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/openai/_helper.py,sha256=VDjpKRXXbOTma3clD54SYG1TaMXr-To0S3yotp7_9aY,3877
+monocle_apptrace/instrumentation/metamodel/openai/methods.py,sha256=bQ0cW_9Ry5bKKsYGzatys-R6wBW3kpYha5QX328AWLM,1420
+monocle_apptrace/instrumentation/metamodel/openai/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py,sha256=Egpx7ROZvwH6E3hqDWXa1gCXiNijnH3LD0HqQWhfspg,2716
+monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py,sha256=LU7aec302ZqPrs9MzFWU-JTnhK8OpYfgQKMmktlD6-8,1457
+monocle_apptrace/instrumentation/metamodel/requests/__init__.py,sha256=mg04UgoPzzcH-cPOahYUqN9T-TolZyOZipnBwDg5TP8,250
+monocle_apptrace/instrumentation/metamodel/requests/_helper.py,sha256=lKU7py-M0eweHA_LWatwdyWbSGSlQNhScGZ43Xko7us,1115
+monocle_apptrace/instrumentation/metamodel/requests/methods.py,sha256=OJtosy_07xy01o5Qv-53--aCLQLkr82NZtyi2t6ZDEM,326
+monocle_apptrace-0.3.0.dist-info/METADATA,sha256=iRg3IAP7fZNrAfUCB18xVIz2732dDorR7O-JzngPXKM,6312
+monocle_apptrace-0.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+monocle_apptrace-0.3.0.dist-info/licenses/LICENSE,sha256=ay9trLiP5I7ZsFXo6AqtkLYdRqe5S9r-DrPOvsNlZrg,9136
+monocle_apptrace-0.3.0.dist-info/licenses/NOTICE,sha256=9jn4xtwM_uUetJMx5WqGnhrR7MIhpoRlpokjSTlyt8c,112
+monocle_apptrace-0.3.0.dist-info/RECORD,,
monocle_apptrace/constants.py DELETED
@@ -1,22 +0,0 @@
-# Azure environment constants
-AZURE_ML_ENDPOINT_ENV_NAME = "AZUREML_ENTRY_SCRIPT"
-AZURE_FUNCTION_WORKER_ENV_NAME = "FUNCTIONS_WORKER_RUNTIME"
-AZURE_APP_SERVICE_ENV_NAME = "WEBSITE_SITE_NAME"
-AWS_LAMBDA_ENV_NAME = "AWS_LAMBDA_RUNTIME_API"
-
-# Azure naming reference can be found here
-# https://learn.microsoft.com/en-us/azure/cloud-adoption-framework/ready/azure-best-practices/resource-abbreviations
-AZURE_FUNCTION_NAME = "azure.func"
-AZURE_APP_SERVICE_NAME = "azure.asp"
-AZURE_ML_SERVICE_NAME = "azure.mlw"
-AWS_LAMBDA_SERVICE_NAME = "aws.lambda"
-
-azure_service_map = {
-    AZURE_ML_ENDPOINT_ENV_NAME: AZURE_ML_SERVICE_NAME,
-    AZURE_APP_SERVICE_ENV_NAME: AZURE_APP_SERVICE_NAME,
-    AZURE_FUNCTION_WORKER_ENV_NAME: AZURE_FUNCTION_NAME
-}
-
-aws_service_map = {
-    AWS_LAMBDA_ENV_NAME: AWS_LAMBDA_SERVICE_NAME
-}
monocle_apptrace/haystack/__init__.py DELETED
@@ -1,9 +0,0 @@
-import os
-import logging
-from monocle_apptrace.utils import get_wrapper_methods_config
-
-logger = logging.getLogger(__name__)
-parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
-HAYSTACK_METHODS = get_wrapper_methods_config(
-    wrapper_methods_config_path=os.path.join(parent_dir, 'metamodel', 'maps', 'haystack_methods.json'),
-    attributes_config_base_path=os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
monocle_apptrace/haystack/wrap_node.py DELETED
@@ -1,27 +0,0 @@
-
-
-import logging
-from opentelemetry import context as context_api
-from opentelemetry.context import attach, set_value
-from opentelemetry.instrumentation.utils import (
-    _SUPPRESS_INSTRUMENTATION_KEY,
-)
-from monocle_apptrace.wrap_common import WORKFLOW_TYPE_MAP, with_tracer_wrapper
-
-logger = logging.getLogger(__name__)
-
-
-@with_tracer_wrapper
-def wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
-    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
-        return wrapped(*args, **kwargs)
-    name = instance.name
-    attach(set_value("workflow_name", name))
-    with tracer.start_as_current_span(f"{name}.task") as span:
-        workflow_name = span.resource.attributes.get("service.name")
-        span.set_attribute("workflow_name",workflow_name)
-        span.set_attribute("workflow_type", WORKFLOW_TYPE_MAP["haystack"])
-
-        response = wrapped(*args, **kwargs)
-
-        return response