monocle-apptrace 0.3.0b2__py3-none-any.whl → 0.3.0b3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monocle_apptrace/exporters/aws/s3_exporter.py +1 -1
- monocle_apptrace/exporters/aws/s3_exporter_opendal.py +126 -0
- monocle_apptrace/exporters/azure/blob_exporter_opendal.py +147 -0
- monocle_apptrace/exporters/monocle_exporters.py +38 -20
- monocle_apptrace/instrumentation/__init__.py +0 -0
- monocle_apptrace/instrumentation/common/__init__.py +0 -0
- monocle_apptrace/{constants.py → instrumentation/common/constants.py} +13 -0
- monocle_apptrace/instrumentation/common/instrumentor.py +208 -0
- monocle_apptrace/instrumentation/common/span_handler.py +154 -0
- monocle_apptrace/instrumentation/common/utils.py +171 -0
- monocle_apptrace/instrumentation/common/wrapper.py +69 -0
- monocle_apptrace/instrumentation/common/wrapper_method.py +45 -0
- monocle_apptrace/instrumentation/metamodel/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/botocore/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +126 -0
- monocle_apptrace/instrumentation/metamodel/botocore/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +65 -0
- monocle_apptrace/instrumentation/metamodel/botocore/methods.py +16 -0
- monocle_apptrace/instrumentation/metamodel/haystack/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +127 -0
- monocle_apptrace/instrumentation/metamodel/haystack/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +76 -0
- monocle_apptrace/instrumentation/metamodel/haystack/entities/retrieval.py +61 -0
- monocle_apptrace/instrumentation/metamodel/haystack/methods.py +42 -0
- monocle_apptrace/instrumentation/metamodel/langchain/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +121 -0
- monocle_apptrace/instrumentation/metamodel/langchain/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +71 -0
- monocle_apptrace/instrumentation/metamodel/langchain/entities/retrieval.py +58 -0
- monocle_apptrace/instrumentation/metamodel/langchain/methods.py +105 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +154 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +71 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/retrieval.py +57 -0
- monocle_apptrace/{metamodel/maps/llamaindex_methods.json → instrumentation/metamodel/llamaindex/methods.py} +28 -31
- {monocle_apptrace-0.3.0b2.dist-info → monocle_apptrace-0.3.0b3.dist-info}/METADATA +14 -1
- monocle_apptrace-0.3.0b3.dist-info/RECORD +48 -0
- monocle_apptrace/botocore/__init__.py +0 -9
- monocle_apptrace/haystack/__init__.py +0 -9
- monocle_apptrace/haystack/wrap_pipeline.py +0 -63
- monocle_apptrace/instrumentor.py +0 -121
- monocle_apptrace/langchain/__init__.py +0 -9
- monocle_apptrace/llamaindex/__init__.py +0 -16
- monocle_apptrace/message_processing.py +0 -80
- monocle_apptrace/metamodel/README.md +0 -47
- monocle_apptrace/metamodel/entities/README.md +0 -77
- monocle_apptrace/metamodel/entities/app_hosting_types.json +0 -29
- monocle_apptrace/metamodel/entities/entities.json +0 -49
- monocle_apptrace/metamodel/entities/inference_types.json +0 -33
- monocle_apptrace/metamodel/entities/model_types.json +0 -41
- monocle_apptrace/metamodel/entities/vector_store_types.json +0 -25
- monocle_apptrace/metamodel/entities/workflow_types.json +0 -22
- monocle_apptrace/metamodel/maps/attributes/inference/botocore_entities.json +0 -27
- monocle_apptrace/metamodel/maps/attributes/inference/haystack_entities.json +0 -57
- monocle_apptrace/metamodel/maps/attributes/inference/langchain_entities.json +0 -57
- monocle_apptrace/metamodel/maps/attributes/inference/llamaindex_entities.json +0 -57
- monocle_apptrace/metamodel/maps/attributes/retrieval/haystack_entities.json +0 -31
- monocle_apptrace/metamodel/maps/attributes/retrieval/langchain_entities.json +0 -31
- monocle_apptrace/metamodel/maps/attributes/retrieval/llamaindex_entities.json +0 -31
- monocle_apptrace/metamodel/maps/botocore_methods.json +0 -13
- monocle_apptrace/metamodel/maps/haystack_methods.json +0 -45
- monocle_apptrace/metamodel/maps/langchain_methods.json +0 -129
- monocle_apptrace/metamodel/spans/README.md +0 -121
- monocle_apptrace/metamodel/spans/span_example.json +0 -140
- monocle_apptrace/metamodel/spans/span_format.json +0 -55
- monocle_apptrace/metamodel/spans/span_types.json +0 -16
- monocle_apptrace/utils.py +0 -252
- monocle_apptrace/wrap_common.py +0 -511
- monocle_apptrace/wrapper.py +0 -27
- monocle_apptrace-0.3.0b2.dist-info/RECORD +0 -48
- {monocle_apptrace-0.3.0b2.dist-info → monocle_apptrace-0.3.0b3.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.3.0b2.dist-info → monocle_apptrace-0.3.0b3.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.3.0b2.dist-info → monocle_apptrace-0.3.0b3.dist-info}/licenses/NOTICE +0 -0
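The listing above shows the flat top-level modules (instrumentor.py, wrapper.py, utils.py, the metamodel JSON maps) being replaced by the new monocle_apptrace.instrumentation package. A hedged sketch of what the import change would look like for an application; this diff only shows the module paths, so the exported symbol and its parameters (setup_monocle_telemetry, workflow_name) are assumptions based on the old instrumentor module, not confirmed here:

```python
# Hedged sketch of the import-path change implied by the file moves above.
# setup_monocle_telemetry and its workflow_name parameter are assumed, not
# shown in this diff.

# 0.3.0b2 and earlier:
# from monocle_apptrace.instrumentor import setup_monocle_telemetry

# 0.3.0b3:
from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry

setup_monocle_telemetry(workflow_name="coffee-bot")
```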
@@ -1,57 +0,0 @@
-{
-  "type": "inference",
-  "attributes": [
-    [
-      {
-        "_comment": "provider type ,name , deployment , inference_endpoint",
-        "attribute": "type",
-        "accessor": "lambda arguments:'inference.azure_oai'"
-      },
-      {
-        "attribute": "provider_name",
-        "accessor": "lambda arguments:arguments['kwargs']['provider_name']"
-      },
-      {
-        "attribute": "deployment",
-        "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
-      },
-      {
-        "attribute": "inference_endpoint",
-        "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['kwargs']['inference_endpoint']"
-      }
-    ],
-    [
-      {
-        "_comment": "LLM Model",
-        "attribute": "name",
-        "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['instance'].model_id"
-      },
-      {
-        "attribute": "type",
-        "accessor": "lambda arguments: 'model.llm.'+ (resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['instance'].model_id)"
-      }
-    ]
-  ],
-  "events": [
-    { "name":"data.input",
-      "attributes": [
-
-        {
-          "_comment": "this is instruction and user query to LLM",
-          "attribute": "input",
-          "accessor": "lambda arguments: extract_messages(arguments['args'])"
-        }
-      ]
-    },
-    {
-      "name":"data.output",
-      "attributes": [
-        {
-          "_comment": "this is response from LLM",
-          "attribute": "response",
-          "accessor": "lambda response: extract_assistant_message(response)"
-        }
-      ]
-    }
-  ]
-}
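The deleted entity maps above store each attribute accessor as the source text of a one-argument lambda. A minimal sketch of how such an accessor string could be evaluated against the arguments captured at a wrapped call; the resolve_from_alias stub and FakeClient class below are illustrative, not monocle's actual helpers:

```python
# Minimal sketch (not monocle's actual loader): evaluating an "accessor"
# string from an entity map against the arguments of a wrapped call.
def resolve_from_alias(d, aliases):
    # Stub of the helper the accessors reference: return the value of the
    # first alias key present in the dict, else None.
    for key in aliases:
        if key in d:
            return d[key]
    return None

def apply_accessor(accessor_src, arguments):
    # The accessor is stored as Python source for a single-argument lambda;
    # eval it with the helpers it expects in scope, then call it.
    fn = eval(accessor_src, {"resolve_from_alias": resolve_from_alias})
    return fn(arguments)

class FakeClient:
    # Hypothetical instrumented instance with one of the aliased fields set.
    def __init__(self):
        self.azure_deployment = "gpt-35-turbo-dev"

arguments = {"instance": FakeClient(), "args": (), "kwargs": {}}
accessor = ("lambda arguments: resolve_from_alias(arguments['instance'].__dict__,"
            " ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])")
print(apply_accessor(accessor, arguments))  # -> gpt-35-turbo-dev
```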
@@ -1,57 +0,0 @@
-{
-  "type": "inference",
-  "attributes": [
-    [
-      {
-        "_comment": "provider type ,name , deployment , inference_endpoint",
-        "attribute": "type",
-        "accessor": "lambda arguments:'inference.azure_oai'"
-      },
-      {
-        "attribute": "provider_name",
-        "accessor": "lambda arguments:arguments['kwargs']['provider_name']"
-      },
-      {
-        "attribute": "deployment",
-        "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
-      },
-      {
-        "attribute": "inference_endpoint",
-        "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['kwargs']['inference_endpoint']"
-      }
-    ],
-    [
-      {
-        "_comment": "LLM Model",
-        "attribute": "name",
-        "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
-      },
-      {
-        "attribute": "type",
-        "accessor": "lambda arguments: 'model.llm.'+resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
-      }
-    ]
-  ],
-  "events": [
-    { "name":"data.input",
-      "attributes": [
-
-        {
-          "_comment": "this is instruction and user query to LLM",
-          "attribute": "input",
-          "accessor": "lambda arguments: extract_messages(arguments['args'])"
-        }
-      ]
-    },
-    {
-      "name":"data.output",
-      "attributes": [
-        {
-          "_comment": "this is response from LLM",
-          "attribute": "response",
-          "accessor": "lambda response: extract_assistant_message(response)"
-        }
-      ]
-    }
-  ]
-}
@@ -1,31 +0,0 @@
-{
-  "type": "retrieval",
-  "attributes": [
-    [
-      {
-        "_comment": "vector store name and type",
-        "attribute": "name",
-        "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['document_store', '_document_store']).__class__.__name__"
-      },
-      {
-        "attribute": "type",
-        "accessor": "lambda arguments: 'vectorstore.'+resolve_from_alias(arguments['instance'].__dict__, ['document_store', '_document_store']).__class__.__name__"
-      },
-      {
-        "attribute": "deployment",
-        "accessor": "lambda arguments: get_vectorstore_deployment(resolve_from_alias(arguments['instance'].__dict__, ['document_store', '_document_store']).__dict__)"
-      }
-    ],
-    [
-      {
-        "_comment": "embedding model name and type",
-        "attribute": "name",
-        "accessor": "lambda arguments: get_embedding_model()"
-      },
-      {
-        "attribute": "type",
-        "accessor": "lambda arguments: 'model.embedding.'+get_embedding_model()"
-      }
-    ]
-  ]
-}
@@ -1,31 +0,0 @@
-{
-  "type": "retrieval",
-  "attributes": [
-    [
-      {
-        "_comment": "vector store name and type",
-        "attribute": "name",
-        "accessor": "lambda arguments: type(arguments['instance'].vectorstore).__name__"
-      },
-      {
-        "attribute": "type",
-        "accessor": "lambda arguments: 'vectorstore.'+type(arguments['instance'].vectorstore).__name__"
-      },
-      {
-        "attribute": "deployment",
-        "accessor": "lambda arguments: get_vectorstore_deployment(arguments['instance'].vectorstore.__dict__)"
-      }
-    ],
-    [
-      {
-        "_comment": "embedding model name and type",
-        "attribute": "name",
-        "accessor": "lambda arguments: arguments['instance'].vectorstore.embeddings.model"
-      },
-      {
-        "attribute": "type",
-        "accessor": "lambda arguments: 'model.embedding.'+arguments['instance'].vectorstore.embeddings.model"
-      }
-    ]
-  ]
-}
@@ -1,31 +0,0 @@
-{
-  "type": "retrieval",
-  "attributes": [
-    [
-      {
-        "_comment": "vector store name and type",
-        "attribute": "name",
-        "accessor": "lambda arguments: type(arguments['instance']._vector_store).__name__"
-      },
-      {
-        "attribute": "type",
-        "accessor": "lambda arguments: 'vectorstore.'+type(arguments['instance']._vector_store).__name__"
-      },
-      {
-        "attribute": "deployment",
-        "accessor": "lambda arguments: get_vectorstore_deployment(arguments['instance']._vector_store)"
-      }
-    ],
-    [
-      {
-        "_comment": "embedding model name and type",
-        "attribute": "name",
-        "accessor": "lambda arguments: arguments['instance']._embed_model.model_name"
-      },
-      {
-        "attribute": "type",
-        "accessor": "lambda arguments: 'model.embedding.'+arguments['instance']._embed_model.model_name"
-      }
-    ]
-  ]
-}
@@ -1,13 +0,0 @@
-{
-  "wrapper_methods": [
-    {
-      "package": "botocore.client",
-      "object": "ClientCreator",
-      "method": "create_client",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "task_wrapper",
-      "skip_span": true,
-      "output_processor": ["metamodel/maps/attributes/inference/botocore_entities.json"]
-    }
-  ]
-}
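Each wrapper_methods entry above names a target package, object, and method plus the wrapper that should intercept it. A hedged sketch of the wiring such an entry implies, using the wrapt library; the task_wrapper body below is illustrative only, and the real wrapper logic formerly lived in wrap_common.py:

```python
# Illustrative sketch of turning a "wrapper_methods" entry into a runtime
# hook with wrapt (requires botocore installed for this particular entry).
import wrapt

entry = {
    "package": "botocore.client",
    "object": "ClientCreator",
    "method": "create_client",
}

def task_wrapper(wrapped, instance, args, kwargs):
    # A real wrapper would open a span here (unless "skip_span" is set),
    # apply the output_processor accessors, and record input/output events.
    return wrapped(*args, **kwargs)

wrapt.wrap_function_wrapper(
    entry["package"],                        # module to patch
    f"{entry['object']}.{entry['method']}",  # dotted attribute path inside it
    task_wrapper,
)
```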
@@ -1,45 +0,0 @@
-{
-  "wrapper_methods" : [
-    {
-      "package": "haystack_integrations.components.retrievers.opensearch",
-      "object": "OpenSearchEmbeddingRetriever",
-      "method": "run",
-      "span_name": "haystack.retriever",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "task_wrapper",
-      "output_processor": ["metamodel/maps/attributes/retrieval/haystack_entities.json"]
-    },
-    {
-      "package": "haystack.components.retrievers.in_memory",
-      "object": "InMemoryEmbeddingRetriever",
-      "method": "run",
-      "span_name": "haystack.retriever",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "task_wrapper",
-      "output_processor": ["metamodel/maps/attributes/retrieval/haystack_entities.json"]
-    },
-    {
-      "package": "haystack.components.generators.openai",
-      "object": "OpenAIGenerator",
-      "method": "run",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "llm_wrapper",
-      "output_processor": ["metamodel/maps/attributes/inference/haystack_entities.json"]
-    },
-    {
-      "package": "haystack.components.generators.chat.openai",
-      "object": "OpenAIChatGenerator",
-      "method": "run",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "llm_wrapper",
-      "output_processor": ["metamodel/maps/attributes/inference/haystack_entities.json"]
-    },
-    {
-      "package": "haystack.core.pipeline.pipeline",
-      "object": "Pipeline",
-      "method": "run",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "task_wrapper"
-    }
-  ]
-}
@@ -1,129 +0,0 @@
-{
-  "wrapper_methods" : [
-    {
-      "package": "langchain.prompts.base",
-      "object": "BasePromptTemplate",
-      "method": "invoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "task_wrapper"
-    },
-    {
-      "package": "langchain.prompts.base",
-      "object": "BasePromptTemplate",
-      "method": "ainvoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "atask_wrapper"
-    },
-    {
-      "package": "langchain.chat_models.base",
-      "object": "BaseChatModel",
-      "method": "invoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "llm_wrapper",
-      "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
-    },
-    {
-      "package": "langchain.chat_models.base",
-      "object": "BaseChatModel",
-      "method": "ainvoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "allm_wrapper",
-      "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
-    },
-    {
-      "package": "langchain_core.language_models.llms",
-      "object": "LLM",
-      "method": "_generate",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "llm_wrapper",
-      "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
-    },
-    {
-      "package": "langchain_core.language_models.llms",
-      "object": "LLM",
-      "method": "_agenerate",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "allm_wrapper",
-      "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
-    },
-    {
-      "package": "langchain_core.language_models.llms",
-      "object": "BaseLLM",
-      "method": "invoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "llm_wrapper",
-      "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
-    },
-    {
-      "package": "langchain_core.language_models.llms",
-      "object": "BaseLLM",
-      "method": "ainvoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "allm_wrapper",
-      "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
-    },
-    {
-      "package": "langchain_core.retrievers",
-      "object": "BaseRetriever",
-      "method": "invoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "task_wrapper",
-      "output_processor": ["metamodel/maps/attributes/retrieval/langchain_entities.json"]
-
-    },
-    {
-      "package": "langchain_core.retrievers",
-      "object": "BaseRetriever",
-      "method": "ainvoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "atask_wrapper",
-      "output_processor": ["metamodel/maps/attributes/retrieval/langchain_entities.json"]
-    },
-    {
-      "package": "langchain.schema",
-      "object": "BaseOutputParser",
-      "method": "invoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "task_wrapper"
-    },
-    {
-      "package": "langchain.schema",
-      "object": "BaseOutputParser",
-      "method": "ainvoke",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "atask_wrapper"
-    },
-    {
-      "package": "langchain.schema.runnable",
-      "object": "RunnableSequence",
-      "method": "invoke",
-      "span_name": "langchain.workflow",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "task_wrapper"
-    },
-    {
-      "package": "langchain.schema.runnable",
-      "object": "RunnableSequence",
-      "method": "ainvoke",
-      "span_name": "langchain.workflow",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "atask_wrapper"
-    },
-    {
-      "package": "langchain.schema.runnable",
-      "object": "RunnableParallel",
-      "method": "invoke",
-      "span_name": "langchain.workflow",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "task_wrapper"
-    },
-    {
-      "package": "langchain.schema.runnable",
-      "object": "RunnableParallel",
-      "method": "ainvoke",
-      "span_name": "langchain.workflow",
-      "wrapper_package": "wrap_common",
-      "wrapper_method": "atask_wrapper"
-    }
-  ]
-}
@@ -1,121 +0,0 @@
-# Monocle Span format
-Monocle generates [traces](../../../../Monocle_User_Guide.md#traces) which comprises of [spans](../../../../Monocle_User_Guide.md#spans). Note that Monocle trace is [OpenTelemetry format](https://opentelemetry.io/docs/concepts/signals/traces/) compatible. Each span is essentially a step in the execution that interacts with one of more GenAI technology components. This document explains the [span format](./span_format.json) that Monocle generates for GenAI application tracing.
-
-Per the OpenTelemetry convention, each span contains an attribute section and event section. In Monocle generated trace, the attribute sections includes details of GenAI entities used in the span. The event section includes the input, output and metadata related to the execution of that span.
-
-## Attributes
-The attribute sections includes details of GenAI entities used in the span. For each entity used in the span in includes the entity name and entity type. For every type of entity, there are required and optional attributes listed below.
-### Json format
-```json
-attributes:
-    "span.type": "Monocle-span-type",
-    "entity.count": "count-of-entities",
-
-    "entity.<index>.name": "Monocle-Entity-name",
-    "entity.<index>.type": "MonocleEntity.<entity-type>"
-    ...
-```
-The ```entity.count``` indicates total number of entities used in the given span. For each entity, the details are captured in ```entity.<index>.X```. For example,
-```json
-"attributes": {
-    "span.type": "Inference",
-    "entity.count": 2,
-    "entity.1.name": "AzureOpenAI",
-    "entity.1.type": "Inference.Azure_oai",
-    "entity.2.name": "gpt-35-turbo",
-    "entity.2.type": "Model.LLM",
-    "entity.2.model_name": "gpt-35-turbo",
-```
-
-### Entity type specific attributes
-#### MonocleEntity.Workflow
-| Name | Description | Values | Required |
-| - | - | - | - |
-| name | Entity name generated by Monocle | Name String | Required |
-| type | Monocle Entity type | MonocleEntity.Workflow | Required |
-| optional-attribute | Additional attribute specific to entity | | Optional |
-
-### MonocleEntity.Model
-| Name | Description | Values | Required |
-| - | - | - | - |
-| name | Entity name generated by Monocle | Name String | Required |
-| type | Monocle Entity type | MonocleEntity.Model | Required |
-| model_name | Name of model | String | Required |
-| optional-attribute | Additional attribute specific to entity | | Optional |
-
-### MonocleEntity.AppHosting
-| Name | Description | Values | Required |
-| - | - | - | - |
-| name | Entity name generated by Monocle | Name String | Required |
-| type | Monocle Entity type | MonocleEntity.AppHosting | Required |
-| optional-attribute | Additional attribute specific to entity | | Optional |
-
-### MonocleEntity.Inference
-| Name | Description | Values | Required |
-| - | - | - | - |
-| name | Entity name generated by Monocle | Name String | Required |
-| type | Monocle Entity type | MonocleEntity.Inference | Required |
-| optional-attribute | Additional attribute specific to entity | | Optional |
-
-### MonocleEntity.VectorDB
-| Name | Description | Values | Required |
-| - | - | - | - |
-| name | Entity name generated by Monocle | Name String | Required |
-| type | Monocle Entity type | MonocleEntity.VectorDB | Required |
-| optional-attribute | Additional attribute specific to entity | | Optional |
-
-## Events
-The event section includes the input, output and metadata generated by that span execution. For each type of span, there are required and option input, output and metadata items listed below. If there's no data genearated in the space, the events will be an empty array.
-
-### Json format
-```json
-"events" : [
-    {
-        "name": "data.input",
-        "timestamp": "UTC timestamp",
-        "attributes": {
-            "input_attribute": "value"
-        }
-    },
-    {
-        "name": "data.output",
-        "timestamp": "UTC timestamp",
-        "attributes": {
-            "output_attribute": "value"
-        }
-    },
-    {
-        "name": "metadata",
-        "timestamp": "UTC timestamp",
-        "attributes": {
-            "metadata_attribute": "value"
-        }
-    }
-]
-```
-
-## Span types and events
-The ```span.type``` captured in ```attributes``` section of the span dectates the format of the ```events```
-### SpanType.Retrieval
-| Name | Description | Values | Required |
-| - | - | - | - |
-| name | event name | data.input or data.output or metadata | Required |
-| timestamp | timestap when the event occurred | UTC timestamp | Required |
-| attributes | input/output/metadata attributes generated in span | Dictionary | Required |
-
-### SpanType.Inference
-| Name | Description | Values | Required |
-| - | - | - | - |
-| name | event name | data.input or data.output or metadata | Required |
-| timestamp | timestap when the event occurred | UTC timestamp | Required |
-| attributes | input/output/metadata attributes generated in span | Dictionary | Required |
-
-### SpanType.Workflow
-| Name | Description | Values | Required |
-| - | - | - | - |
-| name | event name | data.input or data.output or metadata | Required |
-| timestamp | timestap when the event occurred | UTC timestamp | Required |
-| attributes | input/output/metadata attributes generated in span | Dictionary | Required |
-
-### SpanType.Internal
-Events will be empty
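Given the flat entity.<index>.<field> convention the deleted README describes, a small sketch of regrouping a span's attributes per entity; this helper is not part of the package, just an illustration of the documented layout:

```python
# Sketch: rebuild a per-entity view from a span's flat attribute dict,
# assuming the entity.count / entity.<index>.<field> convention above.
def entities_from_span(attributes):
    entities = []
    for i in range(1, int(attributes.get("entity.count", 0)) + 1):
        prefix = f"entity.{i}."
        entities.append({
            key[len(prefix):]: value
            for key, value in attributes.items()
            if key.startswith(prefix)
        })
    return entities

span_attrs = {
    "span.type": "inference",
    "entity.count": 2,
    "entity.1.name": "AzureOpenAI",
    "entity.1.type": "inference.azure_oai",
    "entity.2.name": "gpt-35-turbo",
    "entity.2.type": "model.llm",
}
print(entities_from_span(span_attrs))
# [{'name': 'AzureOpenAI', 'type': 'inference.azure_oai'},
#  {'name': 'gpt-35-turbo', 'type': 'model.llm'}]
```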
@@ -1,140 +0,0 @@
-{
-    "name": "llamaindex.retrieve",
-    "context": {
-        "trace_id": "0x93cd0bf865b3ffcc3cf9c075dc3e3797",
-        "span_id": "0x5d3f839e900bda24",
-        "trace_state": "[]"
-    },
-    "kind": "SpanKind.CLIENT",
-    "parent_id": "0x7a63d63e42ccac60",
-    "start_time": "2024-09-09T14:38:45.237182Z",
-    "end_time": "2024-09-09T14:38:45.620112Z",
-    "status": {
-        "status_code": "OK"
-    },
-    "attributes": {
-        "span.type": "retrieval",
-        "entity.count": 2,
-        "entity.1.name": "ChromaVectorStore",
-        "entity.1.type": "vectorstore.chroma",
-        "entity.1.embedding-model-name": "BAAI/bge-small-en-v1.5",
-        "entity.2.name": "BAAI/bge-small-en-v1.5",
-        "entity.2.type": "model.embedding",
-        "entity.2.model_name": "BAAI/bge-small-en-v1.5"
-    },
-    "events": [
-        {
-            "name": "data.input",
-            "timestamp": "timestamp",
-            "attributes": {
-                "context_input": "question: What is an americano?"
-            }
-        },
-        {
-            "name": "data.output",
-            "timestamp": "timestamp",
-            "attributes": {
-                "context_output": "Coffee is a hot drink made from the roasted and ground seeds (coffee beans) of a tropical shrub\nA latte consists of one or more shots of espresso, served in a glass (or sometimes a cup), into which hot steamed milk is added\nAmericano is a type of coffee drink prepared by diluting an espresso shot with hot water at a 1:3 to 1:4 ratio, resulting in a drink that retains the complex flavors of espresso, but in a lighter way"
-            }
-        }
-
-    ],
-    "links": [],
-    "resource": {
-        "attributes": {
-            "service.name": "coffee-bot"
-        },
-        "schema_url": ""
-    }
-},
-{
-    "name": "llamaindex.openai",
-    "context": {
-        "trace_id": "0x93cd0bf865b3ffcc3cf9c075dc3e3797",
-        "span_id": "0x8b6363e1937a4d7b",
-        "trace_state": "[]"
-    },
-    "kind": "SpanKind.CLIENT",
-    "parent_id": "0x7a63d63e42ccac60",
-    "start_time": "2024-09-09T14:38:45.622174Z",
-    "end_time": "2024-09-09T14:38:46.514120Z",
-    "status": {
-        "status_code": "OK"
-    },
-    "attributes": {
-        "span.type": "inference",
-        "entity.count": 2,
-        "entity.1.name": "AzureOpenAI",
-        "entity.1.type": "inference.azure_oai",
-        "entity.1.provider_name": "openai.azure.com",
-        "entity.1.deployment": "kshitiz-gpt",
-        "entity.1.inference_endpoint": "https://okahu-openai-dev.openai.azure.com/",
-
-        "entity.2.name": "gpt-35-turbo",
-        "entity.2.type": "model.llm",
-        "entity.2.model_name": "gpt-35-turbo"
-    },
-    "events": [
-        {
-            "name": "data.input",
-            "timestamp": "timestamp",
-            "attributes": {
-                "question": "What is an americano?",
-            }
-        },
-        {
-            "name": "data.output",
-            "timestamp": "timestamp",
-            "attributes": {
-                "response": "An americano is a type of coffee drink that is made by diluting an espresso shot with hot water at a 1:3 to 1:4 ratio, resulting in a drink that retains the complex flavors of espresso, but in a lighter way.",
-            }
-        },
-        {
-            "name": "metadata",
-            "timestamp": "timestamp",
-            "attributes": {
-                "temperature": 0.1,
-                "completion_tokens": 52,
-                "prompt_tokens": 233,
-                "total_tokens": 285
-            }
-        }
-    ],
-    "links": [],
-    "resource": {
-        "attributes": {
-            "service.name": "coffee-bot"
-        },
-        "schema_url": ""
-    }
-}
-{
-    "name": "llamaindex.query",
-    "context": {
-        "trace_id": "0x93cd0bf865b3ffcc3cf9c075dc3e3797",
-        "span_id": "0x7a63d63e42ccac60",
-        "trace_state": "[]"
-    },
-    "kind": "SpanKind.CLIENT",
-    "parent_id": null,
-    "start_time": "2024-09-09T14:38:45.236627Z",
-    "end_time": "2024-09-09T14:38:46.514442Z",
-    "status": {
-        "status_code": "OK"
-    },
-    "attributes": {
-        "span.type": "workflow",
-        "entity.count": 1,
-        "entity.1.name": "coffee-bot",
-        "entity.1.type": "workflow.llama_index"
-    },
-    "events": [
-    ],
-    "links": [],
-    "resource": {
-        "attributes": {
-            "service.name": "coffee-bot"
-        },
-        "schema_url": ""
-    }
-}