monocle-apptrace 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of monocle-apptrace might be problematic. Click here for more details.

Files changed (37) hide show
  1. monocle_apptrace/exporters/aws/s3_exporter.py +158 -0
  2. monocle_apptrace/exporters/azure/blob_exporter.py +128 -0
  3. monocle_apptrace/exporters/base_exporter.py +47 -0
  4. monocle_apptrace/exporters/exporter_processor.py +19 -0
  5. monocle_apptrace/exporters/monocle_exporters.py +27 -0
  6. monocle_apptrace/exporters/okahu/okahu_exporter.py +115 -0
  7. monocle_apptrace/haystack/__init__.py +4 -4
  8. monocle_apptrace/haystack/wrap_pipeline.py +3 -2
  9. monocle_apptrace/instrumentor.py +12 -15
  10. monocle_apptrace/langchain/__init__.py +6 -3
  11. monocle_apptrace/llamaindex/__init__.py +8 -7
  12. monocle_apptrace/metamodel/entities/README.md +33 -10
  13. monocle_apptrace/metamodel/entities/app_hosting_types.json +29 -0
  14. monocle_apptrace/metamodel/entities/entities.json +49 -0
  15. monocle_apptrace/metamodel/entities/inference_types.json +33 -0
  16. monocle_apptrace/metamodel/entities/model_types.json +41 -0
  17. monocle_apptrace/metamodel/entities/vector_store_types.json +25 -0
  18. monocle_apptrace/metamodel/entities/workflow_types.json +22 -0
  19. monocle_apptrace/metamodel/maps/attributes/inference/langchain_entities.json +35 -0
  20. monocle_apptrace/metamodel/maps/attributes/inference/llamaindex_entities.json +35 -0
  21. monocle_apptrace/metamodel/maps/attributes/retrieval/langchain_entities.json +27 -0
  22. monocle_apptrace/metamodel/maps/attributes/retrieval/llamaindex_entities.json +27 -0
  23. monocle_apptrace/metamodel/maps/{lang_chain_methods.json → langchain_methods.json} +31 -8
  24. monocle_apptrace/metamodel/maps/{llama_index_methods.json → llamaindex_methods.json} +12 -8
  25. monocle_apptrace/metamodel/spans/span_example.json +1 -1
  26. monocle_apptrace/metamodel/spans/span_types.json +16 -0
  27. monocle_apptrace/utils.py +90 -11
  28. monocle_apptrace/wrap_common.py +228 -122
  29. monocle_apptrace/wrapper.py +3 -1
  30. {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.2.0.dist-info}/METADATA +5 -1
  31. monocle_apptrace-0.2.0.dist-info/RECORD +44 -0
  32. monocle_apptrace/metamodel/entities/entity_types.json +0 -157
  33. monocle_apptrace/metamodel/entities/entity_types.py +0 -51
  34. monocle_apptrace-0.1.1.dist-info/RECORD +0 -29
  35. {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.2.0.dist-info}/WHEEL +0 -0
  36. {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.2.0.dist-info}/licenses/LICENSE +0 -0
  37. {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.2.0.dist-info}/licenses/NOTICE +0 -0
@@ -1,33 +1,44 @@
1
1
  # Monocle Entities
2
2
  The entity type defines the type of GenAI component that Monocle understands. The Monocle instrumentation can extract the relevant information for this entity. There are a fixed set of [entity types](./entity_types.py) that are defined by Monocle out of the box, e.g. workflow, model etc. As the GenAI landscape evolves, the Monocle community will introduce a new entity type if the current entities don't represent a new technology component.
3
3
 
4
- ## Entity Types
4
+ ## Entities
5
5
  Following attributes are supported for all entities
6
6
  | Name | Description | Required |
7
7
  | - | - | - |
8
8
  | name | Entity name generated by Monocle | Required |
9
- | type | Monocle Entity type | True |
9
+ | type | entity.category.type | True |
10
10
 
11
11
  ### MonocleEntity.Workflow
12
- Workflow ie the core application code. Supported types are -
12
+ Workflow, i.e. the core application code.
13
+ #### Workflow types
13
14
  - generic
14
15
  - langchain
15
- - llama_index
16
+ - llamaindex
16
17
  - haystack
17
18
 
18
19
  ### MonocleEntity.Model
19
- GenAI models. Supported types are -
20
+ GenAI models. It has the following sub categories
20
21
  - generic
21
22
  - llm
22
23
  - embedding
24
+ #### Model types
25
+ - gpt3
26
+ - gpt-3.5
27
+ - gpt-3.5-turbo
28
+ - gpt4
29
+ - gpt4o
30
+ - gpt4o-mini
31
+ - ollama
32
+
23
33
  Following attributes are supported for all model type entities
24
34
  | Name | Description | Required |
25
35
  | - | - | - |
26
- | model_name | Name of model | True |
27
-
36
+ | parameters | model parameters | false |
37
+ | algorithm | model algorithm | false |
28
38
 
29
39
  ### MonocleEntity.AppHosting
30
- Application host services where the workflow code is run. Supported types are -
40
+ Application host services where the workflow code is run.
41
+ #### AppHosting types
31
42
  - generic
32
43
  - aws_lambda
33
44
  - aws_sagemaker
@@ -36,7 +47,12 @@ Application host services where the workflow code is run. Supported types are -
36
47
  - azure_mlw
37
48
 
38
49
  ### MonocleEntity.Inference
39
- The model hosting infrastructure services. Supported types are -
50
+ The model hosting infrastructure services.
51
+ Following attributes are supported for inference type entities
52
+ | Name | Description | Required |
53
+ | - | - | - |
54
+ | inference_endpoint | connection endpoint for inference service | false |
55
+ #### Inference types
40
56
  - generic
41
57
  - nvidia_triton
42
58
  - openai
@@ -45,8 +61,15 @@ The model hosting infrastructure services. Supported types are -
45
61
  - aws_bedrock
46
62
  - hugging_face
47
63
 
64
+ Following attributes are supported for inference type entities
65
+ | Name | Description | Inference type | Required |
66
+ | - | - | - | - |
67
+ | deployment | model deployment name | azure_oai | false |
68
+
69
+
48
70
  ### MonocleEntity.VectorStore
49
- Vector search data stores. Supported types are -
71
+ Vector search data stores.
72
+ #### VectorStore types
50
73
  - generic
51
74
  - chroma
52
75
  - aws_es
@@ -0,0 +1,29 @@
1
+ {
2
+ "entity" : "app_hosting",
3
+ "types": [
4
+ {
5
+ "name": "aws_lambda",
6
+ "attributes" : []
7
+ },
8
+ {
9
+ "name": "aws_sagemaker",
10
+ "attributes" : []
11
+ },
12
+ {
13
+ "name": "azure_func",
14
+ "attributes" : []
15
+ },
16
+ {
17
+ "name": "azure_mlw",
18
+ "attributes" : []
19
+ },
20
+ {
21
+ "name": "github_codespace",
22
+ "attributes" : []
23
+ },
24
+ {
25
+ "name": "generic",
26
+ "attributes" : []
27
+ }
28
+ ]
29
+ }
@@ -0,0 +1,49 @@
1
+ {
2
+ "description": "Monocle entities represents kinds GenAI technology components and their implementations supported by Monocle",
3
+ "monocle_entities": [
4
+ {
5
+ "name": "workflow",
6
+ "attributes" : [],
7
+ "categories": []
8
+ },
9
+ {
10
+ "name": "model",
11
+ "attributes" : [
12
+ {
13
+ "attribute_name": "model_name",
14
+ "attribute_description": "Model name",
15
+ "required": true
16
+ }
17
+ ],
18
+ "categories": [
19
+ {
20
+ "name": "llm",
21
+ "attributes" : []
22
+ },
23
+ {
24
+ "name": "embedding",
25
+ "attributes" : []
26
+ },
27
+ {
28
+ "name": "generic",
29
+ "attributes" : []
30
+ }
31
+ ]
32
+ },
33
+ {
34
+ "name": "vector_store",
35
+ "attributes" : [],
36
+ "categories": []
37
+ },
38
+ {
39
+ "name": "app_hosting",
40
+ "attributes" : [],
41
+ "categories": []
42
+ },
43
+ {
44
+ "name": "inference",
45
+ "attributes" : [],
46
+ "categories": []
47
+ }
48
+ ]
49
+ }
@@ -0,0 +1,33 @@
1
+ {
2
+ "entity" : "vector_store",
3
+ "types": [
4
+ {
5
+ "name": "aws_sagemaker",
6
+ "attributes" : []
7
+ },
8
+ {
9
+ "name": "aws_bedrock",
10
+ "attributes" : []
11
+ },
12
+ {
13
+ "name": "azure_oai",
14
+ "attributes" : []
15
+ },
16
+ {
17
+ "name": "openai",
18
+ "attributes" : []
19
+ },
20
+ {
21
+ "name": "nvidia_triton",
22
+ "attributes" : []
23
+ },
24
+ {
25
+ "name": "hugging_face",
26
+ "attributes" : []
27
+ },
28
+ {
29
+ "name": "generic",
30
+ "attributes" : []
31
+ }
32
+ ]
33
+ }
@@ -0,0 +1,41 @@
1
+ {
2
+ "entity" : "model",
3
+ "types": [
4
+ {
5
+ "name": "gpt3",
6
+ "attributes" : []
7
+ },
8
+ {
9
+ "name": "gpt-3.5",
10
+ "attributes" : []
11
+ },
12
+ {
13
+ "name": "gpt-3.5-turbo",
14
+ "attributes" : []
15
+ },
16
+ {
17
+ "name": "gpt-3.5",
18
+ "attributes" : []
19
+ },
20
+ {
21
+ "name": "gpt4",
22
+ "attributes" : []
23
+ },
24
+ {
25
+ "name": "gpt4o",
26
+ "attributes" : []
27
+ },
28
+ {
29
+ "name": "gpt4o-mini",
30
+ "attributes" : []
31
+ },
32
+ {
33
+ "name": "ollama",
34
+ "attributes" : []
35
+ },
36
+ {
37
+ "name": "generic",
38
+ "attributes" : []
39
+ }
40
+ ]
41
+ }
@@ -0,0 +1,25 @@
1
+ {
2
+ "entity" : "vector_store",
3
+ "types": [
4
+ {
5
+ "name": "chroma",
6
+ "attributes" : []
7
+ },
8
+ {
9
+ "name": "aws_es",
10
+ "attributes" : []
11
+ },
12
+ {
13
+ "name": "milvus",
14
+ "attributes" : []
15
+ },
16
+ {
17
+ "name": "pinecone",
18
+ "attributes" : []
19
+ },
20
+ {
21
+ "name": "generic",
22
+ "attributes" : []
23
+ }
24
+ ]
25
+ }
@@ -0,0 +1,22 @@
1
+ {
2
+ "entity" : "workflow",
3
+ "types": [
4
+ {
5
+ "name": "llama_index",
6
+ "attributes" : []
7
+ },
8
+ {
9
+ "name": "langchain",
10
+ "attributes" : []
11
+ },
12
+ {
13
+ "name": "haystack",
14
+ "attributes" : []
15
+ },
16
+ {
17
+ "name": "generic",
18
+ "attributes" : []
19
+ }
20
+
21
+ ]
22
+ }
@@ -0,0 +1,35 @@
1
+ {
2
+ "type": "inference",
3
+ "attributes": [
4
+ [
5
+ {
6
+ "_comment": "provider type ,name , deployment , inference_endpoint",
7
+ "attribute": "type",
8
+ "accessor": "lambda instance,args:'inference.azure_oai'"
9
+ },
10
+ {
11
+ "attribute": "provider_name",
12
+ "accessor": "lambda instance,args:args['provider_name']"
13
+ },
14
+ {
15
+ "attribute": "deployment",
16
+ "accessor": "lambda instance,args: resolve_from_alias(instance.__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
17
+ },
18
+ {
19
+ "attribute": "inference_endpoint",
20
+ "accessor": "lambda instance,args: resolve_from_alias(instance.__dict__, ['azure_endpoint', 'api_base']) or args['inference_endpoint']"
21
+ }
22
+ ],
23
+ [
24
+ {
25
+ "_comment": "LLM Model",
26
+ "attribute": "name",
27
+ "accessor": "lambda instance,args: resolve_from_alias(instance.__dict__, ['model', 'model_name'])"
28
+ },
29
+ {
30
+ "attribute": "type",
31
+ "accessor": "lambda instance,args: 'model.llm.'+resolve_from_alias(instance.__dict__, ['model', 'model_name'])"
32
+ }
33
+ ]
34
+ ]
35
+ }
@@ -0,0 +1,35 @@
1
+ {
2
+ "type": "inference",
3
+ "attributes": [
4
+ [
5
+ {
6
+ "_comment": "provider type ,name , deployment , inference_endpoint",
7
+ "attribute": "type",
8
+ "accessor": "lambda instance,args:'inference.azure_oai'"
9
+ },
10
+ {
11
+ "attribute": "provider_name",
12
+ "accessor": "lambda instance,args:args['provider_name']"
13
+ },
14
+ {
15
+ "attribute": "deployment",
16
+ "accessor": "lambda instance,args: resolve_from_alias(instance.__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
17
+ },
18
+ {
19
+ "attribute": "inference_endpoint",
20
+ "accessor": "lambda instance,args: resolve_from_alias(instance.__dict__, ['azure_endpoint', 'api_base'])"
21
+ }
22
+ ],
23
+ [
24
+ {
25
+ "_comment": "LLM Model",
26
+ "attribute": "name",
27
+ "accessor": "lambda instance,args: resolve_from_alias(instance.__dict__, ['model', 'model_name'])"
28
+ },
29
+ {
30
+ "attribute": "type",
31
+ "accessor": "lambda instance,args: 'model.llm.'+resolve_from_alias(instance.__dict__, ['model', 'model_name'])"
32
+ }
33
+ ]
34
+ ]
35
+ }
@@ -0,0 +1,27 @@
1
+ {
2
+ "type": "retrieval",
3
+ "attributes": [
4
+ [
5
+ {
6
+ "_comment": "vector store name and type",
7
+ "attribute": "name",
8
+ "accessor": "lambda instance,args: type(instance.vectorstore).__name__"
9
+ },
10
+ {
11
+ "attribute": "type",
12
+ "accessor": "lambda instance,args: 'vectorstore.'+type(instance.vectorstore).__name__"
13
+ }
14
+ ],
15
+ [
16
+ {
17
+ "_comment": "embedding model name and type",
18
+ "attribute": "name",
19
+ "accessor": "lambda instance,args: instance.vectorstore.embeddings.model"
20
+ },
21
+ {
22
+ "attribute": "type",
23
+ "accessor": "lambda instance ,args: 'model.embedding.'+instance.vectorstore.embeddings.model"
24
+ }
25
+ ]
26
+ ]
27
+ }
@@ -0,0 +1,27 @@
1
+ {
2
+ "type": "retrieval",
3
+ "attributes": [
4
+ [
5
+ {
6
+ "_comment": "vector store name and type",
7
+ "attribute": "name",
8
+ "accessor": "lambda instance,args: type(instance._vector_store).__name__"
9
+ },
10
+ {
11
+ "attribute": "type",
12
+ "accessor": "lambda instance,args: 'vectorstore.'+type(instance._vector_store).__name__"
13
+ }
14
+ ],
15
+ [
16
+ {
17
+ "_comment": "embedding model name and type",
18
+ "attribute": "name",
19
+ "accessor": "lambda instance,args: instance._embed_model.model_name"
20
+ },
21
+ {
22
+ "attribute": "type",
23
+ "accessor": "lambda instance ,args: 'model.embedding.'+instance._embed_model.model_name"
24
+ }
25
+ ]
26
+ ]
27
+ }
@@ -1,4 +1,4 @@
1
- {
1
+ {
2
2
  "wrapper_methods" : [
3
3
  {
4
4
  "package": "langchain.prompts.base",
@@ -19,42 +19,65 @@
19
19
  "object": "BaseChatModel",
20
20
  "method": "invoke",
21
21
  "wrapper_package": "wrap_common",
22
- "wrapper_method": "llm_wrapper"
22
+ "wrapper_method": "llm_wrapper",
23
+ "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
23
24
  },
24
25
  {
25
26
  "package": "langchain.chat_models.base",
26
27
  "object": "BaseChatModel",
27
28
  "method": "ainvoke",
28
29
  "wrapper_package": "wrap_common",
29
- "wrapper_method": "allm_wrapper"
30
+ "wrapper_method": "allm_wrapper",
31
+ "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
30
32
  },
31
33
  {
32
34
  "package": "langchain_core.language_models.llms",
33
35
  "object": "LLM",
34
36
  "method": "_generate",
35
37
  "wrapper_package": "wrap_common",
36
- "wrapper_method": "llm_wrapper"
38
+ "wrapper_method": "llm_wrapper",
39
+ "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
37
40
  },
38
41
  {
39
42
  "package": "langchain_core.language_models.llms",
40
43
  "object": "LLM",
41
44
  "method": "_agenerate",
42
45
  "wrapper_package": "wrap_common",
43
- "wrapper_method": "llm_wrapper"
46
+ "wrapper_method": "allm_wrapper",
47
+ "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
48
+ },
49
+ {
50
+ "package": "langchain_core.language_models.llms",
51
+ "object": "BaseLLM",
52
+ "method": "invoke",
53
+ "wrapper_package": "wrap_common",
54
+ "wrapper_method": "llm_wrapper",
55
+ "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
56
+ },
57
+ {
58
+ "package": "langchain_core.language_models.llms",
59
+ "object": "BaseLLM",
60
+ "method": "ainvoke",
61
+ "wrapper_package": "wrap_common",
62
+ "wrapper_method": "allm_wrapper",
63
+ "output_processor": ["metamodel/maps/attributes/inference/langchain_entities.json"]
44
64
  },
45
65
  {
46
66
  "package": "langchain_core.retrievers",
47
67
  "object": "BaseRetriever",
48
68
  "method": "invoke",
49
69
  "wrapper_package": "wrap_common",
50
- "wrapper_method": "task_wrapper"
70
+ "wrapper_method": "task_wrapper",
71
+ "output_processor": ["metamodel/maps/attributes/retrieval/langchain_entities.json"]
72
+
51
73
  },
52
74
  {
53
75
  "package": "langchain_core.retrievers",
54
76
  "object": "BaseRetriever",
55
77
  "method": "ainvoke",
56
78
  "wrapper_package": "wrap_common",
57
- "wrapper_method": "atask_wrapper"
79
+ "wrapper_method": "atask_wrapper",
80
+ "output_processor": ["metamodel/maps/attributes/retrieval/langchain_entities.json"]
58
81
  },
59
82
  {
60
83
  "package": "langchain.schema",
@@ -103,4 +126,4 @@
103
126
  "wrapper_method": "atask_wrapper"
104
127
  }
105
128
  ]
106
- }
129
+ }
@@ -6,7 +6,8 @@
6
6
  "method": "retrieve",
7
7
  "span_name": "llamaindex.retrieve",
8
8
  "wrapper_package": "wrap_common",
9
- "wrapper_method": "task_wrapper"
9
+ "wrapper_method": "task_wrapper",
10
+ "output_processor": ["metamodel/maps/attributes/retrieval/llamaindex_entities.json"]
10
11
  },
11
12
  {
12
13
  "package": "llama_index.core.indices.base_retriever",
@@ -14,7 +15,8 @@
14
15
  "method": "aretrieve",
15
16
  "span_name": "llamaindex.retrieve",
16
17
  "wrapper_package": "wrap_common",
17
- "wrapper_method": "atask_wrapper"
18
+ "wrapper_method": "atask_wrapper",
19
+ "output_processor": ["metamodel/maps/attributes/retrieval/llamaindex_entities.json"]
18
20
  },
19
21
  {
20
22
  "package": "llama_index.core.base.base_query_engine",
@@ -38,7 +40,8 @@
38
40
  "method": "chat",
39
41
  "span_name": "llamaindex.llmchat",
40
42
  "wrapper_package": "wrap_common",
41
- "wrapper_method": "task_wrapper"
43
+ "wrapper_method": "task_wrapper",
44
+ "output_processor": ["metamodel/maps/attributes/inference/llamaindex_entities.json"]
42
45
  },
43
46
  {
44
47
  "package": "llama_index.core.llms.custom",
@@ -46,7 +49,8 @@
46
49
  "method": "achat",
47
50
  "span_name": "llamaindex.llmchat",
48
51
  "wrapper_package": "wrap_common",
49
- "wrapper_method": "atask_wrapper"
52
+ "wrapper_method": "atask_wrapper",
53
+ "output_processor": ["metamodel/maps/attributes/inference/llamaindex_entities.json"]
50
54
  },
51
55
  {
52
56
  "package": "llama_index.llms.openai.base",
@@ -55,8 +59,7 @@
55
59
  "span_name": "llamaindex.openai",
56
60
  "wrapper_package": "wrap_common",
57
61
  "wrapper_method": "llm_wrapper",
58
- "span_name_getter_package" : "llamaindex",
59
- "span_name_getter_mothod" : "get_llm_span_name_for_openai"
62
+ "output_processor": ["metamodel/maps/attributes/inference/llamaindex_entities.json"]
60
63
  },
61
64
  {
62
65
  "package": "llama_index.llms.openai.base",
@@ -64,7 +67,8 @@
64
67
  "method": "achat",
65
68
  "span_name": "llamaindex.openai",
66
69
  "wrapper_package": "wrap_common",
67
- "wrapper_method": "allm_wrapper"
70
+ "wrapper_method": "allm_wrapper",
71
+ "output_processor": ["metamodel/maps/attributes/inference/llamaindex_entities.json"]
68
72
  }
69
73
  ]
70
- }
74
+ }
@@ -13,7 +13,7 @@
13
13
  "status_code": "OK"
14
14
  },
15
15
  "attributes": {
16
- "span.type": "Retrieval",
16
+ "span.type": "retrieval",
17
17
  "entity.count": 2,
18
18
  "entity.1.name": "ChromaVectorStore",
19
19
  "entity.1.type": "vectorstore.chroma",
@@ -0,0 +1,16 @@
1
+ {
2
+ "span_types" : [
3
+ {
4
+ "type": "inference",
5
+ "description": "Model inference span"
6
+ },
7
+ {
8
+ "type": "retrieval",
9
+ "description": "vector embedding retrieval"
10
+ },
11
+ {
12
+ "type": "workflow",
13
+ "description": "workflow orchetraction at top level"
14
+ }
15
+ ]
16
+ }