monocle-apptrace 0.1.1__py3-none-any.whl → 0.3.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monocle_apptrace/botocore/__init__.py +9 -0
- monocle_apptrace/constants.py +18 -4
- monocle_apptrace/exporters/aws/s3_exporter.py +158 -0
- monocle_apptrace/exporters/azure/blob_exporter.py +125 -0
- monocle_apptrace/exporters/base_exporter.py +48 -0
- monocle_apptrace/exporters/exporter_processor.py +19 -0
- monocle_apptrace/exporters/monocle_exporters.py +27 -0
- monocle_apptrace/exporters/okahu/okahu_exporter.py +115 -0
- monocle_apptrace/haystack/__init__.py +4 -4
- monocle_apptrace/haystack/wrap_pipeline.py +3 -2
- monocle_apptrace/instrumentor.py +14 -17
- monocle_apptrace/langchain/__init__.py +6 -3
- monocle_apptrace/llamaindex/__init__.py +8 -7
- monocle_apptrace/message_processing.py +80 -0
- monocle_apptrace/metamodel/entities/README.md +33 -10
- monocle_apptrace/metamodel/entities/app_hosting_types.json +29 -0
- monocle_apptrace/metamodel/entities/entities.json +49 -0
- monocle_apptrace/metamodel/entities/inference_types.json +33 -0
- monocle_apptrace/metamodel/entities/model_types.json +41 -0
- monocle_apptrace/metamodel/entities/vector_store_types.json +25 -0
- monocle_apptrace/metamodel/entities/workflow_types.json +22 -0
- monocle_apptrace/metamodel/maps/attributes/inference/botocore_entities.json +27 -0
- monocle_apptrace/metamodel/maps/attributes/inference/haystack_entities.json +57 -0
- monocle_apptrace/metamodel/maps/attributes/inference/langchain_entities.json +57 -0
- monocle_apptrace/metamodel/maps/attributes/inference/llamaindex_entities.json +57 -0
- monocle_apptrace/metamodel/maps/attributes/retrieval/haystack_entities.json +31 -0
- monocle_apptrace/metamodel/maps/attributes/retrieval/langchain_entities.json +31 -0
- monocle_apptrace/metamodel/maps/attributes/retrieval/llamaindex_entities.json +31 -0
- monocle_apptrace/metamodel/maps/botocore_methods.json +13 -0
- monocle_apptrace/metamodel/maps/haystack_methods.json +26 -6
- monocle_apptrace/metamodel/maps/{lang_chain_methods.json → langchain_methods.json} +31 -8
- monocle_apptrace/metamodel/maps/{llama_index_methods.json → llamaindex_methods.json} +30 -8
- monocle_apptrace/metamodel/spans/span_example.json +1 -1
- monocle_apptrace/metamodel/spans/span_types.json +16 -0
- monocle_apptrace/utils.py +179 -20
- monocle_apptrace/wrap_common.py +350 -150
- monocle_apptrace/wrapper.py +5 -2
- {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.3.0b1.dist-info}/METADATA +8 -3
- monocle_apptrace-0.3.0b1.dist-info/RECORD +48 -0
- {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.3.0b1.dist-info}/WHEEL +1 -1
- monocle_apptrace/haystack/wrap_node.py +0 -27
- monocle_apptrace/haystack/wrap_openai.py +0 -44
- monocle_apptrace/metamodel/entities/entity_types.json +0 -157
- monocle_apptrace/metamodel/entities/entity_types.py +0 -51
- monocle_apptrace-0.1.1.dist-info/RECORD +0 -29
- {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.3.0b1.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.1.1.dist-info → monocle_apptrace-0.3.0b1.dist-info}/licenses/NOTICE +0 -0
--- a/monocle_apptrace/langchain/__init__.py
+++ b/monocle_apptrace/langchain/__init__.py
@@ -1,6 +1,9 @@
 import os
-from monocle_apptrace.utils import
+from monocle_apptrace.utils import get_wrapper_methods_config
 
 parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
-LANGCHAIN_METHODS =
-os.path.join(parent_dir, 'metamodel', 'maps', '
+LANGCHAIN_METHODS = get_wrapper_methods_config(
+    wrapper_methods_config_path=os.path.join(parent_dir, 'metamodel', 'maps', 'langchain_methods.json'),
+    attributes_config_base_path=os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+
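Both integrations now resolve their method maps through get_wrapper_methods_config, passing the methods JSON plus a base path for the attribute-mapping files. A minimal sketch of what such a loader could look like; the real helper lives in monocle_apptrace/utils.py, and the "wrapper_methods" / "output_processor" key names here are assumptions for illustration only:

import json
import os

def get_wrapper_methods_config(wrapper_methods_config_path, attributes_config_base_path):
    # Illustrative sketch only: load the methods map from JSON and resolve
    # any attribute-mapping file references against the given base path.
    with open(wrapper_methods_config_path, encoding="utf-8") as config_file:
        config = json.load(config_file)
    for method in config.get("wrapper_methods", []):  # assumed key name
        for i, rel_path in enumerate(method.get("output_processor", [])):  # assumed key name
            method["output_processor"][i] = os.path.join(attributes_config_base_path, rel_path)
    return config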
--- a/monocle_apptrace/llamaindex/__init__.py
+++ b/monocle_apptrace/llamaindex/__init__.py
@@ -1,15 +1,16 @@
-
-#pylint: disable=protected-access
+# pylint: disable=protected-access
 import os
-from monocle_apptrace.utils import
+from monocle_apptrace.utils import get_wrapper_methods_config
+
 
 def get_llm_span_name_for_openai(instance):
     if (hasattr(instance, "_is_azure_client")
-
-
+            and callable(getattr(instance, "_is_azure_client"))
+            and instance._is_azure_client()):
         return "llamaindex.azure_openai"
     return "llamaindex.openai"
 
 parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
-LLAMAINDEX_METHODS =
-os.path.join(parent_dir, 'metamodel', 'maps', '
+LLAMAINDEX_METHODS = get_wrapper_methods_config(
+    wrapper_methods_config_path=os.path.join(parent_dir, 'metamodel', 'maps', 'llamaindex_methods.json'),
+    attributes_config_base_path=os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
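For reference, the reworked get_llm_span_name_for_openai only needs an object exposing a callable _is_azure_client. A quick illustration with stand-in clients (SimpleNamespace stubs, not real LlamaIndex objects):

from types import SimpleNamespace

from monocle_apptrace.llamaindex import get_llm_span_name_for_openai

azure_like = SimpleNamespace(_is_azure_client=lambda: True)
plain_openai = SimpleNamespace(_is_azure_client=lambda: False)

print(get_llm_span_name_for_openai(azure_like))    # -> "llamaindex.azure_openai"
print(get_llm_span_name_for_openai(plain_openai))  # -> "llamaindex.openai"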
--- /dev/null
+++ b/monocle_apptrace/message_processing.py
@@ -0,0 +1,80 @@
+"""
+This module provides utility functions for extracting system, user,
+and assistant messages from various input formats.
+"""
+
+import logging
+from monocle_apptrace.utils import get_attribute
+DATA_INPUT_KEY = "data.input"
+
+logger = logging.getLogger(__name__)
+def extract_messages(args):
+    """Extract system and user messages"""
+    try:
+        messages = []
+        args_input = get_attribute(DATA_INPUT_KEY)
+        if args_input:
+            messages.append(args_input)
+            return messages
+        if args and isinstance(args, tuple) and len(args) > 0:
+            if hasattr(args[0], "messages") and isinstance(args[0].messages, list):
+                for msg in args[0].messages:
+                    if hasattr(msg, 'content') and hasattr(msg, 'type'):
+                        messages.append({msg.type: msg.content})
+            elif isinstance(args[0], list): #llama
+                for msg in args[0]:
+                    if hasattr(msg, 'content') and hasattr(msg, 'role'):
+                        if hasattr(msg.role, 'value'):
+                            role = msg.role.value
+                        else:
+                            role = msg.role
+                        if msg.role == "system":
+                            messages.append({role: msg.content})
+                        elif msg.role in ["user", "human"]:
+                            user_message = extract_query_from_content(msg.content)
+                            messages.append({role: user_message})
+        return messages
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+        return []
+
+
+def extract_assistant_message(response):
+    try:
+        if isinstance(response, str):
+            return [response]
+        if hasattr(response, "content"):
+            return [response.content]
+        if hasattr(response, "message") and hasattr(response.message, "content"):
+            return [response.message.content]
+        if "replies" in response:
+            reply = response["replies"][0]
+            if hasattr(reply, 'content'):
+                return [reply.content]
+            return [reply]
+        if isinstance(response, dict):
+            return [response]
+        return []
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
+        return []
+
+
+def extract_query_from_content(content):
+    try:
+        query_prefix = "Query:"
+        answer_prefix = "Answer:"
+        query_start = content.find(query_prefix)
+        if query_start == -1:
+            return None
+
+        query_start += len(query_prefix)
+        answer_start = content.find(answer_prefix, query_start)
+        if answer_start == -1:
+            query = content[query_start:].strip()
+        else:
+            query = content[query_start:answer_start].strip()
+        return query
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_query_from_content: %s", str(e))
+        return ""
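A quick sanity check of the new helpers, using SimpleNamespace stubs in place of real LangChain/LlamaIndex message objects (and assuming no ambient "data.input" attribute is set, so get_attribute returns nothing):

from types import SimpleNamespace

from monocle_apptrace.message_processing import (
    extract_assistant_message,
    extract_messages,
    extract_query_from_content,
)

# LangChain-style input: args[0] carries a .messages list whose items
# expose .type and .content.
prompt = SimpleNamespace(messages=[
    SimpleNamespace(type="system", content="You are a helpful bot."),
    SimpleNamespace(type="human", content="Query: What is Monocle? Answer:"),
])
print(extract_messages((prompt,)))
# -> [{'system': 'You are a helpful bot.'}, {'human': 'Query: What is Monocle? Answer:'}]

# LlamaIndex-style input: args[0] is a plain list of .role/.content messages;
# user content is run through extract_query_from_content.
chat = [SimpleNamespace(role="user", content="Query: ping Answer: pong")]
print(extract_messages((chat,)))                               # -> [{'user': 'ping'}]

print(extract_assistant_message("plain string reply"))         # -> ['plain string reply']
print(extract_query_from_content("Query: ping Answer: pong"))  # -> 'ping'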
--- a/monocle_apptrace/metamodel/entities/README.md
+++ b/monocle_apptrace/metamodel/entities/README.md
@@ -1,33 +1,44 @@
 # Monocle Entities
 The entity type defines the type of GenAI component that Monocle understand. The monocle instrumentation can extract the relevenat information for this entity. There are a fixed set of [entity types](./entity_types.py) that are defined by Monocle out of the box, eg workflow, model etc. As the GenAI landscape evolves, the Monocle community will introduce a new entity type if the current entities won't represent a new technology component.
 
-##
+## Entities
 Following attributes are supported for all entities
 | Name | Description | Required |
 | - | - | - |
 | name | Entity name generated by Monocle | Required |
-| type |
+| type | entity.category.type | True |
 
 ### MonocleEntity.Workflow
-Workflow ie the core application code.
+Workflow ie the core application code.
+#### Workflow types
 - generic
 - langchain
--
+- llamaindex
 - haystack
 
 ### MonocleEntity.Model
-GenAI models.
+GenAI models. It has following sub categories
 - generic
 - llm
 - embedding
+#### Model types
+- gpt3
+- gpt-3.5
+- gpt-3.5-turbo
+- gpt4
+- gpt4o
+- gpt4o-mini
+- ollama
+
 Following attributes are supported for all model type entities
 | Name | Description | Required |
 | - | - | - |
-
-
+| parameters | model parameters | false |
+| algorithm | model algorithm | false |
 
 ### MonocleEntity.AppHosting
-Application host services where the workflow code is run.
+Application host services where the workflow code is run.
+#### AppHosting types
 - generic
 - aws_lambda
 - aws_sagemaker
@@ -36,7 +47,12 @@ Application host services where the workflow code is run. Supported types are -
 - azure_mlw
 
 ### MonocleEntity.Inference
-The model hosting infrastructure services.
+The model hosting infrastructure services.
+Following attributes are supported for inference type entities
+| Name | Description | Required |
+| - | - | - |
+| inference_endpoint | connection endpoint for inference service | false |
+#### Inference types
 - generic
 - nvidia_triton
 - openai
@@ -45,8 +61,15 @@ The model hosting infrastructure services. Supported types are -
 - aws_bedrock
 - hugging_face
 
+Following attributes are supported for inference type entities
+| Name | Description | Inference type | Required |
+| - | - | - | - |
+| deployment | model deployment name | azure_oai | false |
+
+
 ### MonocleEntity.VectorStore
-Vector search data stores.
+Vector search data stores.
+#### VectorStore types
 - generic
 - chroma
 - aws_es
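Putting the README's tables together, a fully resolved inference entity would combine the common name/type attributes with the inference-specific ones. A hypothetical example (attribute names come from the tables above; every value is invented for illustration):

# Hypothetical azure_oai inference entity, limited to attributes the
# README documents; all values below are made up.
inference_entity = {
    "name": "AzureOpenAI",                                      # generated by Monocle
    "type": "inference.azure_oai",                              # entity.category.type
    "inference_endpoint": "https://example.openai.azure.com/",  # inference attribute
    "deployment": "gpt-4-prod",                                 # azure_oai-only attribute
}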
--- /dev/null
+++ b/monocle_apptrace/metamodel/entities/app_hosting_types.json
@@ -0,0 +1,29 @@
+{
+    "entity" : "app_hosting",
+    "types": [
+        {
+            "name": "aws_lambda",
+            "attributes" : []
+        },
+        {
+            "name": "aws_sagemaker",
+            "attributes" : []
+        },
+        {
+            "name": "azure_func",
+            "attributes" : []
+        },
+        {
+            "name": "azure_mlw",
+            "attributes" : []
+        },
+        {
+            "name": "github_codespace",
+            "attributes" : []
+        },
+        {
+            "name": "generic",
+            "attributes" : []
+        }
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/entities/entities.json
@@ -0,0 +1,49 @@
+{
+    "description": "Monocle entities represents kinds GenAI technology components and their implementations supported by Monocle",
+    "monocle_entities": [
+        {
+            "name": "workflow",
+            "attributes" : [],
+            "categories": []
+        },
+        {
+            "name": "model",
+            "attributes" : [
+                {
+                    "attribute_name": "model_name",
+                    "attribute_description": "Model name",
+                    "required": true
+                }
+            ],
+            "categories": [
+                {
+                    "name": "llm",
+                    "attributes" : []
+                },
+                {
+                    "name": "embedding",
+                    "attributes" : []
+                },
+                {
+                    "name": "generic",
+                    "attributes" : []
+                }
+            ]
+        },
+        {
+            "name": "vector_store",
+            "attributes" : [],
+            "categories": []
+        },
+        {
+            "name": "app_hosting",
+            "attributes" : [],
+            "categories": []
+        },
+        {
+            "name": "inference",
+            "attributes" : [],
+            "categories": []
+        }
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/entities/inference_types.json
@@ -0,0 +1,33 @@
+{
+    "entity" : "vector_store",
+    "types": [
+        {
+            "name": "aws_sagemaker",
+            "attributes" : []
+        },
+        {
+            "name": "aws_bedrock",
+            "attributes" : []
+        },
+        {
+            "name": "azure_oai",
+            "attributes" : []
+        },
+        {
+            "name": "openai",
+            "attributes" : []
+        },
+        {
+            "name": "nvidia_triton",
+            "attributes" : []
+        },
+        {
+            "name": "hugging_face",
+            "attributes" : []
+        },
+        {
+            "name": "generic",
+            "attributes" : []
+        }
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/entities/model_types.json
@@ -0,0 +1,41 @@
+{
+    "entity" : "model",
+    "types": [
+        {
+            "name": "gpt3",
+            "attributes" : []
+        },
+        {
+            "name": "gpt-3.5",
+            "attributes" : []
+        },
+        {
+            "name": "gpt-3.5-turbo",
+            "attributes" : []
+        },
+        {
+            "name": "gpt-3.5",
+            "attributes" : []
+        },
+        {
+            "name": "gpt4",
+            "attributes" : []
+        },
+        {
+            "name": "gpt4o",
+            "attributes" : []
+        },
+        {
+            "name": "gpt4o-mini",
+            "attributes" : []
+        },
+        {
+            "name": "ollama",
+            "attributes" : []
+        },
+        {
+            "name": "generic",
+            "attributes" : []
+        }
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/entities/vector_store_types.json
@@ -0,0 +1,25 @@
+{
+    "entity" : "vector_store",
+    "types": [
+        {
+            "name": "chroma",
+            "attributes" : []
+        },
+        {
+            "name": "aws_es",
+            "attributes" : []
+        },
+        {
+            "name": "milvus",
+            "attributes" : []
+        },
+        {
+            "name": "pinecone",
+            "attributes" : []
+        },
+        {
+            "name": "generic",
+            "attributes" : []
+        }
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/entities/workflow_types.json
@@ -0,0 +1,22 @@
+{
+    "entity" : "workflow",
+    "types": [
+        {
+            "name": "llama_index",
+            "attributes" : []
+        },
+        {
+            "name": "langchain",
+            "attributes" : []
+        },
+        {
+            "name": "haystack",
+            "attributes" : []
+        },
+        {
+            "name": "generic",
+            "attributes" : []
+        }
+
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/maps/attributes/inference/botocore_entities.json
@@ -0,0 +1,27 @@
+{
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type , inference_endpoint",
+                "attribute": "type",
+                "accessor": "lambda arguments:'inference.aws_sagemaker'"
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['instance'].meta.endpoint_url"
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['kwargs'].get('EndpointName', '')"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.llm.' + (resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['kwargs'].get('EndpointName', ''))"
+            }
+        ]
+    ]
+}
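Each accessor in these mapping files is a lambda source string evaluated against an arguments dict carrying the wrapped call's 'instance', 'args', and 'kwargs'. A minimal sketch of that evaluation; resolve_from_alias here is a stand-in mirroring what the accessors appear to assume from monocle_apptrace.utils, not the actual implementation:

from types import SimpleNamespace

def resolve_from_alias(attributes, aliases):
    # Stand-in helper: return the first alias key present in the dict, else None.
    for alias in aliases:
        if attributes.get(alias) is not None:
            return attributes[alias]
    return None

accessor_src = ("lambda arguments: resolve_from_alias("
                "arguments['instance'].__dict__, ['model', 'model_name']) "
                "or arguments['kwargs'].get('EndpointName', '')")

# Evaluate the accessor string with the helper it references in scope.
accessor = eval(accessor_src, {"resolve_from_alias": resolve_from_alias})

instance = SimpleNamespace(model="anthropic.claude-v2")
print(accessor({"instance": instance, "args": (), "kwargs": {}}))
# -> anthropic.claude-v2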
--- /dev/null
+++ b/monocle_apptrace/metamodel/maps/attributes/inference/haystack_entities.json
@@ -0,0 +1,57 @@
+{
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type ,name , deployment , inference_endpoint",
+                "attribute": "type",
+                "accessor": "lambda arguments:'inference.azure_oai'"
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": "lambda arguments:arguments['kwargs']['provider_name']"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['kwargs']['inference_endpoint']"
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.llm.'+resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
+            }
+        ]
+    ],
+    "events": [
+        { "name":"data.input",
+          "attributes": [
+
+            {
+                "_comment": "this is instruction and user query to LLM",
+                "attribute": "input",
+                "accessor": "lambda arguments: extract_messages(arguments['args'])"
+            }
+          ]
+        },
+        {
+            "name":"data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from LLM",
+                    "attribute": "response",
+                    "accessor": "lambda response: extract_assistant_message(response)"
+                }
+            ]
+        }
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/maps/attributes/inference/langchain_entities.json
@@ -0,0 +1,57 @@
+{
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type ,name , deployment , inference_endpoint",
+                "attribute": "type",
+                "accessor": "lambda arguments:'inference.azure_oai'"
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": "lambda arguments:arguments['kwargs']['provider_name']"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['kwargs']['inference_endpoint']"
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['instance'].model_id"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.llm.'+ (resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name']) or arguments['instance'].model_id)"
+            }
+        ]
+    ],
+    "events": [
+        { "name":"data.input",
+          "attributes": [
+
+            {
+                "_comment": "this is instruction and user query to LLM",
+                "attribute": "input",
+                "accessor": "lambda arguments: extract_messages(arguments['args'])"
+            }
+          ]
+        },
+        {
+            "name":"data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from LLM",
+                    "attribute": "response",
+                    "accessor": "lambda response: extract_assistant_message(response)"
+                }
+            ]
+        }
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/maps/attributes/inference/llamaindex_entities.json
@@ -0,0 +1,57 @@
+{
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type ,name , deployment , inference_endpoint",
+                "attribute": "type",
+                "accessor": "lambda arguments:'inference.azure_oai'"
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": "lambda arguments:arguments['kwargs']['provider_name']"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])"
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base']) or arguments['kwargs']['inference_endpoint']"
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.llm.'+resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name'])"
+            }
+        ]
+    ],
+    "events": [
+        { "name":"data.input",
+          "attributes": [
+
+            {
+                "_comment": "this is instruction and user query to LLM",
+                "attribute": "input",
+                "accessor": "lambda arguments: extract_messages(arguments['args'])"
+            }
+          ]
+        },
+        {
+            "name":"data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from LLM",
+                    "attribute": "response",
+                    "accessor": "lambda response: extract_assistant_message(response)"
+                }
+            ]
+        }
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/maps/attributes/retrieval/haystack_entities.json
@@ -0,0 +1,31 @@
+{
+    "type": "retrieval",
+    "attributes": [
+        [
+            {
+                "_comment": "vector store name and type",
+                "attribute": "name",
+                "accessor": "lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['document_store', '_document_store']).__class__.__name__"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'vectorstore.'+resolve_from_alias(arguments['instance'].__dict__, ['document_store', '_document_store']).__class__.__name__"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: get_vectorstore_deployment(resolve_from_alias(arguments['instance'].__dict__, ['document_store', '_document_store']).__dict__)"
+            }
+        ],
+        [
+            {
+                "_comment": "embedding model name and type",
+                "attribute": "name",
+                "accessor": "lambda arguments: get_embedding_model()"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.embedding.'+get_embedding_model()"
+            }
+        ]
+    ]
+}
--- /dev/null
+++ b/monocle_apptrace/metamodel/maps/attributes/retrieval/langchain_entities.json
@@ -0,0 +1,31 @@
+{
+    "type": "retrieval",
+    "attributes": [
+        [
+            {
+                "_comment": "vector store name and type",
+                "attribute": "name",
+                "accessor": "lambda arguments: type(arguments['instance'].vectorstore).__name__"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'vectorstore.'+type(arguments['instance'].vectorstore).__name__"
+            },
+            {
+                "attribute": "deployment",
+                "accessor": "lambda arguments: get_vectorstore_deployment(arguments['instance'].vectorstore.__dict__)"
+            }
+        ],
+        [
+            {
+                "_comment": "embedding model name and type",
+                "attribute": "name",
+                "accessor": "lambda arguments: arguments['instance'].vectorstore.embeddings.model"
+            },
+            {
+                "attribute": "type",
+                "accessor": "lambda arguments: 'model.embedding.'+arguments['instance'].vectorstore.embeddings.model"
+            }
+        ]
+    ]
+}