monocle-apptrace 0.3.1b1__py3-none-any.whl → 0.4.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of monocle-apptrace might be problematic.
- monocle_apptrace/exporters/aws/s3_exporter.py +3 -1
- monocle_apptrace/exporters/azure/blob_exporter.py +2 -2
- monocle_apptrace/exporters/base_exporter.py +10 -4
- monocle_apptrace/exporters/file_exporter.py +19 -4
- monocle_apptrace/exporters/monocle_exporters.py +8 -5
- monocle_apptrace/exporters/okahu/okahu_exporter.py +5 -2
- monocle_apptrace/instrumentation/common/__init__.py +1 -1
- monocle_apptrace/instrumentation/common/constants.py +8 -1
- monocle_apptrace/instrumentation/common/instrumentor.py +44 -22
- monocle_apptrace/instrumentation/common/span_handler.py +67 -41
- monocle_apptrace/instrumentation/common/tracing.md +68 -0
- monocle_apptrace/instrumentation/common/utils.py +86 -63
- monocle_apptrace/instrumentation/common/wrapper.py +185 -46
- monocle_apptrace/instrumentation/common/wrapper_method.py +12 -6
- monocle_apptrace/instrumentation/metamodel/aiohttp/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +66 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/entities/http.py +51 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/methods.py +13 -0
- monocle_apptrace/instrumentation/metamodel/anthropic/methods.py +4 -2
- monocle_apptrace/instrumentation/metamodel/flask/_helper.py +50 -3
- monocle_apptrace/instrumentation/metamodel/flask/entities/http.py +48 -0
- monocle_apptrace/instrumentation/metamodel/flask/methods.py +10 -1
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +17 -4
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +4 -1
- monocle_apptrace/instrumentation/metamodel/haystack/methods.py +8 -4
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +12 -4
- monocle_apptrace/instrumentation/metamodel/langchain/methods.py +6 -14
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +13 -9
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +16 -15
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +174 -26
- monocle_apptrace/instrumentation/metamodel/openai/methods.py +0 -2
- monocle_apptrace/instrumentation/metamodel/requests/_helper.py +31 -0
- monocle_apptrace/instrumentation/metamodel/requests/entities/http.py +51 -0
- monocle_apptrace/instrumentation/metamodel/requests/methods.py +2 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +19 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +24 -18
- monocle_apptrace/instrumentation/metamodel/teamsai/methods.py +42 -8
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/METADATA +1 -1
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/RECORD +43 -36
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py
CHANGED
@@ -1,71 +1,219 @@
+import logging
+import random
+import time
+from types import SimpleNamespace
 from monocle_apptrace.instrumentation.metamodel.openai import (
     _helper,
 )
-from monocle_apptrace.instrumentation.common.utils import
+from monocle_apptrace.instrumentation.common.utils import (
+    patch_instance_method,
+    resolve_from_alias,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def process_stream(to_wrap, response, span_processor):
+    waiting_for_first_token = True
+    stream_start_time = time.time_ns()
+    first_token_time = stream_start_time
+    stream_closed_time = None
+    accumulated_response = ""
+    token_usage = None
+    accumulated_temp_list = []
+
+    if to_wrap and hasattr(response, "__iter__"):
+        original_iter = response.__iter__
+
+        def new_iter(self):
+            nonlocal waiting_for_first_token, first_token_time, stream_closed_time, accumulated_response, token_usage
+
+            for item in original_iter():
+                try:
+                    if (
+                        item.choices
+                        and item.choices[0].delta
+                        and item.choices[0].delta.content
+                    ):
+                        if waiting_for_first_token:
+                            waiting_for_first_token = False
+                            first_token_time = time.time_ns()
+
+                        accumulated_response += item.choices[0].delta.content
+                        # token_usage = item.usage
+                    elif item.object == "chat.completion.chunk" and item.usage:
+                        # Handle the case where the response is a chunk
+                        token_usage = item.usage
+                        stream_closed_time = time.time_ns()
+
+                except Exception as e:
+                    logger.warning(
+                        "Warning: Error occurred while processing item in new_iter: %s",
+                        str(e),
+                    )
+                finally:
+                    accumulated_temp_list.append(item)
+                    yield item
+
+            if span_processor:
+                ret_val = SimpleNamespace(
+                    type="stream",
+                    timestamps={
+                        "data.input": int(stream_start_time),
+                        "data.output": int(first_token_time),
+                        "metadata": int(stream_closed_time or time.time_ns()),
+                    },
+                    output_text=accumulated_response,
+                    usage=token_usage,
+                )
+                span_processor(ret_val)
+
+        patch_instance_method(response, "__iter__", new_iter)
+
+    if to_wrap and hasattr(response, "__aiter__"):
+        original_iter = response.__aiter__
+
+        async def new_aiter(self):
+            nonlocal waiting_for_first_token, first_token_time, stream_closed_time, accumulated_response, token_usage
+
+            async for item in original_iter():
+                try:
+                    if (
+                        item.choices
+                        and item.choices[0].delta
+                        and item.choices[0].delta.content
+                    ):
+                        if waiting_for_first_token:
+                            waiting_for_first_token = False
+                            first_token_time = time.time_ns()
+
+                        accumulated_response += item.choices[0].delta.content
+                        # token_usage = item.usage
+                    elif item.object == "chat.completion.chunk" and item.usage:
+                        # Handle the case where the response is a chunk
+                        token_usage = item.usage
+                        stream_closed_time = time.time_ns()
+
+                except Exception as e:
+                    logger.warning(
+                        "Warning: Error occurred while processing item in new_aiter: %s",
+                        str(e),
+                    )
+                finally:
+                    accumulated_temp_list.append(item)
+                    yield item
+
+            if span_processor:
+                ret_val = SimpleNamespace(
+                    type="stream",
+                    timestamps={
+                        "data.input": int(stream_start_time),
+                        "data.output": int(first_token_time),
+                        "metadata": int(stream_closed_time or time.time_ns()),
+                    },
+                    output_text=accumulated_response,
+                    usage=token_usage,
+                )
+                span_processor(ret_val)
+
+        patch_instance_method(response, "__aiter__", new_aiter)
+
 
 INFERENCE = {
     "type": "inference",
+    "is_auto_close": lambda kwargs: kwargs.get("stream", False) is False,
+    "response_processor": process_stream,
     "attributes": [
         [
             {
                 "_comment": "provider type ,name , deployment , inference_endpoint",
                 "attribute": "type",
-                "accessor": lambda arguments:
+                "accessor": lambda arguments: "inference."
+                + (_helper.get_inference_type(arguments["instance"]))
+                or "openai",
             },
             {
                 "attribute": "provider_name",
-                "accessor": lambda arguments: _helper.extract_provider_name(
+                "accessor": lambda arguments: _helper.extract_provider_name(
+                    arguments["instance"]
+                ),
             },
             {
                 "attribute": "deployment",
-                "accessor": lambda arguments: resolve_from_alias(
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["instance"].__dict__,
+                    [
+                        "engine",
+                        "azure_deployment",
+                        "deployment_name",
+                        "deployment_id",
+                        "deployment",
+                    ],
+                ),
             },
             {
                 "attribute": "inference_endpoint",
-                "accessor": lambda arguments: resolve_from_alias(
-
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["instance"].__dict__,
+                    ["azure_endpoint", "api_base", "endpoint"],
+                )
+                or _helper.extract_inference_endpoint(arguments["instance"]),
+            },
         ],
         [
             {
                 "_comment": "LLM Model",
                 "attribute": "name",
-                "accessor": lambda arguments: resolve_from_alias(
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["kwargs"],
+                    ["model", "model_name", "endpoint_name", "deployment_name"],
+                ),
             },
             {
                 "attribute": "type",
-                "accessor": lambda arguments:
-
-
+                "accessor": lambda arguments: "model.llm."
+                + resolve_from_alias(
+                    arguments["kwargs"],
+                    ["model", "model_name", "endpoint_name", "deployment_name"],
+                ),
+            },
+        ],
     ],
     "events": [
-        {
-
-
-
-
-
-
-
-
-
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "this is instruction and user query to LLM",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.extract_messages(
+                        arguments["kwargs"]
+                    ),
+                }
+            ],
+        },
         {
             "name": "data.output",
             "attributes": [
                 {
                     "_comment": "this is result from LLM",
                     "attribute": "response",
-                    "accessor": lambda arguments: _helper.extract_assistant_message(
+                    "accessor": lambda arguments: _helper.extract_assistant_message(
+                        arguments["result"]
+                    ),
                 }
-            ]
+            ],
         },
         {
             "name": "metadata",
             "attributes": [
                 {
                     "_comment": "this is metadata usage from LLM",
-                    "accessor": lambda arguments: _helper.update_span_from_llm_response(
+                    "accessor": lambda arguments: _helper.update_span_from_llm_response(
+                        arguments["result"]
+                    ),
                 }
-            ]
-        }
-    ]
+            ],
+        },
+    ],
 }
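Note: the key mechanic above is that process_stream never drains the stream itself; it re-binds the response object's __iter__/__aiter__ so timing and token accounting happen only as the caller consumes chunks, and the span processor fires once the iterator is exhausted. Below is a minimal self-contained sketch of that pattern. FakeStream, observe_stream, and this patch_instance_method are illustrative stand-ins; monocle's own patch_instance_method lives in common.utils and its implementation is not shown in this diff.

import time
from types import SimpleNamespace

def patch_instance_method(obj, name, fn):
    # One plausible implementation: Python looks up dunders like __iter__
    # on the type, not the instance, so give this one object a throwaway
    # subclass carrying the override.
    obj.__class__ = type(type(obj).__name__, (type(obj),), {name: fn})

class FakeStream:
    # Stand-in for a streaming LLM response object.
    def __init__(self, chunks):
        self._chunks = chunks
    def __iter__(self):
        return iter(self._chunks)

def observe_stream(response, span_processor):
    # Same shape as process_stream above, reduced to the essentials.
    original_iter = response.__iter__
    start = time.time_ns()
    parts = []

    def new_iter(self):
        for item in original_iter():
            parts.append(item.text)
            yield item
        # Fires only after the caller has fully drained the stream.
        span_processor(SimpleNamespace(
            output_text="".join(parts),
            timestamps={"data.input": start, "metadata": time.time_ns()},
        ))

    patch_instance_method(response, "__iter__", new_iter)

stream = FakeStream([SimpleNamespace(text="Hel"), SimpleNamespace(text="lo")])
observe_stream(stream, lambda span: print(span.output_text))
for _ in stream:  # consuming the stream triggers the span: prints "Hello"
    pass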
monocle_apptrace/instrumentation/metamodel/openai/methods.py
CHANGED
@@ -28,7 +28,6 @@ OPENAI_METHODS = [
         "object": "Embeddings",
         "method": "create",
         "wrapper_method": task_wrapper,
-        "span_name": "openai_embeddings",
         "span_handler": "non_framework_handler",
         "output_processor": RETRIEVAL
     },
@@ -37,7 +36,6 @@ OPENAI_METHODS = [
         "object": "AsyncEmbeddings",
         "method": "create",
         "wrapper_method": atask_wrapper,
-        "span_name": "openai_embeddings",
         "span_handler": "non_framework_handler",
         "output_processor": RETRIEVAL
     },
monocle_apptrace/instrumentation/metamodel/requests/_helper.py
CHANGED
@@ -2,6 +2,36 @@ import os
 from monocle_apptrace.instrumentation.metamodel.requests import allowed_urls
 from opentelemetry.propagate import inject
 from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+from monocle_apptrace.instrumentation.common.utils import add_monocle_trace_state
+from urllib.parse import urlparse, ParseResult
+
+
+def get_route(kwargs):
+    url:str = kwargs['url']
+    parsed_url:ParseResult = urlparse(url)
+    return f"{parsed_url.netloc}{parsed_url.path}"
+
+def get_method(kwargs) -> str:
+    return kwargs['method'] if 'method' in kwargs else 'GET'
+
+def get_params(kwargs) -> dict:
+    url:str = kwargs['url']
+    parsed_url:ParseResult = urlparse(url)
+    return parsed_url.query
+
+def get_headers(kwargs) -> dict:
+    return kwargs['headers'] if 'headers' in kwargs else {}
+
+def get_body(kwargs) -> dict:
+    body = {}
+    return body
+
+def extract_response(result) -> str:
+    return result.text if hasattr(result, 'text') else str(result)
+
+def extract_status(result) -> str:
+    return f"{result.status_code}"
+
 
 def request_pre_task_processor(kwargs):
     # add traceparent to the request headers in kwargs
@@ -9,6 +39,7 @@ def request_pre_task_processor(kwargs):
         headers = {}
     else:
         headers = kwargs['headers'].copy()
+    add_monocle_trace_state(headers)
     inject(headers)
     kwargs['headers'] = headers
 
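Note: these helpers are thin wrappers over urllib.parse, and despite its -> dict annotation, get_params returns the raw query string. A quick check of what they compute, using an example URL:

from urllib.parse import urlparse

kwargs = {"url": "https://api.example.com/v1/items?limit=10&page=2", "method": "POST"}
parsed = urlparse(kwargs["url"])
print(f"{parsed.netloc}{parsed.path}")  # api.example.com/v1/items -> get_route(kwargs)
print(parsed.query)                     # limit=10&page=2          -> get_params(kwargs)
print(kwargs.get("method", "GET"))      # POST                     -> get_method(kwargs)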
monocle_apptrace/instrumentation/metamodel/requests/entities/http.py
ADDED
@@ -0,0 +1,51 @@
+from monocle_apptrace.instrumentation.metamodel.requests import _helper
+REQUEST_HTTP_PROCESSOR = {
+    "type": "http.send",
+    "attributes": [
+        [
+            {
+                "_comment": "request method, request URI",
+                "attribute": "method",
+                "accessor": lambda arguments: _helper.get_method(arguments['kwargs'])
+            },
+            {
+                "_comment": "request method, request URI",
+                "attribute": "URL",
+                "accessor": lambda arguments: _helper.get_route(arguments['kwargs'])
+            }
+
+        ]
+    ],
+    "events": [
+        {"name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "route params",
+                    "attribute": "http.params",
+                    "accessor": lambda arguments: _helper.get_params(arguments['kwargs'])
+                },
+                {
+                    "_comment": "route body",
+                    "attribute": "body",
+                    "accessor": lambda arguments: _helper.get_body(arguments['kwargs'])
+                },
+
+            ]
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "status from HTTP response",
+                    "attribute": "status",
+                    "accessor": lambda arguments: _helper.extract_status(arguments['result'])
+                },
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_response(arguments['result'])
+                }
+            ]
+        }
+    ]
+}
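Note: the processor is declarative; the wrapper builds an arguments dict and each accessor lambda pulls from it. The keys ("kwargs" for the outgoing request, "result" for the response) are visible in the accessors above. The wiring below is a simplified sketch, with FakeResponse as a hypothetical stand-in for a requests.Response:

from types import SimpleNamespace

FakeResponse = SimpleNamespace(status_code=200, text="ok")  # stand-in for requests.Response

arguments = {
    "kwargs": {"url": "https://api.example.com/v1/items?limit=10", "method": "GET"},
    "result": FakeResponse,
}

# Evaluate a couple of accessors the way the wrapper would:
method_accessor = lambda a: a["kwargs"].get("method", "GET")
status_accessor = lambda a: f"{a['result'].status_code}"
print(method_accessor(arguments), status_accessor(arguments))  # GET 200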
monocle_apptrace/instrumentation/metamodel/requests/methods.py
CHANGED
@@ -1,12 +1,13 @@
 from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+from monocle_apptrace.instrumentation.metamodel.requests.entities.http import REQUEST_HTTP_PROCESSOR
 
 REQUESTS_METHODS = [
     {
         "package": "requests.sessions",
         "object": "Session",
         "method": "request",
-        "span_name": "http_requests",
         "wrapper_method": task_wrapper,
         "span_handler":"request_handler",
+        "output_processor": REQUEST_HTTP_PROCESSOR
     }
 ]
monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py
CHANGED
@@ -1,4 +1,10 @@
 from monocle_apptrace.instrumentation.common.utils import MonocleSpanException
+from monocle_apptrace.instrumentation.common.utils import (
+    Option,
+    get_keys_as_tuple,
+    get_nested_value,
+    try_option,
+)
 def capture_input(arguments):
     """
     Captures the input from Teams AI state.
@@ -55,4 +61,16 @@ def status_check(arguments):
     if hasattr(arguments["result"], "error") and arguments["result"].error is not None:
         error_msg:str = arguments["result"].error
         error_code:str = arguments["result"].status if hasattr(arguments["result"], "status") else "unknown"
-        raise MonocleSpanException(f"Error: {error_code} - {error_msg}")
+        raise MonocleSpanException(f"Error: {error_code} - {error_msg}")
+
+def extract_provider_name(instance):
+    provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
+    return provider_url.unwrap_or(None)
+
+
+def extract_inference_endpoint(instance):
+    inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
+    if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
+        inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
+    return inference_endpoint.unwrap_or(extract_provider_name(instance))
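Note: extract_provider_name and extract_inference_endpoint lean on monocle's Option/try_option helpers from common.utils, whose implementation is not part of this diff. The toy version below only captures the pattern the code relies on (wrap a call that may raise, chain map, fall back with unwrap_or):

class Option:
    # Toy re-implementation, just enough for the helpers above.
    def __init__(self, value=None):
        self._value = value
    def map(self, fn):
        return Option(fn(self._value)) if self._value is not None else self
    def is_none(self):
        return self._value is None
    def unwrap_or(self, default):
        return self._value if self._value is not None else default

def try_option(fn, *args):
    # Run fn(*args); a raised exception (e.g. AttributeError) becomes an empty Option.
    try:
        return Option(fn(*args))
    except Exception:
        return Option(None)

client = object()  # no base_url attribute
endpoint = try_option(getattr, client, "base_url").map(str)
print(endpoint.is_none(), endpoint.unwrap_or("unknown"))  # True unknown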
monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py
CHANGED
@@ -1,6 +1,7 @@
 from monocle_apptrace.instrumentation.metamodel.teamsai import (
     _helper,
 )
+from monocle_apptrace.instrumentation.common.utils import get_llm_type
 TEAMAI_OUTPUT_PROCESSOR = {
     "type": "inference",
     "attributes": [
@@ -8,15 +9,15 @@ TEAMAI_OUTPUT_PROCESSOR = {
         {
             "_comment": "provider type, name, deployment",
             "attribute": "type",
-            "accessor": lambda arguments:
+            "accessor": lambda arguments: 'inference.' + (get_llm_type(arguments['instance']._client) or 'generic')
         },
         {
             "attribute": "provider_name",
-            "accessor": lambda arguments:
+            "accessor": lambda arguments: _helper.extract_provider_name(arguments['instance'])
         },
         {
-            "attribute": "
-            "accessor": lambda arguments:
+            "attribute": "inference_endpoint",
+            "accessor": lambda arguments: _helper.extract_inference_endpoint(arguments['instance'])
         }
     ],
     [
@@ -25,6 +26,11 @@ TEAMAI_OUTPUT_PROCESSOR = {
             "attribute": "name",
             "accessor": lambda arguments: arguments["instance"]._options.default_model if hasattr(arguments["instance"], "_options") else "unknown"
         },
+        {
+            "_comment": "LLM Model",
+            "attribute": "type",
+            "accessor": lambda arguments: 'model.llm.'+ arguments["instance"]._options.default_model if hasattr(arguments["instance"], "_options") else "unknown"
+        },
         {
             "attribute": "is_streaming",
             "accessor": lambda arguments: arguments["instance"]._options.stream if hasattr(arguments["instance"], "_options") else False
@@ -52,19 +58,19 @@ TEAMAI_OUTPUT_PROCESSOR = {
             }
         ]
     },
-    {
-
-
-
-
-
-
-
-
-
-
-
-    }
+    # {
+    #     "name": "metadata",
+    #     "attributes": [
+    #         {
+    #             "_comment": "metadata from Teams AI response",
+    #             "accessor": lambda arguments: {
+    #                 "prompt_tokens": arguments["result"].get("usage", {}).get("prompt_tokens", 0),
+    #                 "completion_tokens": arguments["result"].get("usage", {}).get("completion_tokens", 0),
+    #                 "total_tokens": arguments["result"].get("usage", {}).get("total_tokens", 0),
+    #                 "latency_ms": arguments.get("latency_ms")
+    #             }
+    #         }
+    #     ]
+    # }
     ]
 }
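Note: one subtlety in the new model-type accessor above: in Python, + binds tighter than the conditional expression, so the 'model.llm.' prefix is only applied in the True branch and the bare fallback string is returned otherwise. A quick demonstration, with hypothetical values standing in for _options.default_model and the hasattr check:

default_model = "gpt-4o"  # hypothetical value for _options.default_model
has_options = False       # i.e. hasattr(instance, "_options") failed
value = 'model.llm.' + default_model if has_options else "unknown"
print(value)  # -> "unknown": parsed as ('model.llm.' + default_model) if has_options else "unknown"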
monocle_apptrace/instrumentation/metamodel/teamsai/methods.py
CHANGED
@@ -1,4 +1,9 @@
-from monocle_apptrace.instrumentation.common.wrapper import
+from monocle_apptrace.instrumentation.common.wrapper import (
+    ascopes_wrapper,
+    atask_wrapper,
+    task_wrapper,
+    ascope_wrapper,
+)
 from monocle_apptrace.instrumentation.metamodel.teamsai.entities.inference.teamsai_output_processor import (
     TEAMAI_OUTPUT_PROCESSOR,
 )
@@ -6,21 +11,50 @@ from monocle_apptrace.instrumentation.metamodel.teamsai.entities.inference.actio
     ACTIONPLANNER_OUTPUT_PROCESSOR,
 )
 
-
+
+def get_id(args, kwargs):
+    """
+    Extracts the ID from the context.
+    """
+    scopes: dict[str, dict[str:str]] = {}
+    context = kwargs.get("context")
+    if context and context.activity and context.activity.conversation.id:
+        conversation_id = context.activity.conversation.id or ""
+        user_aad_object_id = context.activity.from_property.aad_object_id or ""
+        user_teams_id = context.activity.from_property.id or ""
+        channel_id = context.activity.channel_id or ""
+        recipient_id = context.activity.recipient.id or ""
+        recipient_aad_object_id = context.activity.recipient.aad_object_id or ""
+        scopes[f"teams.conversation.conversation.id"] = conversation_id
+        scopes[f"teams.user.from_property.aad_object_id"] = user_aad_object_id
+        scopes[f"teams.user.from_property.id"] = user_teams_id
+        scopes[f"teams.channel.channel_id"] = channel_id
+        scopes[f"teams.channel.recipient.id"] = recipient_id
+        scopes[f"teams.channel.recipient.aad_object_id"] = recipient_aad_object_id
+
+    return scopes
+
+
+TEAMAI_METHODS = [
     {
         "package": "teams.ai.models.openai_model",
         "object": "OpenAIModel",
         "method": "complete_prompt",
-        "span_name": "teamsai.workflow",
         "wrapper_method": atask_wrapper,
-        "output_processor": TEAMAI_OUTPUT_PROCESSOR
+        "output_processor": TEAMAI_OUTPUT_PROCESSOR,
     },
     {
         "package": "teams.ai.planners.action_planner",
         "object": "ActionPlanner",
         "method": "complete_prompt",
-        "span_name": "teamsai.workflow",
         "wrapper_method": atask_wrapper,
-        "output_processor": ACTIONPLANNER_OUTPUT_PROCESSOR
-    }
-
+        "output_processor": ACTIONPLANNER_OUTPUT_PROCESSOR,
+    },
+    {
+        "package": "teams.ai.planners.action_planner",
+        "object": "ActionPlanner",
+        "method": "complete_prompt",
+        "scope_values": get_id,
+        "wrapper_method": ascopes_wrapper,
+    },
+]
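Note: get_id is wired in through "scope_values" with ascopes_wrapper, so spans under a complete_prompt call get tagged with the Teams conversation/user/channel scopes. A usage sketch, assuming get_id as defined in the diff above is in scope, with a hypothetical TurnContext shaped after the attributes it dereferences:

from types import SimpleNamespace

# Hypothetical Teams TurnContext stand-in.
context = SimpleNamespace(
    activity=SimpleNamespace(
        conversation=SimpleNamespace(id="19:meeting_abc@thread.v2"),
        from_property=SimpleNamespace(id="29:user", aad_object_id="aad-user"),
        channel_id="msteams",
        recipient=SimpleNamespace(id="28:bot", aad_object_id="aad-bot"),
    )
)

scopes = get_id((), {"context": context})
print(scopes["teams.conversation.conversation.id"])  # 19:meeting_abc@thread.v2
print(scopes["teams.channel.channel_id"])            # msteams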