monocle-apptrace 0.3.1b1__py3-none-any.whl → 0.4.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of monocle-apptrace might be problematic.
- monocle_apptrace/exporters/aws/s3_exporter.py +3 -1
- monocle_apptrace/exporters/azure/blob_exporter.py +2 -2
- monocle_apptrace/exporters/base_exporter.py +10 -4
- monocle_apptrace/exporters/file_exporter.py +19 -4
- monocle_apptrace/exporters/monocle_exporters.py +8 -5
- monocle_apptrace/exporters/okahu/okahu_exporter.py +5 -2
- monocle_apptrace/instrumentation/common/__init__.py +1 -1
- monocle_apptrace/instrumentation/common/constants.py +8 -1
- monocle_apptrace/instrumentation/common/instrumentor.py +44 -22
- monocle_apptrace/instrumentation/common/span_handler.py +67 -41
- monocle_apptrace/instrumentation/common/tracing.md +68 -0
- monocle_apptrace/instrumentation/common/utils.py +86 -63
- monocle_apptrace/instrumentation/common/wrapper.py +185 -46
- monocle_apptrace/instrumentation/common/wrapper_method.py +12 -6
- monocle_apptrace/instrumentation/metamodel/aiohttp/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +66 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/entities/http.py +51 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/methods.py +13 -0
- monocle_apptrace/instrumentation/metamodel/anthropic/methods.py +4 -2
- monocle_apptrace/instrumentation/metamodel/flask/_helper.py +50 -3
- monocle_apptrace/instrumentation/metamodel/flask/entities/http.py +48 -0
- monocle_apptrace/instrumentation/metamodel/flask/methods.py +10 -1
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +17 -4
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +4 -1
- monocle_apptrace/instrumentation/metamodel/haystack/methods.py +8 -4
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +12 -4
- monocle_apptrace/instrumentation/metamodel/langchain/methods.py +6 -14
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +13 -9
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +16 -15
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +174 -26
- monocle_apptrace/instrumentation/metamodel/openai/methods.py +0 -2
- monocle_apptrace/instrumentation/metamodel/requests/_helper.py +31 -0
- monocle_apptrace/instrumentation/metamodel/requests/entities/http.py +51 -0
- monocle_apptrace/instrumentation/metamodel/requests/methods.py +2 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +19 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +24 -18
- monocle_apptrace/instrumentation/metamodel/teamsai/methods.py +42 -8
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/METADATA +1 -1
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/RECORD +43 -36
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.3.1b1.dist-info → monocle_apptrace-0.4.0b1.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py
@@ -0,0 +1,66 @@
+import logging
+from threading import local
+from monocle_apptrace.instrumentation.common.utils import extract_http_headers, clear_http_scopes, try_option, Option, MonocleSpanException
+from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+from monocle_apptrace.instrumentation.common.constants import HTTP_SUCCESS_CODES
+from urllib.parse import unquote
+
+logger = logging.getLogger(__name__)
+MAX_DATA_LENGTH = 1000
+token_data = local()
+token_data.current_token = None
+
+def get_route(args) -> str:
+    route_path: Option[str] = try_option(getattr, args[0], 'path')
+    return route_path.unwrap_or("")
+
+def get_method(args) -> str:
+    # return args[0]['method'] if 'method' in args[0] else ""
+    http_method: Option[str] = try_option(getattr, args[0], 'method')
+    return http_method.unwrap_or("")
+
+def get_params(args) -> dict:
+    params: Option[str] = try_option(getattr, args[0], 'query_string')
+    return unquote(params.unwrap_or(""))
+
+def get_body(args) -> dict:
+    return ""
+
+def extract_response(result) -> str:
+    if hasattr(result, 'text'):
+        response = result.text[0:max(result.text.__len__(), MAX_DATA_LENGTH)]
+    else:
+        response = ""
+    return response
+
+def extract_status(result) -> str:
+    status = f"{result.status}" if hasattr(result, 'status') else ""
+    if status not in HTTP_SUCCESS_CODES:
+        error_message = extract_response(result)
+        raise MonocleSpanException(f"error: {status} - {error_message}")
+    return status
+
+def aiohttp_pre_tracing(args):
+    token_data.current_token = extract_http_headers(args[0].headers)
+
+def aiohttp_post_tracing():
+    clear_http_scopes(token_data.current_token)
+    token_data.current_token = None
+
+def aiohttp_skip_span(args) -> bool:
+    if get_method(args) == "HEAD":
+        return True
+    return False
+
+class aiohttpSpanHandler(SpanHandler):
+
+    def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
+        aiohttp_pre_tracing(args)
+        return super().pre_tracing(to_wrap, wrapped, instance, args, kwargs)
+
+    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value):
+        aiohttp_post_tracing()
+        return super().post_tracing(to_wrap, wrapped, instance, args, kwargs, return_value)
+
+    def skip_span(self, to_wrap, wrapped, instance, args, kwargs) -> bool:
+        return aiohttp_skip_span(args)
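The helpers above lean on the Option/try_option utilities imported from monocle_apptrace.instrumentation.common.utils. For readers unfamiliar with that module, here is a minimal sketch of the pattern, covering only the methods the diffs actually use (unwrap_or, map, and_then, is_none); this is an illustrative stand-in, not the package's real implementation.

from typing import Callable, Generic, Optional, TypeVar

T = TypeVar("T")
U = TypeVar("U")

class Option(Generic[T]):
    """Hypothetical stand-in for the Option type in monocle_apptrace utils."""
    def __init__(self, value: Optional[T] = None):
        self._value = value

    def is_none(self) -> bool:
        return self._value is None

    def unwrap_or(self, default):
        # Yield the wrapped value, or the caller-supplied default when empty.
        return self._value if self._value is not None else default

    def map(self, fn: Callable[[T], U]) -> "Option[U]":
        return Option(fn(self._value)) if self._value is not None else Option()

    def and_then(self, fn: Callable[[T], Optional[U]]) -> "Option[U]":
        return Option(fn(self._value)) if self._value is not None else Option()

def try_option(fn: Callable[..., T], *args) -> Option[T]:
    # Call fn(*args) and wrap the result; any exception becomes an empty Option.
    try:
        return Option(fn(*args))
    except Exception:
        return Option()

Under this reading, try_option(getattr, request, 'path').unwrap_or("") never raises, which is why the accessors above can probe arbitrary request objects safely.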
monocle_apptrace/instrumentation/metamodel/aiohttp/entities/http.py
@@ -0,0 +1,51 @@
+from monocle_apptrace.instrumentation.metamodel.aiohttp import _helper
+AIO_HTTP_PROCESSOR = {
+    "type": "http.process",
+    "attributes": [
+        [
+            {
+                "_comment": "request method, request URI",
+                "attribute": "method",
+                "accessor": lambda arguments: _helper.get_method(arguments['args'])
+            },
+            {
+                "_comment": "request method, request URI",
+                "attribute": "route",
+                "accessor": lambda arguments: _helper.get_route(arguments['args'])
+            },
+            {
+                "_comment": "request method, request URI",
+                "attribute": "body",
+                "accessor": lambda arguments: _helper.get_body(arguments['args'])
+            },
+        ]
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "route params",
+                    "attribute": "params",
+                    "accessor": lambda arguments: _helper.get_params(arguments['args'])
+                }
+            ]
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "status from HTTP response",
+                    "attribute": "status",
+                    "accessor": lambda arguments: _helper.extract_status(arguments['result'])
+                },
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_response(arguments['result'])
+                }
+            ]
+        }
+
+    ]
+}
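Each accessor takes a single arguments dict; judging from the lambdas above it carries at least 'args' and 'result' keys populated by the wrapper. A hedged illustration, evaluating the attribute accessors against stub objects standing in for aiohttp's request and response:

from types import SimpleNamespace
from monocle_apptrace.instrumentation.metamodel.aiohttp.entities.http import AIO_HTTP_PROCESSOR

# Stubs standing in for aiohttp's Request/Response (attribute names per _helper).
request = SimpleNamespace(method="GET", path="/chat", query_string="q=hello%20world")
arguments = {"args": (request,), "result": SimpleNamespace(status="200", text="ok")}

for attribute in AIO_HTTP_PROCESSOR["attributes"][0]:
    print(attribute["attribute"], "=", attribute["accessor"](arguments))
# method = GET
# route = /chat
# body =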
monocle_apptrace/instrumentation/metamodel/aiohttp/methods.py
@@ -0,0 +1,13 @@
+from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper
+from monocle_apptrace.instrumentation.metamodel.aiohttp.entities.http import AIO_HTTP_PROCESSOR
+
+AIOHTTP_METHODS = [
+    {
+        "package": "aiohttp.web_app",
+        "object": "Application",
+        "method": "_handle",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "aiohttp_handler",
+        "output_processor": AIO_HTTP_PROCESSOR
+    }
+]
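Application._handle is aiohttp's internal request dispatcher, so wrapping it covers every route in one place. For context, a minimal server whose requests would pass through the wrapped coroutine once the instrumentor is active (app wiring only; telemetry setup is not shown here):

from aiohttp import web

async def chat(request: web.Request) -> web.Response:
    # Any handler works; the wrapper sits below this, in Application._handle.
    return web.Response(text="hello")

app = web.Application()
app.add_routes([web.get("/chat", chat)])

if __name__ == "__main__":
    web.run_app(app, port=8080)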
monocle_apptrace/instrumentation/metamodel/anthropic/methods.py
@@ -5,17 +5,19 @@ from monocle_apptrace.instrumentation.metamodel.anthropic.entities.inference imp
 
 ANTHROPIC_METHODS = [
     {
-        "package": "anthropic.resources
+        "package": "anthropic.resources",
         "object": "Messages",
         "method": "create",
         "wrapper_method": task_wrapper,
+        "span_handler": "non_framework_handler",
         "output_processor": INFERENCE
     },
     {
-        "package": "anthropic.resources
+        "package": "anthropic.resources",
         "object": "AsyncMessages",
         "method": "create",
         "wrapper_method": atask_wrapper,
+        "span_handler": "non_framework_handler",
         "output_processor": INFERENCE
     },
 
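For reference, Messages.create is the call site behind the synchronous Anthropic client, so an application-level call like the sketch below is what ends up wrapped (model name illustrative):

import anthropic

client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment
message = client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=256,
    messages=[{"role": "user", "content": "Say hello"}],
)
print(message.content)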
monocle_apptrace/instrumentation/metamodel/flask/_helper.py
@@ -1,11 +1,46 @@
+import logging
 from threading import local
 from monocle_apptrace.instrumentation.common.utils import extract_http_headers, clear_http_scopes
-from opentelemetry.propagate import extract
-from opentelemetry.context import Context, attach, detach
 from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+from monocle_apptrace.instrumentation.common.constants import HTTP_SUCCESS_CODES
+from monocle_apptrace.instrumentation.common.utils import MonocleSpanException
+from urllib.parse import unquote
+from opentelemetry.context import get_current
+from opentelemetry.trace import Span, get_current_span
+from opentelemetry.trace.propagation import _SPAN_KEY
+
+logger = logging.getLogger(__name__)
+MAX_DATA_LENGTH = 1000
 token_data = local()
 token_data.current_token = None
 
+def get_route(args) -> str:
+    return args[0]['PATH_INFO'] if 'PATH_INFO' in args[0] else ""
+
+def get_method(args) -> str:
+    return args[0]['REQUEST_METHOD'] if 'REQUEST_METHOD' in args[0] else ""
+
+def get_params(args) -> dict:
+    params = args[0]['QUERY_STRING'] if 'QUERY_STRING' in args[0] else ""
+    return unquote(params)
+
+def get_body(args) -> dict:
+    return ""
+
+def extract_response(instance) -> str:
+    if hasattr(instance, 'data') and hasattr(instance, 'content_length'):
+        response = instance.data[0:max(instance.content_length, MAX_DATA_LENGTH)]
+    else:
+        response = ""
+    return response
+
+def extract_status(instance) -> str:
+    status = f"{instance.status_code}" if hasattr(instance, 'status_code') else ""
+    if status not in HTTP_SUCCESS_CODES:
+        error_message = extract_response(instance)
+        raise MonocleSpanException(f"error: {status} - {error_message}")
+    return status
+
 def flask_pre_tracing(args):
     headers = dict()
     for key, value in args[0].items():
@@ -26,4 +61,16 @@ class FlaskSpanHandler(SpanHandler):
 
     def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value):
         flask_post_tracing()
-        return super().post_tracing(to_wrap, wrapped, instance, args, kwargs, return_value)
+        return super().post_tracing(to_wrap, wrapped, instance, args, kwargs, return_value)
+
+class FlaskResponseSpanHandler(SpanHandler):
+    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value):
+        try:
+            _parent_span_context = get_current()
+            if _parent_span_context is not None:
+                parent_span: Span = _parent_span_context.get(_SPAN_KEY, None)
+                if parent_span is not None:
+                    self.hydrate_events(to_wrap, wrapped, instance, args, kwargs, return_value, parent_span)
+        except Exception as e:
+            logger.info(f"Failed to propogate flask response: {e}")
+        super().post_tracing(to_wrap, wrapped, instance, args, kwargs, return_value)
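Unlike the aiohttp helpers, the Flask ones read raw WSGI environ keys, so they can be exercised directly with a hand-built dict (values illustrative):

from monocle_apptrace.instrumentation.metamodel.flask._helper import get_method, get_params, get_route

# A minimal WSGI environ, as Flask.wsgi_app would receive it.
environ = {
    "REQUEST_METHOD": "POST",
    "PATH_INFO": "/chat",
    "QUERY_STRING": "q=hello%20world",
}
args = (environ,)
print(get_method(args))  # POST
print(get_route(args))   # /chat
print(get_params(args))  # q=hello world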
monocle_apptrace/instrumentation/metamodel/flask/entities/http.py
@@ -0,0 +1,48 @@
+from monocle_apptrace.instrumentation.metamodel.flask import _helper
+FLASK_HTTP_PROCESSOR = {
+    "type": "http.process",
+    "attributes": [
+        [
+            {
+                "_comment": "request method, request URI",
+                "attribute": "method",
+                "accessor": lambda arguments: _helper.get_method(arguments['args'])
+            },
+            {
+                "_comment": "request method, request URI",
+                "attribute": "route",
+                "accessor": lambda arguments: _helper.get_route(arguments['args'])
+            },
+        ]
+    ]
+}
+
+FLASK_RESPONSE_PROCESSOR = {
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "route params",
+                    "attribute": "params",
+                    "accessor": lambda arguments: _helper.get_params(arguments['args'])
+                }
+            ]
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "status from HTTP response",
+                    "attribute": "status",
+                    "accessor": lambda arguments: _helper.extract_status(arguments['instance'])
+                },
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_response(arguments['instance'])
+                }
+            ]
+        }
+    ]
+}
monocle_apptrace/instrumentation/metamodel/flask/methods.py
@@ -1,13 +1,22 @@
 from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+from monocle_apptrace.instrumentation.metamodel.flask.entities.http import FLASK_HTTP_PROCESSOR, FLASK_RESPONSE_PROCESSOR
 
 FLASK_METHODS = [
     {
         "package": "flask.app",
         "object": "Flask",
         "method": "wsgi_app",
-        "span_name": "Flask.wsgi_app",
         "wrapper_method": task_wrapper,
         "span_handler": "flask_handler",
+        "output_processor": FLASK_HTTP_PROCESSOR,
+    },
+    {
+        "package": "werkzeug.wrappers.response",
+        "object": "Response",
+        "method": "__call__",
+        "wrapper_method": task_wrapper,
+        "span_handler": "flask_response_handler",
+        "output_processor": FLASK_RESPONSE_PROCESSOR,
         "skip_span": True
     }
 ]
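The new second entry hooks werkzeug.wrappers.response.Response.__call__, the point where Flask emits the response, which is what lets FlaskResponseSpanHandler attach status and response events onto the already-open request span. A minimal app for context (telemetry setup not shown):

from flask import Flask

app = Flask(__name__)

@app.get("/chat")
def chat():
    # Flask materializes this return value as a werkzeug Response, whose
    # __call__ is the second wrapped method above.
    return {"answer": "hello"}

if __name__ == "__main__":
    app.run(port=5000)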
monocle_apptrace/instrumentation/metamodel/haystack/_helper.py
@@ -1,4 +1,5 @@
 import logging
+
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
     get_keys_as_tuple,
@@ -11,13 +12,19 @@ logger = logging.getLogger(__name__)
 def extract_messages(kwargs):
     try:
         messages = []
+        system_message, user_message = None,None
         if isinstance(kwargs, dict):
             if 'system_prompt' in kwargs and kwargs['system_prompt']:
                 system_message = kwargs['system_prompt']
-                messages.append({"system" : system_message})
             if 'prompt' in kwargs and kwargs['prompt']:
                 user_message = extract_question_from_prompt(kwargs['prompt'])
+            if 'messages' in kwargs and len(kwargs['messages'])>1:
+                system_message = kwargs['messages'][0].text
+                user_message = kwargs['messages'][1].text
+            if system_message and user_message:
+                messages.append({"system": system_message})
                 messages.append({"user": user_message})
+
         return [str(message) for message in messages]
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
@@ -52,6 +59,8 @@ def extract_assistant_message(response):
         reply = response["replies"][0]
         if hasattr(reply, 'content'):
             return [reply.content]
+        if hasattr(reply, 'text'):
+            return [reply.text]
         return [reply]
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
@@ -108,15 +117,19 @@ def extract_embeding_model(instance):
 
 def update_span_from_llm_response(response, instance):
     meta_dict = {}
-
-
+    token_usage = None
+    if response is not None and isinstance(response, dict):
+        if "meta" in response:
+            token_usage = response["meta"][0]["usage"]
+        if "replies" in response:
+            token_usage = response["replies"][0].meta["usage"]
     if token_usage is not None:
         temperature = instance.__dict__.get("temperature", None)
         meta_dict.update({"temperature": temperature})
         meta_dict.update(
             {"completion_tokens": token_usage.get("completion_tokens") or token_usage.get("output_tokens")})
         meta_dict.update({"prompt_tokens": token_usage.get("prompt_tokens") or token_usage.get("input_tokens")})
-        meta_dict.update({"total_tokens": token_usage.get("total_tokens")})
+        meta_dict.update({"total_tokens": token_usage.get("total_tokens") or token_usage.get("completion_tokens")+token_usage.get("prompt_tokens")})
     return meta_dict
 
 
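The total_tokens change is a fallback computation: when the provider omits "total_tokens" (as Anthropic-style usage payloads do), it is derived as completion plus prompt. Sample arithmetic (numbers illustrative):

# Usage dict with no "total_tokens", mirroring the fallback path above.
token_usage = {"completion_tokens": 42, "prompt_tokens": 100}
total = token_usage.get("total_tokens") or (
    token_usage.get("completion_tokens") + token_usage.get("prompt_tokens")
)
print(total)  # 142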
monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py
@@ -1,6 +1,7 @@
 from monocle_apptrace.instrumentation.metamodel.haystack import (
     _helper,
 )
+from monocle_apptrace.instrumentation.common.utils import get_llm_type
 
 INFERENCE = {
     "type": "inference",
@@ -9,7 +10,9 @@ INFERENCE = {
         {
             "_comment": "provider type ,name , deployment , inference_endpoint",
             "attribute": "type",
-            "accessor": lambda arguments: 'inference.azure_openai'
+            # "accessor": lambda arguments: 'inference.azure_openai'
+            "accessor": lambda arguments: 'inference.' + (get_llm_type(arguments['instance']) or 'generic')
+
         },
         {
             "attribute": "provider_name",
monocle_apptrace/instrumentation/metamodel/haystack/methods.py
@@ -7,7 +7,6 @@ HAYSTACK_METHODS = [
         "package": "haystack.components.retrievers.in_memory",
         "object": "InMemoryEmbeddingRetriever",
         "method": "run",
-        "span_name": "haystack.retriever",
         "wrapper_method": task_wrapper,
         "output_processor": RETRIEVAL
     },
@@ -15,7 +14,6 @@ HAYSTACK_METHODS = [
         "package": "haystack_integrations.components.retrievers.opensearch",
         "object": "OpenSearchEmbeddingRetriever",
         "method": "run",
-        "span_name": "haystack.retriever",
         "wrapper_method": task_wrapper,
         "output_processor": RETRIEVAL
     },
@@ -37,7 +35,13 @@ HAYSTACK_METHODS = [
         "package": "haystack.core.pipeline.pipeline",
         "object": "Pipeline",
         "method": "run",
+        "wrapper_method": task_wrapper
+    },
+    {
+        "package": "haystack_integrations.components.generators.anthropic",
+        "object": "AnthropicChatGenerator",
+        "method": "run",
         "wrapper_method": task_wrapper,
-        "
-    }
+        "output_processor": INFERENCE
+    },
 ]
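For context, the newly instrumented AnthropicChatGenerator is used in Haystack 2.x roughly as below (model name illustrative; requires the anthropic-haystack integration, and the exact run signature may vary by version):

from haystack.dataclasses import ChatMessage
from haystack_integrations.components.generators.anthropic import AnthropicChatGenerator

generator = AnthropicChatGenerator(model="claude-3-5-sonnet-20240620")
result = generator.run(messages=[ChatMessage.from_user("Say hello")])
# Chat generators return a dict with a "replies" list, which is what the
# extract_assistant_message / update_span_from_llm_response helpers read.
print(result["replies"][0])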
monocle_apptrace/instrumentation/metamodel/langchain/_helper.py
@@ -50,14 +50,22 @@ def extract_assistant_message(response):
 
 
 def extract_provider_name(instance):
-    provider_url: Option[str] =
+    provider_url: Option[str] = None
+    if hasattr(instance,'client'):
+        provider_url: Option[str] = try_option(getattr, instance.client._client.base_url, 'host')
+    if hasattr(instance, '_client'):
+        provider_url = try_option(getattr, instance._client.base_url, 'host')
     return provider_url.unwrap_or(None)
 
 
 def extract_inference_endpoint(instance):
-    inference_endpoint: Option[str] =
-    if
-        inference_endpoint = try_option(getattr, instance.client.
+    inference_endpoint: Option[str] = None
+    if hasattr(instance,'client'):
+        inference_endpoint: Option[str] = try_option(getattr, instance.client._client, 'base_url').map(str)
+        if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
+            inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+    if hasattr(instance,'_client'):
+        inference_endpoint = try_option(getattr, instance._client, 'base_url').map(str)
 
     return inference_endpoint.unwrap_or(extract_provider_name(instance))
 
monocle_apptrace/instrumentation/metamodel/langchain/methods.py
@@ -11,15 +11,13 @@ LANGCHAIN_METHODS = [
         "package": "langchain.prompts.base",
         "object": "BasePromptTemplate",
         "method": "invoke",
-        "wrapper_method": task_wrapper
-        "span_type": "workflow"
+        "wrapper_method": task_wrapper
     },
     {
         "package": "langchain.prompts.base",
         "object": "BasePromptTemplate",
         "method": "ainvoke",
-        "wrapper_method": atask_wrapper
-        "span_type": "workflow"
+        "wrapper_method": atask_wrapper
     },
     {
         "package": "langchain.chat_models.base",
@@ -82,30 +80,24 @@ LANGCHAIN_METHODS = [
         "package": "langchain.schema",
         "object": "BaseOutputParser",
         "method": "invoke",
-        "wrapper_method": task_wrapper
-        "span_type": "workflow"
+        "wrapper_method": task_wrapper
     },
     {
         "package": "langchain.schema",
         "object": "BaseOutputParser",
         "method": "ainvoke",
-        "wrapper_method": atask_wrapper
-        "span_type": "workflow"
+        "wrapper_method": atask_wrapper
     },
     {
         "package": "langchain.schema.runnable",
         "object": "RunnableSequence",
         "method": "invoke",
-        "
-        "wrapper_method": task_wrapper,
-        "span_type": "workflow"
+        "wrapper_method": task_wrapper
     },
     {
         "package": "langchain.schema.runnable",
         "object": "RunnableSequence",
         "method": "ainvoke",
-        "
-        "wrapper_method": atask_wrapper,
-        "span_type": "workflow"
+        "wrapper_method": atask_wrapper
     }
 ]
monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py
@@ -96,12 +96,19 @@ def extract_query_from_content(content):
 
 
 def extract_provider_name(instance):
-
-
+    if hasattr(instance,'api_base'):
+        provider_url: Option[str]= try_option(getattr, instance, 'api_base').and_then(lambda url: urlparse(url).hostname)
+    if hasattr(instance,'_client'):
+        provider_url:Option[str] = try_option(getattr, instance._client.base_url,'host')
+    return provider_url.unwrap_or(None)
 
 
 def extract_inference_endpoint(instance):
-
+    if hasattr(instance,'_client'):
+        if hasattr(instance._client,'sdk_configuration'):
+            inference_endpoint: Option[str] = try_option(getattr, instance._client.sdk_configuration, 'server_url').map(str)
+        if hasattr(instance._client,'base_url'):
+            inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
     return inference_endpoint.unwrap_or(extract_provider_name(instance))
 
 
@@ -163,10 +170,7 @@ def update_span_from_llm_response(response, instance):
     if token_usage is not None:
         temperature = instance.__dict__.get("temperature", None)
         meta_dict.update({"temperature": temperature})
-
-
-
-        meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens")})
-        if getattr(token_usage, "total_tokens", None):
-            meta_dict.update({"total_tokens": getattr(token_usage, "total_tokens")})
+        meta_dict.update({"completion_tokens": getattr(token_usage, "completion_tokens",None) or getattr(token_usage,"output_tokens",None)})
+        meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens",None) or getattr(token_usage,"input_tokens",None)})
+        meta_dict.update({"total_tokens": getattr(token_usage, "total_tokens",None) or getattr(token_usage,"output_tokens",None)+getattr(token_usage,"input_tokens",None)})
     return meta_dict
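The rewritten token bookkeeping mirrors the Haystack helper but reads attributes instead of dict keys, which is what makes Anthropic-style input_tokens/output_tokens usage objects work alongside OpenAI-style ones. A stub walk-through (values illustrative):

from types import SimpleNamespace

# Anthropic-style usage object: no completion/prompt/total_tokens attributes.
token_usage = SimpleNamespace(input_tokens=100, output_tokens=42)
completion = getattr(token_usage, "completion_tokens", None) or getattr(token_usage, "output_tokens", None)
prompt = getattr(token_usage, "prompt_tokens", None) or getattr(token_usage, "input_tokens", None)
total = getattr(token_usage, "total_tokens", None) or (
    getattr(token_usage, "output_tokens", None) + getattr(token_usage, "input_tokens", None)
)
print(completion, prompt, total)  # 42 100 142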
monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py
@@ -13,7 +13,6 @@ LLAMAINDEX_METHODS = [
         "package": "llama_index.core.indices.base_retriever",
         "object": "BaseRetriever",
         "method": "retrieve",
-        "span_name": "llamaindex.retrieve",
         "wrapper_method": task_wrapper,
         "output_processor": RETRIEVAL
     },
@@ -21,7 +20,6 @@ LLAMAINDEX_METHODS = [
         "package": "llama_index.core.indices.base_retriever",
         "object": "BaseRetriever",
         "method": "aretrieve",
-        "span_name": "llamaindex.retrieve",
         "wrapper_method": atask_wrapper,
         "output_processor": RETRIEVAL
     },
@@ -29,23 +27,18 @@ LLAMAINDEX_METHODS = [
         "package": "llama_index.core.base.base_query_engine",
         "object": "BaseQueryEngine",
         "method": "query",
-        "
-        "wrapper_method": task_wrapper,
-        "span_type": "workflow"
+        "wrapper_method": task_wrapper
     },
     {
         "package": "llama_index.core.base.base_query_engine",
         "object": "BaseQueryEngine",
        "method": "aquery",
-        "
-        "wrapper_method": atask_wrapper,
-        "span_type": "workflow"
+        "wrapper_method": atask_wrapper
     },
     {
         "package": "llama_index.core.llms.custom",
         "object": "CustomLLM",
         "method": "chat",
-        "span_name": "llamaindex.llmchat",
         "wrapper_method": task_wrapper,
         "output_processor": INFERENCE
     },
@@ -53,7 +46,6 @@ LLAMAINDEX_METHODS = [
         "package": "llama_index.core.llms.custom",
         "object": "CustomLLM",
         "method": "achat",
-        "span_name": "llamaindex.llmchat",
         "wrapper_method": atask_wrapper,
         "output_processor": INFERENCE,
 
@@ -62,7 +54,6 @@ LLAMAINDEX_METHODS = [
         "package": "llama_index.llms.openai.base",
         "object": "OpenAI",
         "method": "chat",
-        "span_name": "llamaindex.openai",
         "wrapper_method": task_wrapper,
         "output_processor": INFERENCE
     },
@@ -70,7 +61,6 @@ LLAMAINDEX_METHODS = [
         "package": "llama_index.llms.openai.base",
         "object": "OpenAI",
         "method": "achat",
-        "span_name": "llamaindex.openai",
         "wrapper_method": atask_wrapper,
         "output_processor": INFERENCE
     },
@@ -78,7 +68,6 @@ LLAMAINDEX_METHODS = [
         "package": "llama_index.llms.mistralai.base",
         "object": "MistralAI",
         "method": "chat",
-        "span_name": "llamaindex.mistralai",
         "wrapper_method": task_wrapper,
         "output_processor": INFERENCE
     },
@@ -86,7 +75,6 @@ LLAMAINDEX_METHODS = [
         "package": "llama_index.llms.mistralai.base",
         "object": "MistralAI",
         "method": "achat",
-        "span_name": "llamaindex.mistralai",
         "wrapper_method": atask_wrapper,
         "output_processor": INFERENCE
     },
@@ -94,8 +82,21 @@ LLAMAINDEX_METHODS = [
         "package": "llama_index.core.agent",
         "object": "ReActAgent",
         "method": "chat",
-        "span_name": "react.agent",
         "wrapper_method": task_wrapper,
         "output_processor": AGENT
+    },
+    {
+        "package": "llama_index.llms.anthropic",
+        "object": "Anthropic",
+        "method": "chat",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "llama_index.llms.anthropic",
+        "object": "Anthropic",
+        "method": "achat",
+        "wrapper_method": atask_wrapper,
+        "output_processor": INFERENCE
     }
 ]
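Taken together, these method tables are what the instrumentor iterates when telemetry is switched on: each package/object/method triple it can import gets its wrapper_method and output_processor attached. A sketch of the activation step, using the setup entry point from the package README (workflow name illustrative):

from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry

# Registers the wrappers for every method table above that resolves at import time.
setup_monocle_telemetry(workflow_name="llamaindex.app")
# From here on, e.g. llama_index Anthropic.chat calls emit inference spans.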