openinference-instrumentation-beeai 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openinference/instrumentation/beeai/__init__.py +78 -104
- openinference/instrumentation/beeai/_span.py +81 -0
- openinference/instrumentation/beeai/_utils.py +75 -0
- openinference/instrumentation/beeai/processors/__init__.py +0 -0
- openinference/instrumentation/beeai/processors/agents/__init__.py +0 -0
- openinference/instrumentation/beeai/processors/agents/base.py +34 -0
- openinference/instrumentation/beeai/processors/agents/react.py +77 -0
- openinference/instrumentation/beeai/processors/agents/requirement_agent.py +71 -0
- openinference/instrumentation/beeai/processors/agents/tool_calling.py +34 -0
- openinference/instrumentation/beeai/processors/base.py +60 -0
- openinference/instrumentation/beeai/processors/chat.py +239 -0
- openinference/instrumentation/beeai/processors/embedding.py +71 -0
- openinference/instrumentation/beeai/processors/locator.py +106 -0
- openinference/instrumentation/beeai/processors/requirement.py +67 -0
- openinference/instrumentation/beeai/processors/tool.py +72 -0
- openinference/instrumentation/beeai/processors/workflow.py +108 -0
- openinference/instrumentation/beeai/version.py +1 -1
- {openinference_instrumentation_beeai-0.1.5.dist-info → openinference_instrumentation_beeai-0.1.7.dist-info}/METADATA +12 -9
- openinference_instrumentation_beeai-0.1.7.dist-info/RECORD +21 -0
- openinference/instrumentation/beeai/middleware.py +0 -291
- openinference/instrumentation/beeai/utils/build_trace_tree.py +0 -170
- openinference/instrumentation/beeai/utils/create_span.py +0 -80
- openinference/instrumentation/beeai/utils/get_serialized_object_safe.py +0 -302
- openinference/instrumentation/beeai/utils/id_name_manager.py +0 -58
- openinference_instrumentation_beeai-0.1.5.dist-info/RECORD +0 -11
- {openinference_instrumentation_beeai-0.1.5.dist-info → openinference_instrumentation_beeai-0.1.7.dist-info}/WHEEL +0 -0
- {openinference_instrumentation_beeai-0.1.5.dist-info → openinference_instrumentation_beeai-0.1.7.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from datetime import datetime
|
|
3
|
+
from typing import Any, ClassVar
|
|
4
|
+
|
|
5
|
+
from beeai_framework.backend import (
|
|
6
|
+
AnyMessage,
|
|
7
|
+
ChatModel,
|
|
8
|
+
MessageImageContent,
|
|
9
|
+
MessageTextContent,
|
|
10
|
+
MessageToolCallContent,
|
|
11
|
+
MessageToolResultContent,
|
|
12
|
+
)
|
|
13
|
+
from beeai_framework.backend.events import (
|
|
14
|
+
ChatModelStartEvent,
|
|
15
|
+
ChatModelSuccessEvent,
|
|
16
|
+
)
|
|
17
|
+
from beeai_framework.context import RunContext, RunContextStartEvent
|
|
18
|
+
from beeai_framework.emitter import EventMeta
|
|
19
|
+
from beeai_framework.tools import AnyTool
|
|
20
|
+
from beeai_framework.utils.lists import remove_falsy
|
|
21
|
+
from typing_extensions import override
|
|
22
|
+
|
|
23
|
+
from openinference.instrumentation.beeai._utils import (
|
|
24
|
+
_unpack_object,
|
|
25
|
+
safe_dump_model_schema,
|
|
26
|
+
stringify,
|
|
27
|
+
)
|
|
28
|
+
from openinference.instrumentation.beeai.processors.base import Processor
|
|
29
|
+
from openinference.semconv.trace import (
|
|
30
|
+
MessageAttributes,
|
|
31
|
+
MessageContentAttributes,
|
|
32
|
+
OpenInferenceMimeTypeValues,
|
|
33
|
+
OpenInferenceSpanKindValues,
|
|
34
|
+
SpanAttributes,
|
|
35
|
+
ToolAttributes,
|
|
36
|
+
ToolCallAttributes,
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class ChatModelProcessor(Processor):
    """Processor that maps ChatModel run events onto an OpenInference LLM span."""

    # OpenInference span kind recorded for every chat-model run.
    kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.LLM

    def __init__(self, event: RunContextStartEvent, meta: "EventMeta"):
        """Create the processor and record the model identity on the span.

        Args:
            event: Run-context start event that opened this run.
            meta: Event metadata; its creator must be a ``RunContext`` wrapping
                a ``ChatModel`` instance (asserted below).
        """
        super().__init__(event, meta)

        # Timestamp of the most recent start event seen by this processor.
        self._last_updated_at: datetime = datetime.now()
        # Output messages accumulated so far, keyed by message id (or a
        # per-role fallback key) so streamed chunks can be merged.
        self._messages: dict[str, "AnyMessage"] = {}

        assert isinstance(meta.creator, RunContext)
        assert isinstance(meta.creator.instance, ChatModel)
        llm = meta.creator.instance
        self.span.set_attributes(
            {
                SpanAttributes.LLM_MODEL_NAME: llm.model_id,
                SpanAttributes.LLM_PROVIDER: llm.provider_id,
            }
        )

    @override
    async def update(
        self,
        event: Any,
        meta: "EventMeta",
    ) -> None:
        """Translate a single emitted event into span attributes/children.

        Start events record input messages, tools and invocation parameters;
        success events record output messages, token usage and output value;
        anything else becomes a child span.
        """
        await super().update(event, meta)

        self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)

        # "finish" carries no payload beyond the event marker added above.
        if meta.name == "finish":
            return

        match event:
            case ChatModelStartEvent():
                # For model-level events the creator is the ChatModel itself
                # (unlike the RunContext seen in __init__).
                assert isinstance(meta.creator, ChatModel)
                self._last_updated_at = meta.created_at
                self.span.set_attributes(
                    _process_messages(
                        event.input.messages,
                        prefix=SpanAttributes.LLM_INPUT_MESSAGES,
                    ),
                )
                self.span.set_attributes(
                    {
                        SpanAttributes.LLM_TOOLS: json.loads(stringify(event.input.tools or [])),
                        # Model parameters merged with per-call input options;
                        # the per-call values (right side of |) win on clashes.
                        SpanAttributes.LLM_INVOCATION_PARAMETERS: stringify(
                            meta.creator.parameters.model_dump(
                                exclude_none=True, exclude_unset=True
                            )
                            | event.input.model_dump(
                                exclude_none=True,
                                exclude_unset=True,
                                exclude={
                                    "tools",
                                    "messages",
                                },
                            ),
                        ),
                    }
                )

            case ChatModelSuccessEvent():
                if not self._messages:  # only when no streaming
                    self._add_new_messages(event.value.messages)

                usage = event.value.usage

                self.span.set_attributes(
                    {
                        **(
                            {
                                SpanAttributes.LLM_TOKEN_COUNT_TOTAL: usage.total_tokens,
                                SpanAttributes.LLM_TOKEN_COUNT_PROMPT: usage.prompt_tokens,
                                SpanAttributes.LLM_TOKEN_COUNT_COMPLETION: usage.completion_tokens,
                            }
                            if usage
                            else {}
                        ),
                        SpanAttributes.OPENINFERENCE_SPAN_KIND: type(self).kind,
                        SpanAttributes.OUTPUT_VALUE: event.value.get_text_content(),
                        SpanAttributes.OUTPUT_MIME_TYPE: OpenInferenceMimeTypeValues.TEXT.value,
                        f"{SpanAttributes.METADATA}.chunks_count": len(event.value.messages),
                        **_unpack_object(
                            usage.model_dump(exclude_none=True) if usage else {},
                            prefix=f"{SpanAttributes.METADATA}.usage",
                        ),
                    }
                )
            case _:
                # Unknown event type: preserve it as a child span.
                self.span.child(meta.name, event=(event, meta))

    def _add_new_messages(self, messages: list["AnyMessage"]) -> None:
        """Merge new (possibly streamed) output messages and re-emit them.

        Messages with a known id are merged into the stored copy; others are
        keyed by role. After each merge, adjacent chunks are aggregated and
        the message is written out as LLM output-message attributes.
        """
        for new_msg in messages:
            msg_id = new_msg.meta.get("id") or f"latest_{new_msg.role}"
            if msg_id in self._messages:
                self._messages[msg_id].merge(new_msg)
            else:
                # Clone so later in-place aggregation cannot mutate the
                # caller's message object.
                self._messages[msg_id] = new_msg.clone()

            _aggregate_msg_content(self._messages[msg_id])
            self.span.set_attributes(
                _process_messages(
                    [self._messages[msg_id]], prefix=SpanAttributes.LLM_OUTPUT_MESSAGES
                )
            )
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def _process_tools(tools: list[AnyTool]) -> list[dict[str, str | Any]]:
    """Build one OpenInference attribute dict per tool (name, description, schema)."""
    entries: list[dict[str, str | Any]] = []
    for tool in tools:
        entries.append(
            {
                SpanAttributes.TOOL_NAME: tool.name,
                SpanAttributes.TOOL_DESCRIPTION: tool.description,
                # TODO: difference between TOOL_PARAMETERS and TOOL_JSON_SCHEMA is not obvious
                SpanAttributes.TOOL_PARAMETERS: safe_dump_model_schema(tool.input_schema),
                ToolAttributes.TOOL_JSON_SCHEMA: safe_dump_model_schema(tool.input_schema),
            }
        )
    return entries
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def _process_messages(
    messages: list["AnyMessage"], prefix: str = "", offset: int = 0
) -> dict[str, Any]:
    """Flatten chat messages into OpenInference span attributes.

    Args:
        messages: Messages to serialize.
        prefix: Attribute-key prefix; a trailing dot is appended if missing.
        offset: Index offset applied to the emitted message positions.

    Returns:
        A flat mapping covering each message's role, contents (text, image,
        tool result) and tool calls.
    """
    if prefix and not prefix.endswith("."):
        prefix += "."

    output: dict[str, Any] = {}
    for _i, msg in enumerate(messages):
        i = _i + offset
        output[f"{prefix}{i}.{MessageAttributes.MESSAGE_ROLE}"] = str(msg.role)

        # Content items that _content_attributes does not handle map to None
        # and are dropped by remove_falsy (tool calls are emitted below).
        output.update(
            _unpack_object(
                remove_falsy(
                    [_content_attributes(content) for content in msg.content],
                ),
                prefix=f"{prefix}{i}.{MessageAttributes.MESSAGE_CONTENTS}",
            )
        )

        tool_calls: list[dict[str, Any]] = [
            {
                ToolCallAttributes.TOOL_CALL_ID: msg_call.id,
                ToolCallAttributes.TOOL_CALL_FUNCTION_NAME: msg_call.tool_name,
                ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON: msg_call.args,
            }
            for msg_call in msg.get_by_type(MessageToolCallContent)
        ]
        output.update(
            _unpack_object(tool_calls, prefix=f"{prefix}{i}.{MessageAttributes.MESSAGE_TOOL_CALLS}")
        )

    return output


def _content_attributes(content: Any) -> dict[str, Any] | None:
    """Map one message content item to its attribute entries.

    Returns ``None`` for content types not serialized here (e.g. tool-call
    content, which is emitted separately as MESSAGE_TOOL_CALLS).
    """
    if isinstance(content, MessageTextContent):
        return {
            MessageContentAttributes.MESSAGE_CONTENT_TYPE: "text",
            MessageContentAttributes.MESSAGE_CONTENT_TEXT: content.text,
        }
    if isinstance(content, MessageImageContent):
        return {
            MessageContentAttributes.MESSAGE_CONTENT_TYPE: "image",
            MessageContentAttributes.MESSAGE_CONTENT_IMAGE: content.image_url["url"],
        }
    if isinstance(content, MessageToolResultContent):
        # Tool results are rendered as pretty-printed text content and keep a
        # back-reference to the originating tool call.
        return {
            MessageAttributes.MESSAGE_TOOL_CALL_ID: content.tool_call_id,
            MessageContentAttributes.MESSAGE_CONTENT_TYPE: "text",
            MessageContentAttributes.MESSAGE_CONTENT_TEXT: stringify(
                content.result, pretty=True
            ),
        }
    return None
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def _aggregate_msg_content(message: "AnyMessage") -> None:
    """Collapse adjacent text chunks / tool-call chunks of a message, in place."""
    from beeai_framework.backend import MessageTextContent, MessageToolCallContent

    merged: list[Any] = []
    for chunk in message.content.copy():
        previous = merged[-1] if merged else None
        both_text = isinstance(previous, MessageTextContent) and isinstance(
            chunk, MessageTextContent
        )
        both_calls = isinstance(previous, MessageToolCallContent) and isinstance(
            chunk, MessageToolCallContent
        )
        if both_text:
            # Append the streamed text fragment to the preceding chunk.
            previous.text += chunk.text
        elif both_calls:
            # Streamed tool-call arguments are concatenated the same way.
            previous.args += chunk.args
        else:
            merged.append(chunk)

    message.content.clear()
    message.content.extend(merged)
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
from typing import TYPE_CHECKING, Any, ClassVar
|
|
2
|
+
|
|
3
|
+
if TYPE_CHECKING:
|
|
4
|
+
from beeai_framework.context import RunContextStartEvent
|
|
5
|
+
from beeai_framework.emitter import EventMeta
|
|
6
|
+
|
|
7
|
+
from beeai_framework.backend import EmbeddingModel
|
|
8
|
+
from beeai_framework.backend.events import (
|
|
9
|
+
EmbeddingModelStartEvent,
|
|
10
|
+
EmbeddingModelSuccessEvent,
|
|
11
|
+
)
|
|
12
|
+
from beeai_framework.context import RunContext
|
|
13
|
+
from typing_extensions import override
|
|
14
|
+
|
|
15
|
+
from openinference.instrumentation.beeai.processors.base import Processor
|
|
16
|
+
from openinference.semconv.trace import (
|
|
17
|
+
EmbeddingAttributes,
|
|
18
|
+
OpenInferenceSpanKindValues,
|
|
19
|
+
SpanAttributes,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class EmbeddingModelProcessor(Processor):
|
|
24
|
+
kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.EMBEDDING
|
|
25
|
+
|
|
26
|
+
def __init__(self, event: "RunContextStartEvent", meta: "EventMeta"):
|
|
27
|
+
super().__init__(event, meta)
|
|
28
|
+
|
|
29
|
+
assert isinstance(meta.creator, RunContext)
|
|
30
|
+
assert isinstance(meta.creator.instance, EmbeddingModel)
|
|
31
|
+
|
|
32
|
+
llm = meta.creator.instance
|
|
33
|
+
self.span.set_attributes(
|
|
34
|
+
{
|
|
35
|
+
SpanAttributes.EMBEDDING_MODEL_NAME: llm.model_id,
|
|
36
|
+
SpanAttributes.LLM_PROVIDER: llm.provider_id,
|
|
37
|
+
}
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
@override
|
|
41
|
+
async def update(
|
|
42
|
+
self,
|
|
43
|
+
event: Any,
|
|
44
|
+
meta: "EventMeta",
|
|
45
|
+
) -> None:
|
|
46
|
+
await super().update(event, meta)
|
|
47
|
+
|
|
48
|
+
self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)
|
|
49
|
+
self.span.child(meta.name, event=(event, meta))
|
|
50
|
+
|
|
51
|
+
if isinstance(event, EmbeddingModelStartEvent):
|
|
52
|
+
for idx, txt in enumerate(event.input.values):
|
|
53
|
+
self.span.set_attribute(
|
|
54
|
+
f"{SpanAttributes.EMBEDDING_EMBEDDINGS}.{idx}.{EmbeddingAttributes.EMBEDDING_TEXT}",
|
|
55
|
+
txt,
|
|
56
|
+
)
|
|
57
|
+
elif isinstance(event, EmbeddingModelSuccessEvent):
|
|
58
|
+
for idx, embedding in enumerate(event.value.embeddings):
|
|
59
|
+
self.span.set_attribute(
|
|
60
|
+
f"{SpanAttributes.EMBEDDING_EMBEDDINGS}.{idx}.{EmbeddingAttributes.EMBEDDING_VECTOR}",
|
|
61
|
+
embedding,
|
|
62
|
+
)
|
|
63
|
+
|
|
64
|
+
if event.value.usage:
|
|
65
|
+
self.span.set_attributes(
|
|
66
|
+
{
|
|
67
|
+
SpanAttributes.LLM_TOKEN_COUNT_TOTAL: event.value.usage.total_tokens,
|
|
68
|
+
SpanAttributes.LLM_TOKEN_COUNT_PROMPT: event.value.usage.prompt_tokens,
|
|
69
|
+
SpanAttributes.LLM_TOKEN_COUNT_COMPLETION: event.value.usage.completion_tokens, # noqa: E501
|
|
70
|
+
}
|
|
71
|
+
)
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
import contextlib
|
|
2
|
+
from typing import TYPE_CHECKING, Any
|
|
3
|
+
|
|
4
|
+
if TYPE_CHECKING:
|
|
5
|
+
from beeai_framework.emitter import EventMeta
|
|
6
|
+
|
|
7
|
+
from openinference.instrumentation.beeai.processors.base import Processor
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ProcessorLocator:
    """Lazy registry mapping beeai-framework classes to Processor classes."""

    # Registered (framework class -> processor class) pairs.
    entries: dict[type, type] = {}
    # Guard so the optional imports in _load() run at most once.
    _loaded: bool = False

    @staticmethod
    def _load() -> None:
        # Populate the registry on first use. Each registration is wrapped in
        # suppress(ImportError) so optional framework modules that are not
        # installed are silently skipped.
        # NOTE: insertion order matters — locate() falls back to the first
        # isinstance() match, so more specific classes (e.g. ReActAgent) are
        # registered before their bases (e.g. BaseAgent).
        if ProcessorLocator._loaded:
            return

        ProcessorLocator._loaded = True

        with contextlib.suppress(ImportError):
            from beeai_framework.backend.chat import ChatModel

            from .chat import ChatModelProcessor

            ProcessorLocator.entries[ChatModel] = ChatModelProcessor

        with contextlib.suppress(ImportError):
            from beeai_framework.backend.embedding import EmbeddingModel

            from .embedding import EmbeddingModelProcessor

            ProcessorLocator.entries[EmbeddingModel] = EmbeddingModelProcessor

        with contextlib.suppress(ImportError):
            from beeai_framework.agents.react.agent import ReActAgent

            from .agents.react import ReActAgentProcessor

            ProcessorLocator.entries[ReActAgent] = ReActAgentProcessor

        with contextlib.suppress(ImportError):
            from beeai_framework.agents.tool_calling.agent import ToolCallingAgent

            from .agents.tool_calling import ToolCallingAgentProcessor

            ProcessorLocator.entries[ToolCallingAgent] = ToolCallingAgentProcessor

        with contextlib.suppress(ImportError):
            from beeai_framework.agents.experimental.agent import RequirementAgent

            from .agents.requirement_agent import RequirementAgentProcessor

            ProcessorLocator.entries[RequirementAgent] = RequirementAgentProcessor

        with contextlib.suppress(ImportError):
            from beeai_framework.agents.experimental.requirements.requirement import Requirement

            from openinference.instrumentation.beeai.processors.requirement import (
                RequirementProcessor,
            )

            ProcessorLocator.entries[Requirement] = RequirementProcessor

        with contextlib.suppress(ImportError):
            from beeai_framework.agents.base import BaseAgent

            from openinference.instrumentation.beeai.processors.agents.base import AgentProcessor

            ProcessorLocator.entries[BaseAgent] = AgentProcessor

        with contextlib.suppress(ImportError):
            from beeai_framework.tools.tool import Tool

            from openinference.instrumentation.beeai.processors.tool import ToolProcessor

            ProcessorLocator.entries[Tool] = ToolProcessor

        with contextlib.suppress(ImportError):
            from beeai_framework.workflows.workflow import Workflow

            from .workflow import WorkflowProcessor

            ProcessorLocator.entries[Workflow] = WorkflowProcessor

    @staticmethod
    def locate(data: Any, event: "EventMeta") -> Processor:
        """Instantiate the Processor registered for the event's creator.

        Exact-class matches win; otherwise the first registered class for
        which the creator's instance passes isinstance() is used, and the
        generic Processor is the final fallback.
        """
        ProcessorLocator._load()

        from beeai_framework.context import RunContext, RunContextStartEvent

        assert isinstance(data, RunContextStartEvent)
        assert isinstance(event.creator, RunContext)

        instance_cls = type(event.creator.instance)

        if instance_cls in ProcessorLocator.entries:
            cls_processor = ProcessorLocator.entries[instance_cls]
        else:
            # for/else: the else branch runs only when no isinstance() match
            # was found in the registry.
            for cls, cls_processor in ProcessorLocator.entries.items():
                if isinstance(event.creator.instance, cls):
                    break
            else:
                cls_processor = Processor

        return cls_processor(data, event)  # type: ignore
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
from typing import Any, ClassVar
|
|
2
|
+
|
|
3
|
+
from beeai_framework.agents.experimental.requirements.ask_permission import AskPermissionRequirement
|
|
4
|
+
from beeai_framework.agents.experimental.requirements.conditional import ConditionalRequirement
|
|
5
|
+
from beeai_framework.agents.experimental.requirements.events import RequirementInitEvent
|
|
6
|
+
from beeai_framework.agents.experimental.requirements.requirement import Requirement
|
|
7
|
+
from beeai_framework.context import RunContext, RunContextStartEvent
|
|
8
|
+
from beeai_framework.emitter import EventMeta
|
|
9
|
+
from typing_extensions import override
|
|
10
|
+
|
|
11
|
+
from openinference.instrumentation.beeai._utils import _unpack_object
|
|
12
|
+
from openinference.instrumentation.beeai.processors.base import Processor
|
|
13
|
+
from openinference.semconv.trace import OpenInferenceSpanKindValues, SpanAttributes
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class RequirementProcessor(Processor):
    """Processor for Requirement runs; mirrors requirement state into span metadata."""

    kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.UNKNOWN

    def __init__(self, event: "RunContextStartEvent", meta: "EventMeta") -> None:
        super().__init__(event, meta)

        assert isinstance(meta.creator, RunContext)
        assert isinstance(meta.creator.instance, Requirement)

        requirement = meta.creator.instance
        self.span.name = requirement.name or self.span.name
        self._sync_state(meta.creator.instance)

    def _sync_state(self, instance: "Requirement[Any]") -> None:
        """Snapshot the requirement's configuration into span metadata."""
        snapshot: dict[str, Any] = {
            "name": instance.name,
            "enabled": instance.enabled,
            "priority": instance.priority,
            "state": instance.state,
            "middlewares": [str(middleware) for middleware in instance.middlewares],
        }

        # Subtype-specific details (accesses private fields of the framework
        # classes for AskPermissionRequirement).
        if isinstance(instance, ConditionalRequirement):
            snapshot["target_name"] = str(instance.source)
        elif isinstance(instance, AskPermissionRequirement):
            snapshot.update(
                {
                    "includes": [str(tool) for tool in instance._include],
                    "excludes": [str(tool) for tool in instance._exclude],
                    "remember_choices": instance._remember_choices,
                    "_always_allow": instance._always_allow,
                }
            )

        self.span.set_attributes(
            _unpack_object(
                snapshot,
                prefix=f"{SpanAttributes.METADATA}",
            )
        )

    @override
    async def update(
        self,
        event: Any,
        meta: "EventMeta",
    ) -> None:
        await super().update(event, meta)

        self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)

        # Init events carry nothing worth a dedicated child span.
        if isinstance(event, RequirementInitEvent):
            return
        self.span.child(meta.name, event=(event, meta))
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
from typing import Any, ClassVar
|
|
2
|
+
|
|
3
|
+
from beeai_framework.context import RunContext, RunContextStartEvent
|
|
4
|
+
from beeai_framework.emitter import EventMeta
|
|
5
|
+
from beeai_framework.tools import ToolErrorEvent, ToolRetryEvent, ToolSuccessEvent
|
|
6
|
+
from beeai_framework.tools.tool import Tool
|
|
7
|
+
from typing_extensions import override
|
|
8
|
+
|
|
9
|
+
from openinference.instrumentation.beeai._utils import safe_dump_model_schema, stringify
|
|
10
|
+
from openinference.instrumentation.beeai.processors.base import Processor
|
|
11
|
+
from openinference.semconv.trace import (
|
|
12
|
+
OpenInferenceMimeTypeValues,
|
|
13
|
+
OpenInferenceSpanKindValues,
|
|
14
|
+
SpanAttributes,
|
|
15
|
+
ToolAttributes,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class ToolProcessor(Processor):
|
|
20
|
+
kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.TOOL
|
|
21
|
+
|
|
22
|
+
def __init__(self, event: "RunContextStartEvent", meta: "EventMeta") -> None:
|
|
23
|
+
super().__init__(event, meta)
|
|
24
|
+
|
|
25
|
+
assert isinstance(meta.creator, RunContext)
|
|
26
|
+
assert isinstance(meta.creator.instance, Tool)
|
|
27
|
+
|
|
28
|
+
tool = meta.creator.instance
|
|
29
|
+
self.span.name = tool.name
|
|
30
|
+
self.span.set_attributes(
|
|
31
|
+
{
|
|
32
|
+
SpanAttributes.TOOL_NAME: tool.name,
|
|
33
|
+
SpanAttributes.TOOL_DESCRIPTION: tool.description,
|
|
34
|
+
# TODO: what's the difference?
|
|
35
|
+
SpanAttributes.TOOL_PARAMETERS: stringify(
|
|
36
|
+
safe_dump_model_schema(tool.input_schema)
|
|
37
|
+
),
|
|
38
|
+
ToolAttributes.TOOL_JSON_SCHEMA: stringify(
|
|
39
|
+
safe_dump_model_schema(tool.input_schema)
|
|
40
|
+
),
|
|
41
|
+
}
|
|
42
|
+
)
|
|
43
|
+
|
|
44
|
+
@override
|
|
45
|
+
async def update(
|
|
46
|
+
self,
|
|
47
|
+
event: Any,
|
|
48
|
+
meta: "EventMeta",
|
|
49
|
+
) -> None:
|
|
50
|
+
await super().update(event, meta)
|
|
51
|
+
|
|
52
|
+
self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)
|
|
53
|
+
|
|
54
|
+
match event:
|
|
55
|
+
case ToolSuccessEvent():
|
|
56
|
+
output_cls = type(event.output)
|
|
57
|
+
|
|
58
|
+
self.span.set_attributes(
|
|
59
|
+
{
|
|
60
|
+
SpanAttributes.OUTPUT_VALUE: event.output.get_text_content(),
|
|
61
|
+
SpanAttributes.OUTPUT_MIME_TYPE: OpenInferenceMimeTypeValues.TEXT.value,
|
|
62
|
+
f"{SpanAttributes.METADATA}.output_class": output_cls.__name__,
|
|
63
|
+
f"{SpanAttributes.METADATA}.is_empty": event.output.is_empty(),
|
|
64
|
+
}
|
|
65
|
+
)
|
|
66
|
+
case ToolErrorEvent():
|
|
67
|
+
span = self.span.child(meta.name, event=(event, meta))
|
|
68
|
+
span.record_exception(event.error)
|
|
69
|
+
case ToolRetryEvent():
|
|
70
|
+
self.span.child(meta.name, event=(event, meta))
|
|
71
|
+
case _:
|
|
72
|
+
self.span.child(meta.name, event=(event, meta))
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
from typing import Any, ClassVar
|
|
2
|
+
|
|
3
|
+
from beeai_framework.context import RunContext, RunContextStartEvent
|
|
4
|
+
from beeai_framework.emitter import EventMeta
|
|
5
|
+
from beeai_framework.workflows import (
|
|
6
|
+
Workflow,
|
|
7
|
+
WorkflowErrorEvent,
|
|
8
|
+
WorkflowStartEvent,
|
|
9
|
+
WorkflowSuccessEvent,
|
|
10
|
+
)
|
|
11
|
+
from beeai_framework.workflows.agent.agent import Schema as AgentWorkflowSchema
|
|
12
|
+
from pydantic import BaseModel
|
|
13
|
+
from typing_extensions import override
|
|
14
|
+
|
|
15
|
+
from openinference.instrumentation.beeai._utils import (
|
|
16
|
+
_unpack_object,
|
|
17
|
+
safe_dump_model_schema,
|
|
18
|
+
stringify,
|
|
19
|
+
)
|
|
20
|
+
from openinference.instrumentation.beeai.processors.base import Processor
|
|
21
|
+
from openinference.semconv.trace import (
|
|
22
|
+
OpenInferenceMimeTypeValues,
|
|
23
|
+
OpenInferenceSpanKindValues,
|
|
24
|
+
SpanAttributes,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class WorkflowProcessor(Processor):
|
|
29
|
+
kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.CHAIN
|
|
30
|
+
|
|
31
|
+
def __init__(self, event: "RunContextStartEvent", meta: "EventMeta"):
|
|
32
|
+
super().__init__(event, meta)
|
|
33
|
+
|
|
34
|
+
assert isinstance(meta.creator, RunContext)
|
|
35
|
+
assert isinstance(meta.creator.instance, Workflow)
|
|
36
|
+
|
|
37
|
+
self._last_step = 0
|
|
38
|
+
|
|
39
|
+
workflow = meta.creator.instance
|
|
40
|
+
self.span.name = workflow.name or self.span.name
|
|
41
|
+
self.span.set_attributes(
|
|
42
|
+
{
|
|
43
|
+
f"{SpanAttributes.METADATA}.name": workflow.name,
|
|
44
|
+
f"{SpanAttributes.METADATA}.all_steps": workflow.step_names,
|
|
45
|
+
f"{SpanAttributes.METADATA}.start_step": workflow.start_step,
|
|
46
|
+
**_unpack_object(
|
|
47
|
+
safe_dump_model_schema(workflow.schema),
|
|
48
|
+
prefix=f"{SpanAttributes.METADATA}.schema",
|
|
49
|
+
),
|
|
50
|
+
}
|
|
51
|
+
)
|
|
52
|
+
|
|
53
|
+
@override
|
|
54
|
+
async def update(
|
|
55
|
+
self,
|
|
56
|
+
event: Any,
|
|
57
|
+
meta: "EventMeta",
|
|
58
|
+
) -> None:
|
|
59
|
+
await super().update(event, meta)
|
|
60
|
+
|
|
61
|
+
self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)
|
|
62
|
+
|
|
63
|
+
match event:
|
|
64
|
+
case WorkflowStartEvent() | WorkflowSuccessEvent() | WorkflowErrorEvent():
|
|
65
|
+
self._update_state(event)
|
|
66
|
+
case _:
|
|
67
|
+
self.span.child(meta.name, event=(event, meta))
|
|
68
|
+
|
|
69
|
+
def _update_state(
|
|
70
|
+
self, event: "WorkflowStartEvent[Any] | WorkflowSuccessEvent[Any] | WorkflowErrorEvent[Any]"
|
|
71
|
+
) -> None:
|
|
72
|
+
self.span.set_attribute(f"{SpanAttributes.METADATA}.current_step", event.step)
|
|
73
|
+
self.span.set_attributes(
|
|
74
|
+
_unpack_object(
|
|
75
|
+
_serialize_state(event.run.state), prefix=f"{SpanAttributes.METADATA}.state"
|
|
76
|
+
)
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
# update steps
|
|
80
|
+
for idx, step in enumerate(event.run.steps):
|
|
81
|
+
if idx < self._last_step:
|
|
82
|
+
continue
|
|
83
|
+
|
|
84
|
+
self.span.set_attributes(
|
|
85
|
+
_unpack_object(
|
|
86
|
+
{
|
|
87
|
+
"name": step.name,
|
|
88
|
+
"state": _serialize_state(step.state),
|
|
89
|
+
},
|
|
90
|
+
prefix=f"{SpanAttributes.METADATA}.steps.{idx}",
|
|
91
|
+
)
|
|
92
|
+
)
|
|
93
|
+
self._last_step = idx
|
|
94
|
+
|
|
95
|
+
if isinstance(event, WorkflowSuccessEvent):
|
|
96
|
+
if event.next == Workflow.END or event.run.result is not None:
|
|
97
|
+
result = event.run.result if event.run.result is not None else event.state
|
|
98
|
+
self.span.attributes.update(
|
|
99
|
+
{
|
|
100
|
+
SpanAttributes.OUTPUT_VALUE: stringify(_serialize_state(result)),
|
|
101
|
+
SpanAttributes.OUTPUT_MIME_TYPE: OpenInferenceMimeTypeValues.JSON.value,
|
|
102
|
+
}
|
|
103
|
+
)
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def _serialize_state(result: BaseModel) -> dict[str, Any]:
    """Dump a workflow state model to a plain dict, omitting None values.

    Agent-workflow schemas additionally drop their bulky message fields.
    """
    if isinstance(result, AgentWorkflowSchema):
        excluded: set[str] = {"new_messages", "inputs"}
    else:
        excluded = set()
    return result.model_dump(exclude=excluded, exclude_none=True)
|
|
@@ -1 +1 @@
|
|
|
1
|
-
# Single source of truth for the package version (bumped on each release).
__version__ = "0.1.7"
|