openinference-instrumentation-beeai 0.1.6__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. openinference/instrumentation/beeai/__init__.py +78 -104
  2. openinference/instrumentation/beeai/_span.py +81 -0
  3. openinference/instrumentation/beeai/_utils.py +77 -0
  4. openinference/instrumentation/beeai/processors/__init__.py +0 -0
  5. openinference/instrumentation/beeai/processors/agents/__init__.py +0 -0
  6. openinference/instrumentation/beeai/processors/agents/base.py +34 -0
  7. openinference/instrumentation/beeai/processors/agents/react.py +77 -0
  8. openinference/instrumentation/beeai/processors/agents/requirement_agent.py +71 -0
  9. openinference/instrumentation/beeai/processors/agents/tool_calling.py +34 -0
  10. openinference/instrumentation/beeai/processors/base.py +60 -0
  11. openinference/instrumentation/beeai/processors/chat.py +245 -0
  12. openinference/instrumentation/beeai/processors/embedding.py +71 -0
  13. openinference/instrumentation/beeai/processors/locator.py +106 -0
  14. openinference/instrumentation/beeai/processors/requirement.py +67 -0
  15. openinference/instrumentation/beeai/processors/tool.py +68 -0
  16. openinference/instrumentation/beeai/processors/workflow.py +108 -0
  17. openinference/instrumentation/beeai/version.py +1 -1
  18. {openinference_instrumentation_beeai-0.1.6.dist-info → openinference_instrumentation_beeai-0.1.8.dist-info}/METADATA +12 -9
  19. openinference_instrumentation_beeai-0.1.8.dist-info/RECORD +21 -0
  20. openinference/instrumentation/beeai/middleware.py +0 -291
  21. openinference/instrumentation/beeai/utils/build_trace_tree.py +0 -170
  22. openinference/instrumentation/beeai/utils/create_span.py +0 -80
  23. openinference/instrumentation/beeai/utils/get_serialized_object_safe.py +0 -302
  24. openinference/instrumentation/beeai/utils/id_name_manager.py +0 -58
  25. openinference_instrumentation_beeai-0.1.6.dist-info/RECORD +0 -11
  26. {openinference_instrumentation_beeai-0.1.6.dist-info → openinference_instrumentation_beeai-0.1.8.dist-info}/WHEEL +0 -0
  27. {openinference_instrumentation_beeai-0.1.6.dist-info → openinference_instrumentation_beeai-0.1.8.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,245 @@
1
+ from datetime import datetime
2
+ from typing import Any, ClassVar
3
+
4
+ from beeai_framework.backend import (
5
+ AnyMessage,
6
+ ChatModel,
7
+ MessageImageContent,
8
+ MessageTextContent,
9
+ MessageToolCallContent,
10
+ MessageToolResultContent,
11
+ )
12
+ from beeai_framework.backend.events import (
13
+ ChatModelStartEvent,
14
+ ChatModelSuccessEvent,
15
+ )
16
+ from beeai_framework.context import RunContext, RunContextStartEvent
17
+ from beeai_framework.emitter import EventMeta
18
+ from beeai_framework.tools import AnyTool
19
+ from beeai_framework.utils.lists import remove_falsy
20
+ from typing_extensions import override
21
+
22
+ from openinference.instrumentation.beeai._utils import (
23
+ _unpack_object,
24
+ safe_dump_model_schema,
25
+ stringify,
26
+ )
27
+ from openinference.instrumentation.beeai.processors.base import Processor
28
+ from openinference.semconv.trace import (
29
+ MessageAttributes,
30
+ MessageContentAttributes,
31
+ OpenInferenceMimeTypeValues,
32
+ OpenInferenceSpanKindValues,
33
+ SpanAttributes,
34
+ ToolAttributes,
35
+ ToolCallAttributes,
36
+ )
37
+
38
+
39
+ class ChatModelProcessor(Processor):
40
+ kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.LLM
41
+
42
+ def __init__(self, event: RunContextStartEvent, meta: "EventMeta"):
43
+ super().__init__(event, meta)
44
+
45
+ self._last_updated_at = datetime.now()
46
+ self._messages: dict[str, "AnyMessage"] = {}
47
+
48
+ assert isinstance(meta.creator, RunContext)
49
+ assert isinstance(meta.creator.instance, ChatModel)
50
+ llm = meta.creator.instance
51
+ self.span.set_attributes(
52
+ {
53
+ SpanAttributes.LLM_MODEL_NAME: llm.model_id,
54
+ SpanAttributes.LLM_PROVIDER: llm.provider_id,
55
+ }
56
+ )
57
+
58
+ @override
59
+ async def update(
60
+ self,
61
+ event: Any,
62
+ meta: "EventMeta",
63
+ ) -> None:
64
+ await super().update(event, meta)
65
+
66
+ self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)
67
+
68
+ if meta.name == "finish":
69
+ return
70
+
71
+ match event:
72
+ case ChatModelStartEvent():
73
+ assert isinstance(meta.creator, ChatModel)
74
+ self._last_updated_at = meta.created_at
75
+ self.span.set_attributes(
76
+ _process_messages(
77
+ event.input.messages,
78
+ prefix=SpanAttributes.LLM_INPUT_MESSAGES,
79
+ ),
80
+ )
81
+ self.span.set_attributes(
82
+ {
83
+ SpanAttributes.LLM_TOOLS: [t.name for t in (event.input.tools or [])],
84
+ SpanAttributes.LLM_INVOCATION_PARAMETERS: stringify(
85
+ meta.creator.parameters.model_dump(
86
+ exclude_none=True, exclude_unset=True
87
+ )
88
+ | event.input.model_dump(
89
+ exclude_none=True,
90
+ exclude_unset=True,
91
+ exclude={
92
+ "tools",
93
+ "messages",
94
+ },
95
+ ),
96
+ ),
97
+ }
98
+ )
99
+
100
+ case ChatModelSuccessEvent():
101
+ if not self._messages: # only when no streaming
102
+ self._add_new_messages(event.value.messages)
103
+
104
+ usage = event.value.usage
105
+ if usage:
106
+ self.span.set_attributes(
107
+ {
108
+ SpanAttributes.LLM_TOKEN_COUNT_TOTAL: usage.total_tokens,
109
+ SpanAttributes.LLM_TOKEN_COUNT_PROMPT: usage.prompt_tokens,
110
+ SpanAttributes.LLM_TOKEN_COUNT_COMPLETION: usage.completion_tokens,
111
+ }
112
+ )
113
+
114
+ cost = event.value.cost
115
+ if cost:
116
+ self.span.set_attributes(
117
+ {
118
+ SpanAttributes.LLM_COST_COMPLETION: cost.completion_tokens_cost_usd,
119
+ SpanAttributes.LLM_COST_PROMPT: cost.prompt_tokens_usd,
120
+ SpanAttributes.LLM_COST_TOTAL: cost.total_cost_usd,
121
+ }
122
+ )
123
+
124
+ self.span.set_attributes(
125
+ {
126
+ SpanAttributes.OPENINFERENCE_SPAN_KIND: type(self).kind,
127
+ SpanAttributes.OUTPUT_VALUE: event.value.get_text_content(),
128
+ SpanAttributes.OUTPUT_MIME_TYPE: OpenInferenceMimeTypeValues.TEXT.value,
129
+ f"{SpanAttributes.METADATA}.chunks_count": len(event.value.messages),
130
+ **_unpack_object(
131
+ usage.model_dump(exclude_none=True) if usage else {},
132
+ prefix=f"{SpanAttributes.METADATA}.usage",
133
+ ),
134
+ }
135
+ )
136
+ case _:
137
+ self.span.child(meta.name, event=(event, meta))
138
+
139
+ def _add_new_messages(self, messages: list["AnyMessage"]) -> None:
140
+ for new_msg in messages:
141
+ msg_id = new_msg.meta.get("id") or f"latest_{new_msg.role}"
142
+ if msg_id in self._messages:
143
+ self._messages[msg_id].merge(new_msg)
144
+ else:
145
+ self._messages[msg_id] = new_msg.clone()
146
+
147
+ _aggregate_msg_content(self._messages[msg_id])
148
+ self.span.set_attributes(
149
+ _process_messages(
150
+ [self._messages[msg_id]], prefix=SpanAttributes.LLM_OUTPUT_MESSAGES
151
+ )
152
+ )
153
+
154
+
155
+ def _process_tools(tools: list[AnyTool]) -> list[dict[str, str | Any]]:
156
+ return [
157
+ {
158
+ SpanAttributes.TOOL_NAME: t.name,
159
+ SpanAttributes.TOOL_DESCRIPTION: t.description,
160
+ ToolAttributes.TOOL_JSON_SCHEMA: safe_dump_model_schema(t.input_schema),
161
+ }
162
+ for t in tools
163
+ ]
164
+
165
+
166
+ def _process_messages(
167
+ messages: list["AnyMessage"], prefix: str = "", offset: int = 0
168
+ ) -> dict[str, Any]:
169
+ if prefix and not prefix.endswith("."):
170
+ prefix += "."
171
+
172
+ output = {}
173
+ for _i, msg in enumerate(messages):
174
+ i = _i + offset
175
+ output[f"{prefix}{i}.{MessageAttributes.MESSAGE_ROLE}"] = str(msg.role)
176
+
177
+ output.update(
178
+ _unpack_object(
179
+ remove_falsy(
180
+ [
181
+ (
182
+ {
183
+ MessageContentAttributes.MESSAGE_CONTENT_TYPE: "text",
184
+ MessageContentAttributes.MESSAGE_CONTENT_TEXT: content.text,
185
+ }
186
+ if isinstance(content, MessageTextContent)
187
+ else {
188
+ MessageContentAttributes.MESSAGE_CONTENT_TYPE: "image",
189
+ MessageContentAttributes.MESSAGE_CONTENT_IMAGE: content.image_url[
190
+ "url"
191
+ ],
192
+ }
193
+ if isinstance(content, MessageImageContent)
194
+ else {
195
+ MessageAttributes.MESSAGE_TOOL_CALL_ID: content.tool_call_id,
196
+ MessageContentAttributes.MESSAGE_CONTENT_TYPE: "text",
197
+ MessageContentAttributes.MESSAGE_CONTENT_TEXT: stringify(
198
+ content.result, pretty=True
199
+ ),
200
+ }
201
+ if isinstance(content, MessageToolResultContent)
202
+ else None
203
+ )
204
+ for content in msg.content
205
+ ],
206
+ ),
207
+ prefix=f"{prefix}{i}.{MessageAttributes.MESSAGE_CONTENTS}",
208
+ )
209
+ )
210
+ # )
211
+
212
+ tool_calls: list[dict[str, Any]] = [
213
+ {
214
+ ToolCallAttributes.TOOL_CALL_ID: msg_call.id,
215
+ ToolCallAttributes.TOOL_CALL_FUNCTION_NAME: msg_call.tool_name,
216
+ ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON: msg_call.args,
217
+ }
218
+ for msg_call in msg.get_by_type(MessageToolCallContent)
219
+ ]
220
+ output.update(
221
+ _unpack_object(tool_calls, prefix=f"{prefix}{i}.{MessageAttributes.MESSAGE_TOOL_CALLS}")
222
+ )
223
+
224
+ return output
225
+
226
+
227
+ def _aggregate_msg_content(message: "AnyMessage") -> None:
228
+ from beeai_framework.backend import MessageTextContent, MessageToolCallContent
229
+
230
+ contents = message.content.copy()
231
+ aggregated_content: list[Any] = []
232
+
233
+ for content in contents:
234
+ last_content = aggregated_content[-1] if aggregated_content else None
235
+ if isinstance(last_content, MessageTextContent) and isinstance(content, MessageTextContent):
236
+ last_content.text += content.text
237
+ elif isinstance(last_content, MessageToolCallContent) and isinstance(
238
+ content, MessageToolCallContent
239
+ ):
240
+ last_content.args += content.args
241
+ else:
242
+ aggregated_content.append(content)
243
+
244
+ message.content.clear()
245
+ message.content.extend(aggregated_content)
@@ -0,0 +1,71 @@
1
+ from typing import TYPE_CHECKING, Any, ClassVar
2
+
3
+ if TYPE_CHECKING:
4
+ from beeai_framework.context import RunContextStartEvent
5
+ from beeai_framework.emitter import EventMeta
6
+
7
+ from beeai_framework.backend import EmbeddingModel
8
+ from beeai_framework.backend.events import (
9
+ EmbeddingModelStartEvent,
10
+ EmbeddingModelSuccessEvent,
11
+ )
12
+ from beeai_framework.context import RunContext
13
+ from typing_extensions import override
14
+
15
+ from openinference.instrumentation.beeai.processors.base import Processor
16
+ from openinference.semconv.trace import (
17
+ EmbeddingAttributes,
18
+ OpenInferenceSpanKindValues,
19
+ SpanAttributes,
20
+ )
21
+
22
+
23
+ class EmbeddingModelProcessor(Processor):
24
+ kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.EMBEDDING
25
+
26
+ def __init__(self, event: "RunContextStartEvent", meta: "EventMeta"):
27
+ super().__init__(event, meta)
28
+
29
+ assert isinstance(meta.creator, RunContext)
30
+ assert isinstance(meta.creator.instance, EmbeddingModel)
31
+
32
+ llm = meta.creator.instance
33
+ self.span.set_attributes(
34
+ {
35
+ SpanAttributes.EMBEDDING_MODEL_NAME: llm.model_id,
36
+ SpanAttributes.LLM_PROVIDER: llm.provider_id,
37
+ }
38
+ )
39
+
40
+ @override
41
+ async def update(
42
+ self,
43
+ event: Any,
44
+ meta: "EventMeta",
45
+ ) -> None:
46
+ await super().update(event, meta)
47
+
48
+ self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)
49
+ self.span.child(meta.name, event=(event, meta))
50
+
51
+ if isinstance(event, EmbeddingModelStartEvent):
52
+ for idx, txt in enumerate(event.input.values):
53
+ self.span.set_attribute(
54
+ f"{SpanAttributes.EMBEDDING_EMBEDDINGS}.{idx}.{EmbeddingAttributes.EMBEDDING_TEXT}",
55
+ txt,
56
+ )
57
+ elif isinstance(event, EmbeddingModelSuccessEvent):
58
+ for idx, embedding in enumerate(event.value.embeddings):
59
+ self.span.set_attribute(
60
+ f"{SpanAttributes.EMBEDDING_EMBEDDINGS}.{idx}.{EmbeddingAttributes.EMBEDDING_VECTOR}",
61
+ embedding,
62
+ )
63
+
64
+ if event.value.usage:
65
+ self.span.set_attributes(
66
+ {
67
+ SpanAttributes.LLM_TOKEN_COUNT_TOTAL: event.value.usage.total_tokens,
68
+ SpanAttributes.LLM_TOKEN_COUNT_PROMPT: event.value.usage.prompt_tokens,
69
+ SpanAttributes.LLM_TOKEN_COUNT_COMPLETION: event.value.usage.completion_tokens, # noqa: E501
70
+ }
71
+ )
@@ -0,0 +1,106 @@
1
+ import contextlib
2
+ from typing import TYPE_CHECKING, Any
3
+
4
+ if TYPE_CHECKING:
5
+ from beeai_framework.emitter import EventMeta
6
+
7
+ from openinference.instrumentation.beeai.processors.base import Processor
8
+
9
+
10
+ class ProcessorLocator:
11
+ entries: dict[type, type] = {}
12
+ _loaded: bool = False
13
+
14
+ @staticmethod
15
+ def _load() -> None:
16
+ if ProcessorLocator._loaded:
17
+ return
18
+
19
+ ProcessorLocator._loaded = True
20
+
21
+ with contextlib.suppress(ImportError):
22
+ from beeai_framework.backend.chat import ChatModel
23
+
24
+ from .chat import ChatModelProcessor
25
+
26
+ ProcessorLocator.entries[ChatModel] = ChatModelProcessor
27
+
28
+ with contextlib.suppress(ImportError):
29
+ from beeai_framework.backend.embedding import EmbeddingModel
30
+
31
+ from .embedding import EmbeddingModelProcessor
32
+
33
+ ProcessorLocator.entries[EmbeddingModel] = EmbeddingModelProcessor
34
+
35
+ with contextlib.suppress(ImportError):
36
+ from beeai_framework.agents.react.agent import ReActAgent
37
+
38
+ from .agents.react import ReActAgentProcessor
39
+
40
+ ProcessorLocator.entries[ReActAgent] = ReActAgentProcessor
41
+
42
+ with contextlib.suppress(ImportError):
43
+ from beeai_framework.agents.tool_calling.agent import ToolCallingAgent
44
+
45
+ from .agents.tool_calling import ToolCallingAgentProcessor
46
+
47
+ ProcessorLocator.entries[ToolCallingAgent] = ToolCallingAgentProcessor
48
+
49
+ with contextlib.suppress(ImportError):
50
+ from beeai_framework.agents.experimental.agent import RequirementAgent
51
+
52
+ from .agents.requirement_agent import RequirementAgentProcessor
53
+
54
+ ProcessorLocator.entries[RequirementAgent] = RequirementAgentProcessor
55
+
56
+ with contextlib.suppress(ImportError):
57
+ from beeai_framework.agents.experimental.requirements.requirement import Requirement
58
+
59
+ from openinference.instrumentation.beeai.processors.requirement import (
60
+ RequirementProcessor,
61
+ )
62
+
63
+ ProcessorLocator.entries[Requirement] = RequirementProcessor
64
+
65
+ with contextlib.suppress(ImportError):
66
+ from beeai_framework.agents.base import BaseAgent
67
+
68
+ from openinference.instrumentation.beeai.processors.agents.base import AgentProcessor
69
+
70
+ ProcessorLocator.entries[BaseAgent] = AgentProcessor
71
+
72
+ with contextlib.suppress(ImportError):
73
+ from beeai_framework.tools.tool import Tool
74
+
75
+ from openinference.instrumentation.beeai.processors.tool import ToolProcessor
76
+
77
+ ProcessorLocator.entries[Tool] = ToolProcessor
78
+
79
+ with contextlib.suppress(ImportError):
80
+ from beeai_framework.workflows.workflow import Workflow
81
+
82
+ from .workflow import WorkflowProcessor
83
+
84
+ ProcessorLocator.entries[Workflow] = WorkflowProcessor
85
+
86
+ @staticmethod
87
+ def locate(data: Any, event: "EventMeta") -> Processor:
88
+ ProcessorLocator._load()
89
+
90
+ from beeai_framework.context import RunContext, RunContextStartEvent
91
+
92
+ assert isinstance(data, RunContextStartEvent)
93
+ assert isinstance(event.creator, RunContext)
94
+
95
+ instance_cls = type(event.creator.instance)
96
+
97
+ if instance_cls in ProcessorLocator.entries:
98
+ cls_processor = ProcessorLocator.entries[instance_cls]
99
+ else:
100
+ for cls, cls_processor in ProcessorLocator.entries.items():
101
+ if isinstance(event.creator.instance, cls):
102
+ break
103
+ else:
104
+ cls_processor = Processor
105
+
106
+ return cls_processor(data, event) # type: ignore
@@ -0,0 +1,67 @@
1
+ from typing import Any, ClassVar
2
+
3
+ from beeai_framework.agents.experimental.requirements.ask_permission import AskPermissionRequirement
4
+ from beeai_framework.agents.experimental.requirements.conditional import ConditionalRequirement
5
+ from beeai_framework.agents.experimental.requirements.events import RequirementInitEvent
6
+ from beeai_framework.agents.experimental.requirements.requirement import Requirement
7
+ from beeai_framework.context import RunContext, RunContextStartEvent
8
+ from beeai_framework.emitter import EventMeta
9
+ from typing_extensions import override
10
+
11
+ from openinference.instrumentation.beeai._utils import _unpack_object
12
+ from openinference.instrumentation.beeai.processors.base import Processor
13
+ from openinference.semconv.trace import OpenInferenceSpanKindValues, SpanAttributes
14
+
15
+
16
+ class RequirementProcessor(Processor):
17
+ kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.UNKNOWN
18
+
19
+ def __init__(self, event: "RunContextStartEvent", meta: "EventMeta") -> None:
20
+ super().__init__(event, meta)
21
+
22
+ assert isinstance(meta.creator, RunContext)
23
+ assert isinstance(meta.creator.instance, Requirement)
24
+
25
+ requirement = meta.creator.instance
26
+ self.span.name = requirement.name or self.span.name
27
+ self._sync_state(meta.creator.instance)
28
+
29
+ def _sync_state(self, instance: "Requirement[Any]") -> None:
30
+ attributes = {
31
+ "name": instance.name,
32
+ "enabled": instance.enabled,
33
+ "priority": instance.priority,
34
+ "state": instance.state,
35
+ "middlewares": [str(m) for m in instance.middlewares],
36
+ }
37
+
38
+ if isinstance(instance, ConditionalRequirement):
39
+ attributes["target_name"] = str(instance.source)
40
+ elif isinstance(instance, AskPermissionRequirement):
41
+ attributes["includes"] = [str(t) for t in instance._include]
42
+ attributes["excludes"] = [str(t) for t in instance._exclude]
43
+ attributes["remember_choices"] = instance._remember_choices
44
+ attributes["_always_allow"] = instance._always_allow
45
+
46
+ self.span.set_attributes(
47
+ _unpack_object(
48
+ attributes,
49
+ prefix=f"{SpanAttributes.METADATA}",
50
+ )
51
+ )
52
+
53
+ @override
54
+ async def update(
55
+ self,
56
+ event: Any,
57
+ meta: "EventMeta",
58
+ ) -> None:
59
+ await super().update(event, meta)
60
+
61
+ self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)
62
+
63
+ match event:
64
+ case RequirementInitEvent():
65
+ pass
66
+ case _:
67
+ self.span.child(meta.name, event=(event, meta))
@@ -0,0 +1,68 @@
1
+ from typing import Any, ClassVar
2
+
3
+ from beeai_framework.context import RunContext, RunContextStartEvent
4
+ from beeai_framework.emitter import EventMeta
5
+ from beeai_framework.tools import ToolErrorEvent, ToolRetryEvent, ToolSuccessEvent
6
+ from beeai_framework.tools.tool import Tool
7
+ from typing_extensions import override
8
+
9
+ from openinference.instrumentation.beeai._utils import safe_dump_model_schema, stringify
10
+ from openinference.instrumentation.beeai.processors.base import Processor
11
+ from openinference.semconv.trace import (
12
+ OpenInferenceMimeTypeValues,
13
+ OpenInferenceSpanKindValues,
14
+ SpanAttributes,
15
+ ToolAttributes,
16
+ )
17
+
18
+
19
+ class ToolProcessor(Processor):
20
+ kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.TOOL
21
+
22
+ def __init__(self, event: "RunContextStartEvent", meta: "EventMeta") -> None:
23
+ super().__init__(event, meta)
24
+
25
+ assert isinstance(meta.creator, RunContext)
26
+ assert isinstance(meta.creator.instance, Tool)
27
+
28
+ tool = meta.creator.instance
29
+ self.span.name = tool.name
30
+ self.span.set_attributes(
31
+ {
32
+ SpanAttributes.TOOL_NAME: tool.name,
33
+ SpanAttributes.TOOL_DESCRIPTION: tool.description,
34
+ ToolAttributes.TOOL_JSON_SCHEMA: stringify(
35
+ safe_dump_model_schema(tool.input_schema)
36
+ ),
37
+ }
38
+ )
39
+
40
+ @override
41
+ async def update(
42
+ self,
43
+ event: Any,
44
+ meta: "EventMeta",
45
+ ) -> None:
46
+ await super().update(event, meta)
47
+
48
+ self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)
49
+
50
+ match event:
51
+ case ToolSuccessEvent():
52
+ output_cls = type(event.output)
53
+
54
+ self.span.set_attributes(
55
+ {
56
+ SpanAttributes.OUTPUT_VALUE: event.output.get_text_content(),
57
+ SpanAttributes.OUTPUT_MIME_TYPE: OpenInferenceMimeTypeValues.TEXT.value,
58
+ f"{SpanAttributes.METADATA}.output_class": output_cls.__name__,
59
+ f"{SpanAttributes.METADATA}.is_empty": event.output.is_empty(),
60
+ }
61
+ )
62
+ case ToolErrorEvent():
63
+ span = self.span.child(meta.name, event=(event, meta))
64
+ span.record_exception(event.error)
65
+ case ToolRetryEvent():
66
+ self.span.child(meta.name, event=(event, meta))
67
+ case _:
68
+ self.span.child(meta.name, event=(event, meta))
@@ -0,0 +1,108 @@
1
+ from typing import Any, ClassVar
2
+
3
+ from beeai_framework.context import RunContext, RunContextStartEvent
4
+ from beeai_framework.emitter import EventMeta
5
+ from beeai_framework.workflows import (
6
+ Workflow,
7
+ WorkflowErrorEvent,
8
+ WorkflowStartEvent,
9
+ WorkflowSuccessEvent,
10
+ )
11
+ from beeai_framework.workflows.agent.agent import Schema as AgentWorkflowSchema
12
+ from pydantic import BaseModel
13
+ from typing_extensions import override
14
+
15
+ from openinference.instrumentation.beeai._utils import (
16
+ _unpack_object,
17
+ safe_dump_model_schema,
18
+ stringify,
19
+ )
20
+ from openinference.instrumentation.beeai.processors.base import Processor
21
+ from openinference.semconv.trace import (
22
+ OpenInferenceMimeTypeValues,
23
+ OpenInferenceSpanKindValues,
24
+ SpanAttributes,
25
+ )
26
+
27
+
28
+ class WorkflowProcessor(Processor):
29
+ kind: ClassVar[OpenInferenceSpanKindValues] = OpenInferenceSpanKindValues.CHAIN
30
+
31
+ def __init__(self, event: "RunContextStartEvent", meta: "EventMeta"):
32
+ super().__init__(event, meta)
33
+
34
+ assert isinstance(meta.creator, RunContext)
35
+ assert isinstance(meta.creator.instance, Workflow)
36
+
37
+ self._last_step = 0
38
+
39
+ workflow = meta.creator.instance
40
+ self.span.name = workflow.name or self.span.name
41
+ self.span.set_attributes(
42
+ {
43
+ f"{SpanAttributes.METADATA}.name": workflow.name,
44
+ f"{SpanAttributes.METADATA}.all_steps": workflow.step_names,
45
+ f"{SpanAttributes.METADATA}.start_step": workflow.start_step,
46
+ **_unpack_object(
47
+ safe_dump_model_schema(workflow.schema),
48
+ prefix=f"{SpanAttributes.METADATA}.schema",
49
+ ),
50
+ }
51
+ )
52
+
53
+ @override
54
+ async def update(
55
+ self,
56
+ event: Any,
57
+ meta: "EventMeta",
58
+ ) -> None:
59
+ await super().update(event, meta)
60
+
61
+ self.span.add_event(f"{meta.name} ({meta.path})", timestamp=meta.created_at)
62
+
63
+ match event:
64
+ case WorkflowStartEvent() | WorkflowSuccessEvent() | WorkflowErrorEvent():
65
+ self._update_state(event)
66
+ case _:
67
+ self.span.child(meta.name, event=(event, meta))
68
+
69
+ def _update_state(
70
+ self, event: "WorkflowStartEvent[Any] | WorkflowSuccessEvent[Any] | WorkflowErrorEvent[Any]"
71
+ ) -> None:
72
+ self.span.set_attribute(f"{SpanAttributes.METADATA}.current_step", event.step)
73
+ self.span.set_attributes(
74
+ _unpack_object(
75
+ _serialize_state(event.run.state), prefix=f"{SpanAttributes.METADATA}.state"
76
+ )
77
+ )
78
+
79
+ # update steps
80
+ for idx, step in enumerate(event.run.steps):
81
+ if idx < self._last_step:
82
+ continue
83
+
84
+ self.span.set_attributes(
85
+ _unpack_object(
86
+ {
87
+ "name": step.name,
88
+ "state": _serialize_state(step.state),
89
+ },
90
+ prefix=f"{SpanAttributes.METADATA}.steps.{idx}",
91
+ )
92
+ )
93
+ self._last_step = idx
94
+
95
+ if isinstance(event, WorkflowSuccessEvent):
96
+ if event.next == Workflow.END or event.run.result is not None:
97
+ result = event.run.result if event.run.result is not None else event.state
98
+ self.span.attributes.update(
99
+ {
100
+ SpanAttributes.OUTPUT_VALUE: stringify(_serialize_state(result)),
101
+ SpanAttributes.OUTPUT_MIME_TYPE: OpenInferenceMimeTypeValues.JSON.value,
102
+ }
103
+ )
104
+
105
+
106
+ def _serialize_state(result: BaseModel) -> dict[str, Any]:
107
+ exclude = {"new_messages", "inputs"} if isinstance(result, AgentWorkflowSchema) else set()
108
+ return result.model_dump(exclude=exclude, exclude_none=True)
@@ -1 +1 @@
1
- __version__ = "0.1.6"
1
+ __version__ = "0.1.8"