arize-phoenix 5.4.0__py3-none-any.whl → 5.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of arize-phoenix might be problematic; see the registry's advisory page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: arize-phoenix
3
- Version: 5.4.0
3
+ Version: 5.5.0
4
4
  Summary: AI Observability and Evaluation
5
5
  Project-URL: Documentation, https://docs.arize.com/phoenix/
6
6
  Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -56,6 +56,7 @@ Requires-Dist: uvicorn
56
56
  Requires-Dist: websockets
57
57
  Requires-Dist: wrapt
58
58
  Provides-Extra: container
59
+ Requires-Dist: anthropic; extra == 'container'
59
60
  Requires-Dist: fast-hdbscan>=0.2.0; extra == 'container'
60
61
  Requires-Dist: numba>=0.60.0; extra == 'container'
61
62
  Requires-Dist: openai>=1.0.0; extra == 'container'
@@ -6,7 +6,7 @@ phoenix/exceptions.py,sha256=n2L2KKuecrdflB9MsCdAYCiSEvGJptIsfRkXMoJle7A,169
6
6
  phoenix/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
7
7
  phoenix/services.py,sha256=aTxhcOA1pZHB6U-B3TEcp6fqDF5oT0xCUvEUNMZVTUQ,5175
8
8
  phoenix/settings.py,sha256=ht-0oN-sMV6SPXrk7Tu1EZlngpAYkGNLYPhO8DyrdQI,661
9
- phoenix/version.py,sha256=xjYaBGUFGg0kGZj_WhuoFyPD8NILPsr79SaMwmYQGSg,22
9
+ phoenix/version.py,sha256=zFTHldBmR5ReiC3uSZ8VkZOEirtsq_l6QbUJYRBHlTs,22
10
10
  phoenix/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
11
  phoenix/core/embedding_dimension.py,sha256=zKGbcvwOXgLf-yrJBpQyKtd-LEOPRKHnUToyAU8Owis,87
12
12
  phoenix/core/model.py,sha256=km_a--PBHOuA337ClRw9xqhOHhrUT6Rl9pz_zV0JYkQ,4843
@@ -94,7 +94,7 @@ phoenix/server/api/exceptions.py,sha256=TA0JuY2YRnj35qGuMSQ8d0ToHum9gWm9W--3fSKH
94
94
  phoenix/server/api/interceptor.py,sha256=ykDnoC_apUd-llVli3m1CW18kNSIgjz2qZ6m5JmPDu8,1294
95
95
  phoenix/server/api/queries.py,sha256=sE_w4X9sz0QJBGNQbOMgiZiKpR24jvxCpv6AQeGeE9Y,27153
96
96
  phoenix/server/api/schema.py,sha256=JKxcZd0UBX6VnVic9tnKAzehJ2fGYNWAu6Gr1tR1PMI,637
97
- phoenix/server/api/subscriptions.py,sha256=MOxA0iTAIPTI_yKUOitRXiNP4A2cuEEytT9ql9uqUdM,17258
97
+ phoenix/server/api/subscriptions.py,sha256=hxIj8WRNk1m1aXVW7PDlpPt5TwRGNbjp2yxibRZRqrc,21972
98
98
  phoenix/server/api/utils.py,sha256=Kl47G-1A7QKTDrc75BU2QK6HupsG6MWuXxy351FOfKQ,858
99
99
  phoenix/server/api/dataloaders/__init__.py,sha256=jNYvfXjnZzgA2HWTG7AZdqWGla3ZysBUDUei8Zkz6N8,3290
100
100
  phoenix/server/api/dataloaders/annotation_summaries.py,sha256=Wv8AORZoGd5TJ4Y-em8iqJu87AMpZP7lWOTr-SML-x8,5560
@@ -264,10 +264,10 @@ phoenix/server/static/apple-touch-icon-76x76.png,sha256=CT_xT12I0u2i0WU8JzBZBuOQ
264
264
  phoenix/server/static/apple-touch-icon.png,sha256=fOfpjqGpWYbJ0eAurKsyoZP1EAs6ZVooBJ_SGk2ZkDs,3801
265
265
  phoenix/server/static/favicon.ico,sha256=bY0vvCKRftemZfPShwZtE93DiiQdaYaozkPGwNFr6H8,34494
266
266
  phoenix/server/static/modernizr.js,sha256=mvK-XtkNqjOral-QvzoqsyOMECXIMu5BQwSVN_wcU9c,2564
267
- phoenix/server/static/.vite/manifest.json,sha256=_n7ZFPqjikT-PaJRCfuQH6uAKVJ75DDy-yVhvoHT4HQ,1929
268
- phoenix/server/static/assets/components-8zh9kCOG.js,sha256=Dw2GUia4f0fu5DFbXLKi7bqmuTfSG3_XFdWEiIdPCmU,272304
269
- phoenix/server/static/assets/index-Ci93KI-L.js,sha256=_Pccmiyc3Ak2moJ_drkresjx0iwZHa1qrfUoZ1NoYUM,7162
270
- phoenix/server/static/assets/pages-CFNU-U_5.js,sha256=JPZJBIh3FxrJIsSOEr2eIXY1dBRwymnAXI-LMUkVzuI,590345
267
+ phoenix/server/static/.vite/manifest.json,sha256=ifveuWhOubKDOfkJLSeHrUMz3kwtdip_8ctJ4KPRfP8,1929
268
+ phoenix/server/static/assets/components-DOjuTDx9.js,sha256=JXFIP0SiBdyEvvh53dKsdoi-6o1lOUFY_IIMsGBwMxg,272352
269
+ phoenix/server/static/assets/index-C15MiAe0.js,sha256=pM97A7RT-WTM1F2TqKVvP8VgNWGHC7OehcBqq-sjzR4,7162
270
+ phoenix/server/static/assets/pages-uwuWR0d3.js,sha256=GWoWNnx_8-q56uUQWbBRM_fZv8zxw1ZZXvWbog8Q0-0,591038
271
271
  phoenix/server/static/assets/vendor-6IcPAw_j.js,sha256=kA0afCcdEfjdTlLbngra3925MiR8wVgYF5d7XBBViXQ,10898278
272
272
  phoenix/server/static/assets/vendor-DxkFTwjz.css,sha256=nZrkr0u6NNElFGvpWHk9GTHeGoibCXCli1bE7mXZGZg,1816
273
273
  phoenix/server/static/assets/vendor-arizeai-uC2sozJ6.js,sha256=o5Su5D9_jBumfVpDXwuQC7Hzpkze4LlvVA9t0Pvh1pk,306512
@@ -313,9 +313,9 @@ phoenix/utilities/project.py,sha256=8IJuMM4yUMoooPi37sictGj8Etu9rGmq6RFtc9848cQ,
313
313
  phoenix/utilities/re.py,sha256=nr_B0txj_7CXc45953X6vr2KCRSWMuaXJSEkL8s8Sjc,2036
314
314
  phoenix/utilities/span_store.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
315
315
  phoenix/utilities/template_formatters.py,sha256=zWY6gjAMmspYqlmn-ZjKeY5SExylwePk7a1BUdDxVGI,2236
316
- arize_phoenix-5.4.0.dist-info/METADATA,sha256=bwyk52R9lADobAHiz327hsDxKfDqIdHsTcp87xFCK_Y,12052
317
- arize_phoenix-5.4.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
318
- arize_phoenix-5.4.0.dist-info/entry_points.txt,sha256=Pgpn8Upxx9P8z8joPXZWl2LlnAlGc3gcQoVchb06X1Q,94
319
- arize_phoenix-5.4.0.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
320
- arize_phoenix-5.4.0.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
321
- arize_phoenix-5.4.0.dist-info/RECORD,,
316
+ arize_phoenix-5.5.0.dist-info/METADATA,sha256=F32DnF7HFmK_M0NKDsD5mXY91llrxN1Php04_nmzo40,12099
317
+ arize_phoenix-5.5.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
318
+ arize_phoenix-5.5.0.dist-info/entry_points.txt,sha256=Pgpn8Upxx9P8z8joPXZWl2LlnAlGc3gcQoVchb06X1Q,94
319
+ arize_phoenix-5.5.0.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
320
+ arize_phoenix-5.5.0.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
321
+ arize_phoenix-5.5.0.dist-info/RECORD,,
@@ -1,4 +1,5 @@
1
1
  import json
2
+ from abc import ABC, abstractmethod
2
3
  from collections import defaultdict
3
4
  from dataclasses import fields
4
5
  from datetime import datetime
@@ -9,6 +10,7 @@ from typing import (
9
10
  Annotated,
10
11
  Any,
11
12
  AsyncIterator,
13
+ Callable,
12
14
  DefaultDict,
13
15
  Dict,
14
16
  Iterable,
@@ -16,6 +18,7 @@ from typing import (
16
18
  List,
17
19
  Optional,
18
20
  Tuple,
21
+ Type,
19
22
  Union,
20
23
  )
21
24
 
@@ -56,14 +59,13 @@ from phoenix.utilities.template_formatters import (
56
59
  )
57
60
 
58
61
  if TYPE_CHECKING:
62
+ from anthropic.types import MessageParam
59
63
  from openai.types import CompletionUsage
60
- from openai.types.chat import (
61
- ChatCompletionMessageParam,
62
- )
64
+ from openai.types.chat import ChatCompletionMessageParam
63
65
 
64
66
  PLAYGROUND_PROJECT_NAME = "playground"
65
67
 
66
- ToolCallIndex: TypeAlias = int
68
+ ToolCallID: TypeAlias = str
67
69
 
68
70
 
69
71
  @strawberry.enum
@@ -127,39 +129,202 @@ class ChatCompletionInput:
127
129
  api_key: Optional[str] = strawberry.field(default=None)
128
130
 
129
131
 
130
- def to_openai_chat_completion_param(
131
- role: ChatCompletionMessageRole, content: JSONScalarType
132
- ) -> "ChatCompletionMessageParam":
133
- from openai.types.chat import (
134
- ChatCompletionAssistantMessageParam,
135
- ChatCompletionSystemMessageParam,
136
- ChatCompletionUserMessageParam,
137
- )
132
+ PLAYGROUND_STREAMING_CLIENT_REGISTRY: Dict[
133
+ GenerativeProviderKey, Type["PlaygroundStreamingClient"]
134
+ ] = {}
138
135
 
139
- if role is ChatCompletionMessageRole.USER:
140
- return ChatCompletionUserMessageParam(
141
- {
142
- "content": content,
143
- "role": "user",
144
- }
145
- )
146
- if role is ChatCompletionMessageRole.SYSTEM:
147
- return ChatCompletionSystemMessageParam(
148
- {
149
- "content": content,
150
- "role": "system",
151
- }
136
+
137
+ def register_llm_client(
138
+ provider_key: GenerativeProviderKey,
139
+ ) -> Callable[[Type["PlaygroundStreamingClient"]], Type["PlaygroundStreamingClient"]]:
140
+ def decorator(cls: Type["PlaygroundStreamingClient"]) -> Type["PlaygroundStreamingClient"]:
141
+ PLAYGROUND_STREAMING_CLIENT_REGISTRY[provider_key] = cls
142
+ return cls
143
+
144
+ return decorator
145
+
146
+
147
+ class PlaygroundStreamingClient(ABC):
148
+ def __init__(self, model: GenerativeModelInput, api_key: Optional[str] = None) -> None: ...
149
+
150
+ @abstractmethod
151
+ async def chat_completion_create(
152
+ self,
153
+ messages: List[Tuple[ChatCompletionMessageRole, str]],
154
+ tools: List[JSONScalarType],
155
+ **invocation_parameters: Any,
156
+ ) -> AsyncIterator[ChatCompletionSubscriptionPayload]:
157
+ # a yield statement is needed to satisfy the type-checker
158
+ # https://mypy.readthedocs.io/en/stable/more_types.html#asynchronous-iterators
159
+ yield TextChunk(content="")
160
+
161
+ @property
162
+ @abstractmethod
163
+ def attributes(self) -> Dict[str, Any]: ...
164
+
165
+
166
+ @register_llm_client(GenerativeProviderKey.OPENAI)
167
+ class OpenAIStreamingClient(PlaygroundStreamingClient):
168
+ def __init__(self, model: GenerativeModelInput, api_key: Optional[str] = None) -> None:
169
+ from openai import AsyncOpenAI
170
+
171
+ self.client = AsyncOpenAI(api_key=api_key)
172
+ self.model_name = model.name
173
+ self._attributes: Dict[str, Any] = {}
174
+
175
+ async def chat_completion_create(
176
+ self,
177
+ messages: List[Tuple[ChatCompletionMessageRole, str]],
178
+ tools: List[JSONScalarType],
179
+ **invocation_parameters: Any,
180
+ ) -> AsyncIterator[ChatCompletionSubscriptionPayload]:
181
+ from openai import NOT_GIVEN
182
+ from openai.types.chat import ChatCompletionStreamOptionsParam
183
+
184
+ # Convert standard messages to OpenAI messages
185
+ openai_messages = [self.to_openai_chat_completion_param(*message) for message in messages]
186
+ tool_call_ids: Dict[int, str] = {}
187
+ token_usage: Optional["CompletionUsage"] = None
188
+ async for chunk in await self.client.chat.completions.create(
189
+ messages=openai_messages,
190
+ model=self.model_name,
191
+ stream=True,
192
+ stream_options=ChatCompletionStreamOptionsParam(include_usage=True),
193
+ tools=tools or NOT_GIVEN,
194
+ **invocation_parameters,
195
+ ):
196
+ if (usage := chunk.usage) is not None:
197
+ token_usage = usage
198
+ continue
199
+ choice = chunk.choices[0]
200
+ delta = choice.delta
201
+ if choice.finish_reason is None:
202
+ if isinstance(chunk_content := delta.content, str):
203
+ text_chunk = TextChunk(content=chunk_content)
204
+ yield text_chunk
205
+ if (tool_calls := delta.tool_calls) is not None:
206
+ for tool_call_index, tool_call in enumerate(tool_calls):
207
+ tool_call_id = (
208
+ tool_call.id
209
+ if tool_call.id is not None
210
+ else tool_call_ids[tool_call_index]
211
+ )
212
+ tool_call_ids[tool_call_index] = tool_call_id
213
+ if (function := tool_call.function) is not None:
214
+ tool_call_chunk = ToolCallChunk(
215
+ id=tool_call_id,
216
+ function=FunctionCallChunk(
217
+ name=function.name or "",
218
+ arguments=function.arguments or "",
219
+ ),
220
+ )
221
+ yield tool_call_chunk
222
+ if token_usage is not None:
223
+ self._attributes.update(_llm_token_counts(token_usage))
224
+
225
+ def to_openai_chat_completion_param(
226
+ self, role: ChatCompletionMessageRole, content: JSONScalarType
227
+ ) -> "ChatCompletionMessageParam":
228
+ from openai.types.chat import (
229
+ ChatCompletionAssistantMessageParam,
230
+ ChatCompletionSystemMessageParam,
231
+ ChatCompletionUserMessageParam,
152
232
  )
153
- if role is ChatCompletionMessageRole.AI:
154
- return ChatCompletionAssistantMessageParam(
155
- {
156
- "content": content,
157
- "role": "assistant",
158
- }
233
+
234
+ if role is ChatCompletionMessageRole.USER:
235
+ return ChatCompletionUserMessageParam(
236
+ {
237
+ "content": content,
238
+ "role": "user",
239
+ }
240
+ )
241
+ if role is ChatCompletionMessageRole.SYSTEM:
242
+ return ChatCompletionSystemMessageParam(
243
+ {
244
+ "content": content,
245
+ "role": "system",
246
+ }
247
+ )
248
+ if role is ChatCompletionMessageRole.AI:
249
+ return ChatCompletionAssistantMessageParam(
250
+ {
251
+ "content": content,
252
+ "role": "assistant",
253
+ }
254
+ )
255
+ if role is ChatCompletionMessageRole.TOOL:
256
+ raise NotImplementedError
257
+ assert_never(role)
258
+
259
+ @property
260
+ def attributes(self) -> Dict[str, Any]:
261
+ return self._attributes
262
+
263
+
264
+ @register_llm_client(GenerativeProviderKey.AZURE_OPENAI)
265
+ class AzureOpenAIStreamingClient(OpenAIStreamingClient):
266
+ def __init__(self, model: GenerativeModelInput, api_key: Optional[str] = None):
267
+ from openai import AsyncAzureOpenAI
268
+
269
+ if model.endpoint is None or model.api_version is None:
270
+ raise ValueError("endpoint and api_version are required for Azure OpenAI models")
271
+ self.client = AsyncAzureOpenAI(
272
+ api_key=api_key,
273
+ azure_endpoint=model.endpoint,
274
+ api_version=model.api_version,
159
275
  )
160
- if role is ChatCompletionMessageRole.TOOL:
161
- raise NotImplementedError
162
- assert_never(role)
276
+
277
+
278
+ @register_llm_client(GenerativeProviderKey.ANTHROPIC)
279
+ class AnthropicStreamingClient(PlaygroundStreamingClient):
280
+ def __init__(self, model: GenerativeModelInput, api_key: Optional[str] = None) -> None:
281
+ import anthropic
282
+
283
+ self.client = anthropic.AsyncAnthropic(api_key=api_key)
284
+ self.model_name = model.name
285
+
286
+ async def chat_completion_create(
287
+ self,
288
+ messages: List[Tuple[ChatCompletionMessageRole, str]],
289
+ tools: List[JSONScalarType],
290
+ **invocation_parameters: Any,
291
+ ) -> AsyncIterator[ChatCompletionSubscriptionPayload]:
292
+ anthropic_messages, system_prompt = self._build_anthropic_messages(messages)
293
+
294
+ anthropic_params = {
295
+ "messages": anthropic_messages,
296
+ "model": self.model_name,
297
+ "system": system_prompt,
298
+ "max_tokens": 1024,
299
+ **invocation_parameters,
300
+ }
301
+
302
+ async with self.client.messages.stream(**anthropic_params) as stream:
303
+ async for text in stream.text_stream:
304
+ yield TextChunk(content=text)
305
+
306
+ def _build_anthropic_messages(
307
+ self, messages: List[Tuple[ChatCompletionMessageRole, str]]
308
+ ) -> Tuple[List["MessageParam"], str]:
309
+ anthropic_messages: List["MessageParam"] = []
310
+ system_prompt = ""
311
+ for role, content in messages:
312
+ if role == ChatCompletionMessageRole.USER:
313
+ anthropic_messages.append({"role": "user", "content": content})
314
+ elif role == ChatCompletionMessageRole.AI:
315
+ anthropic_messages.append({"role": "assistant", "content": content})
316
+ elif role == ChatCompletionMessageRole.SYSTEM:
317
+ system_prompt += content + "\n"
318
+ elif role == ChatCompletionMessageRole.TOOL:
319
+ raise NotImplementedError
320
+ else:
321
+ assert_never(role)
322
+
323
+ return anthropic_messages, system_prompt
324
+
325
+ @property
326
+ def attributes(self) -> Dict[str, Any]:
327
+ return dict()
163
328
 
164
329
 
165
330
  @strawberry.type
@@ -168,30 +333,20 @@ class Subscription:
168
333
  async def chat_completion(
169
334
  self, info: Info[Context, None], input: ChatCompletionInput
170
335
  ) -> AsyncIterator[ChatCompletionSubscriptionPayload]:
171
- from openai import NOT_GIVEN, AsyncAzureOpenAI, AsyncOpenAI
172
- from openai.types.chat import ChatCompletionStreamOptionsParam
336
+ # Determine which LLM client to use based on provider_key
337
+ provider_key = input.model.provider_key
338
+ llm_client_class = PLAYGROUND_STREAMING_CLIENT_REGISTRY.get(provider_key)
339
+ if llm_client_class is None:
340
+ raise ValueError(f"No LLM client registered for provider '{provider_key}'")
173
341
 
174
- client: Union[AsyncAzureOpenAI, AsyncOpenAI]
342
+ llm_client = llm_client_class(model=input.model, api_key=input.api_key)
175
343
 
176
- if input.model.provider_key == GenerativeProviderKey.AZURE_OPENAI:
177
- if input.model.endpoint is None or input.model.api_version is None:
178
- raise ValueError("endpoint and api_version are required for Azure OpenAI models")
179
- client = AsyncAzureOpenAI(
180
- api_key=input.api_key,
181
- azure_endpoint=input.model.endpoint,
182
- api_version=input.model.api_version,
183
- )
184
- else:
185
- client = AsyncOpenAI(api_key=input.api_key)
186
-
187
- invocation_parameters = jsonify(input.invocation_parameters)
344
+ messages = [(message.role, message.content) for message in input.messages]
188
345
 
189
- messages: List[Tuple[ChatCompletionMessageRole, str]] = [
190
- (message.role, message.content) for message in input.messages
191
- ]
192
346
  if template_options := input.template:
193
347
  messages = list(_formatted_messages(messages, template_options))
194
- openai_messages = [to_openai_chat_completion_param(*message) for message in messages]
348
+
349
+ invocation_parameters = jsonify(input.invocation_parameters)
195
350
 
196
351
  in_memory_span_exporter = InMemorySpanExporter()
197
352
  tracer_provider = TracerProvider()
@@ -200,6 +355,7 @@ class Subscription:
200
355
  )
201
356
  tracer = tracer_provider.get_tracer(__name__)
202
357
  span_name = "ChatCompletion"
358
+
203
359
  with tracer.start_span(
204
360
  span_name,
205
361
  attributes=dict(
@@ -215,52 +371,29 @@ class Subscription:
215
371
  ) as span:
216
372
  response_chunks = []
217
373
  text_chunks: List[TextChunk] = []
218
- tool_call_chunks: DefaultDict[ToolCallIndex, List[ToolCallChunk]] = defaultdict(list)
219
- role: Optional[str] = None
220
- token_usage: Optional[CompletionUsage] = None
221
- async for chunk in await client.chat.completions.create(
222
- messages=openai_messages,
223
- model=input.model.name,
224
- stream=True,
225
- tools=input.tools or NOT_GIVEN,
226
- stream_options=ChatCompletionStreamOptionsParam(include_usage=True),
374
+ tool_call_chunks: DefaultDict[ToolCallID, List[ToolCallChunk]] = defaultdict(list)
375
+
376
+ async for chunk in llm_client.chat_completion_create(
377
+ messages=messages,
378
+ tools=input.tools or [],
227
379
  **invocation_parameters,
228
380
  ):
229
381
  response_chunks.append(chunk)
230
- if (usage := chunk.usage) is not None:
231
- token_usage = usage
232
- continue
233
- choice = chunk.choices[0]
234
- delta = choice.delta
235
- if role is None:
236
- role = delta.role
237
- if choice.finish_reason is None:
238
- if isinstance(chunk_content := delta.content, str):
239
- text_chunk = TextChunk(content=chunk_content)
240
- yield text_chunk
241
- text_chunks.append(text_chunk)
242
- if (tool_calls := delta.tool_calls) is not None:
243
- for tool_call_index, tool_call in enumerate(tool_calls):
244
- if (function := tool_call.function) is not None:
245
- if (tool_call_id := tool_call.id) is None:
246
- first_tool_call_chunk = tool_call_chunks[tool_call_index][0]
247
- tool_call_id = first_tool_call_chunk.id
248
- tool_call_chunk = ToolCallChunk(
249
- id=tool_call_id,
250
- function=FunctionCallChunk(
251
- name=function.name or "",
252
- arguments=function.arguments or "",
253
- ),
254
- )
255
- yield tool_call_chunk
256
- tool_call_chunks[tool_call_index].append(tool_call_chunk)
382
+ if isinstance(chunk, TextChunk):
383
+ yield chunk
384
+ text_chunks.append(chunk)
385
+ elif isinstance(chunk, ToolCallChunk):
386
+ yield chunk
387
+ tool_call_chunks[chunk.id].append(chunk)
388
+
257
389
  span.set_status(StatusCode.OK)
258
- assert role is not None
390
+ llm_client_attributes = llm_client.attributes
391
+
259
392
  span.set_attributes(
260
393
  dict(
261
394
  chain(
262
395
  _output_value_and_mime_type(response_chunks),
263
- _llm_token_counts(token_usage) if token_usage is not None else [],
396
+ llm_client_attributes.items(),
264
397
  _llm_output_messages(text_chunks, tool_call_chunks),
265
398
  )
266
399
  )
@@ -272,8 +405,8 @@ class Subscription:
272
405
  assert (attributes := finished_span.attributes) is not None
273
406
  start_time = _datetime(epoch_nanoseconds=finished_span.start_time)
274
407
  end_time = _datetime(epoch_nanoseconds=finished_span.end_time)
275
- prompt_tokens = token_usage.prompt_tokens if token_usage is not None else 0
276
- completion_tokens = token_usage.completion_tokens if token_usage is not None else 0
408
+ prompt_tokens = llm_client_attributes.get(LLM_TOKEN_COUNT_PROMPT, 0)
409
+ completion_tokens = llm_client_attributes.get(LLM_TOKEN_COUNT_COMPLETION, 0)
277
410
  trace_id = _hex(finished_span.context.trace_id)
278
411
  span_id = _hex(finished_span.context.span_id)
279
412
  status = finished_span.status
@@ -367,7 +500,7 @@ def _llm_input_messages(
367
500
 
368
501
  def _llm_output_messages(
369
502
  text_chunks: List[TextChunk],
370
- tool_call_chunks: DefaultDict[ToolCallIndex, List[ToolCallChunk]],
503
+ tool_call_chunks: DefaultDict[ToolCallID, List[ToolCallChunk]],
371
504
  ) -> Iterator[Tuple[str, Any]]:
372
505
  yield f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_ROLE}", "assistant"
373
506
  if content := "".join(chunk.content for chunk in text_chunks):
@@ -1,22 +1,22 @@
1
1
  {
2
- "_components-8zh9kCOG.js": {
3
- "file": "assets/components-8zh9kCOG.js",
2
+ "_components-DOjuTDx9.js": {
3
+ "file": "assets/components-DOjuTDx9.js",
4
4
  "name": "components",
5
5
  "imports": [
6
6
  "_vendor-6IcPAw_j.js",
7
7
  "_vendor-arizeai-uC2sozJ6.js",
8
- "_pages-CFNU-U_5.js",
8
+ "_pages-uwuWR0d3.js",
9
9
  "_vendor-three-DwGkEfCM.js",
10
10
  "_vendor-codemirror-DVE2_WBr.js"
11
11
  ]
12
12
  },
13
- "_pages-CFNU-U_5.js": {
14
- "file": "assets/pages-CFNU-U_5.js",
13
+ "_pages-uwuWR0d3.js": {
14
+ "file": "assets/pages-uwuWR0d3.js",
15
15
  "name": "pages",
16
16
  "imports": [
17
17
  "_vendor-6IcPAw_j.js",
18
18
  "_vendor-arizeai-uC2sozJ6.js",
19
- "_components-8zh9kCOG.js",
19
+ "_components-DOjuTDx9.js",
20
20
  "_vendor-recharts-DwrexFA4.js",
21
21
  "_vendor-codemirror-DVE2_WBr.js"
22
22
  ]
@@ -61,15 +61,15 @@
61
61
  "name": "vendor-three"
62
62
  },
63
63
  "index.tsx": {
64
- "file": "assets/index-Ci93KI-L.js",
64
+ "file": "assets/index-C15MiAe0.js",
65
65
  "name": "index",
66
66
  "src": "index.tsx",
67
67
  "isEntry": true,
68
68
  "imports": [
69
69
  "_vendor-6IcPAw_j.js",
70
70
  "_vendor-arizeai-uC2sozJ6.js",
71
- "_pages-CFNU-U_5.js",
72
- "_components-8zh9kCOG.js",
71
+ "_pages-uwuWR0d3.js",
72
+ "_components-DOjuTDx9.js",
73
73
  "_vendor-three-DwGkEfCM.js",
74
74
  "_vendor-recharts-DwrexFA4.js",
75
75
  "_vendor-codemirror-DVE2_WBr.js"