arize-phoenix 11.10.0__py3-none-any.whl → 11.10.1__py3-none-any.whl
This diff shows the changes between package versions as published to their public registries. It is provided for informational purposes only.
Potentially problematic release: this version of arize-phoenix might be problematic.
- {arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/METADATA +1 -1
- {arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/RECORD +9 -9
- phoenix/server/api/helpers/playground_clients.py +146 -63
- phoenix/server/api/helpers/playground_registry.py +2 -2
- phoenix/version.py +1 -1
- {arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/WHEEL +0 -0
- {arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/entry_points.txt +0 -0
- {arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/licenses/IP_NOTICE +0 -0
- {arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/licenses/LICENSE +0 -0
{arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/RECORD
CHANGED

@@ -6,7 +6,7 @@ phoenix/exceptions.py,sha256=n2L2KKuecrdflB9MsCdAYCiSEvGJptIsfRkXMoJle7A,169
 phoenix/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 phoenix/services.py,sha256=ngkyKGVatX3cO2WJdo2hKdaVKP-xJCMvqthvga6kJss,5196
 phoenix/settings.py,sha256=2kHfT3BNOVd4dAO1bq-syEQbHSG8oX2-7NhOwK2QREk,896
-phoenix/version.py,sha256=…
+phoenix/version.py,sha256=xvvOboOAs4Htuc0_lAWkZ6Ru9ZUbUhsIFFXYCIMHAAE,24
 phoenix/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 phoenix/core/embedding_dimension.py,sha256=zKGbcvwOXgLf-yrJBpQyKtd-LEOPRKHnUToyAU8Owis,87
 phoenix/core/model.py,sha256=qBFraOtmwCCnWJltKNP18DDG0mULXigytlFsa6YOz6k,4837
@@ -176,8 +176,8 @@ phoenix/server/api/helpers/__init__.py,sha256=m2-xaSPqUiSs91k62JaRDjFNfl-1byxBfY
 phoenix/server/api/helpers/annotations.py,sha256=9gMXKpMTfWEChoSCnvdWYuyB0hlSnNOp-qUdar9Vono,262
 phoenix/server/api/helpers/dataset_helpers.py,sha256=3bdGBoUzqrtg-sr5p2wpQLOU6dhg_3TKFHNeJj8p0TU,9155
 phoenix/server/api/helpers/experiment_run_filters.py,sha256=DOnVwrmn39eAkk2mwuZP8kIcAnR5jrOgllEwWSjsw94,29893
-phoenix/server/api/helpers/playground_clients.py,sha256=…
-phoenix/server/api/helpers/playground_registry.py,sha256=…
+phoenix/server/api/helpers/playground_clients.py,sha256=Fq4DNVIdnCiiVt0bh5mrZ7dJb2oOQcLjTttfq0Wcuv0,73589
+phoenix/server/api/helpers/playground_registry.py,sha256=n0v4-KnvZJxeaEwOla5qBbnOQjSWznKmMhZnh9ziJt0,2584
 phoenix/server/api/helpers/playground_spans.py,sha256=QpXwPl_fFNwm_iA1A77XApUyXMl1aDmonw8aXuNZ_4k,17132
 phoenix/server/api/helpers/prompts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 phoenix/server/api/helpers/prompts/models.py,sha256=nlPtLZaGcHfWNRR0iNRaBUv8eoKOnoGqRm6zadrTt0I,23547
@@ -437,9 +437,9 @@ phoenix/utilities/project.py,sha256=auVpARXkDb-JgeX5f2aStyFIkeKvGwN9l7qrFeJMVxI,
 phoenix/utilities/re.py,sha256=6YyUWIkv0zc2SigsxfOWIHzdpjKA_TZo2iqKq7zJKvw,2081
 phoenix/utilities/span_store.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 phoenix/utilities/template_formatters.py,sha256=gh9PJD6WEGw7TEYXfSst1UR4pWWwmjxMLrDVQ_CkpkQ,2779
-arize_phoenix-11.10.0.dist-info/METADATA,sha256=…
-arize_phoenix-11.10.0.dist-info/WHEEL,sha256=…
-arize_phoenix-11.10.0.dist-info/entry_points.txt,sha256=…
-arize_phoenix-11.10.0.dist-info/licenses/IP_NOTICE,sha256=…
-arize_phoenix-11.10.0.dist-info/licenses/LICENSE,sha256=…
-arize_phoenix-11.10.0.dist-info/RECORD,,
+arize_phoenix-11.10.1.dist-info/METADATA,sha256=wd_5sRdhTNXTvCYeob4xrZbZiPxZ81ENs-yF8PMEIyE,30851
+arize_phoenix-11.10.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+arize_phoenix-11.10.1.dist-info/entry_points.txt,sha256=Pgpn8Upxx9P8z8joPXZWl2LlnAlGc3gcQoVchb06X1Q,94
+arize_phoenix-11.10.1.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
+arize_phoenix-11.10.1.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
+arize_phoenix-11.10.1.dist-info/RECORD,,
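Note on the RECORD format: each row is "path,sha256=DIGEST,size", where DIGEST is the URL-safe, unpadded base64 encoding of the file's SHA-256 digest, per the wheel spec. A minimal verification sketch, separate from the package itself:

# Minimal verification sketch (not part of the package): recompute a wheel
# RECORD row such as "phoenix/version.py,sha256=xvvO…,24".
import base64
import hashlib
from pathlib import Path

def record_row(path: str) -> str:
    data = Path(path).read_bytes()
    # RECORD uses URL-safe base64 of the SHA-256 digest with "=" padding stripped.
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode('ascii')},{len(data)}"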
phoenix/server/api/helpers/playground_clients.py
CHANGED

@@ -20,7 +20,7 @@ from openinference.semconv.trace import (
 )
 from strawberry import UNSET
 from strawberry.scalars import JSON as JSONScalarType
-from typing_extensions import TypeAlias, assert_never
+from typing_extensions import TypeAlias, assert_never, override
 
 from phoenix.config import getenv
 from phoenix.evals.models.rate_limiters import (
@@ -437,9 +437,9 @@ class OpenAIBaseStreamingClient(PlaygroundStreamingClient):
         if role is ChatCompletionMessageRole.TOOL:
             if tool_call_id is None:
                 raise ValueError("tool_call_id is required for tool messages")
-…
-…
-…
+            return ChatCompletionToolMessageParam(
+                {"content": content, "role": "tool", "tool_call_id": tool_call_id}
+            )
         assert_never(role)
 
     def to_openai_tool_call_param(
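Note: the dict-wrapped call added above works because openai's ChatCompletionToolMessageParam is a TypedDict, and a TypedDict class is just a dict factory at runtime. A standalone illustration with a stand-in type (not the package's code):

# Standalone illustration: a TypedDict called with a dict returns that dict,
# while still giving type checkers the exact key/value shape to verify.
from typing_extensions import Literal, TypedDict

class ToolMessageParam(TypedDict):
    content: str
    role: Literal["tool"]
    tool_call_id: str

msg = ToolMessageParam({"content": "42", "role": "tool", "tool_call_id": "call_1"})
assert msg == {"content": "42", "role": "tool", "tool_call_id": "call_1"}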
@@ -1140,27 +1140,28 @@ class OpenAIStreamingClient(OpenAIBaseStreamingClient):
         self._attributes[LLM_SYSTEM] = OpenInferenceLLMSystemValues.OPENAI.value
 
 
-…
-class …
+_OPENAI_REASONING_MODELS = [
+    "o1",
+    "o1-pro",
+    "o1-2024-12-17",
+    "o1-pro-2025-03-19",
+    "o1-mini",
+    "o1-mini-2024-09-12",
+    "o1-preview",
+    "o1-preview-2024-09-12",
+    "o3",
+    "o3-pro",
+    "o3-2025-04-16",
+    "o3-mini",
+    "o3-mini-2025-01-31",
+    "o4-mini",
+    "o4-mini-2025-04-16",
+]
+
+
+class OpenAIReasoningReasoningModelsMixin:
+    """Mixin class for OpenAI-style reasoning model clients (o1, o3 series)."""
+
     @classmethod
     def supported_invocation_parameters(cls) -> list[InvocationParameter]:
         return [
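Note: the new mixin lets the OpenAI and Azure reasoning clients share one set of invocation parameters, while register_llm_client maps each model name to the decorated class. A simplified sketch of the pattern, with string keys and toy parameter lists standing in for the real GenerativeProviderKey and InvocationParameter types:

# Simplified sketch of the mixin-plus-registry pattern; the real decorator and
# client classes live in playground_registry.py / playground_clients.py.
from typing import Callable, Sequence

_REGISTRY: dict[tuple[str, str], type] = {}

def register_llm_client(
    provider_key: str, model_names: Sequence[str]
) -> Callable[[type], type]:
    def decorator(cls: type) -> type:
        for name in model_names:
            _REGISTRY[(provider_key, name)] = cls
        return cls
    return decorator

class ReasoningModelsMixin:
    # Placed first in the MRO so subclasses pick up these parameters.
    @classmethod
    def supported_invocation_parameters(cls) -> list[str]:
        return ["max_completion_tokens", "reasoning_effort"]  # illustrative only

class BaseClient:
    @classmethod
    def supported_invocation_parameters(cls) -> list[str]:
        return ["temperature", "top_p"]

@register_llm_client(provider_key="openai", model_names=["o1", "o3-mini"])
class ReasoningClient(ReasoningModelsMixin, BaseClient):
    pass

assert _REGISTRY[("openai", "o1")] is ReasoningClient
assert ReasoningClient.supported_invocation_parameters() == [
    "max_completion_tokens",
    "reasoning_effort",
]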
@@ -1191,6 +1192,16 @@ class OpenAIReasoningStreamingClient(OpenAIStreamingClient):
             ),
         ]
 
+
+@register_llm_client(
+    provider_key=GenerativeProviderKey.OPENAI,
+    model_names=_OPENAI_REASONING_MODELS,
+)
+class OpenAIReasoningNonStreamingClient(
+    OpenAIReasoningReasoningModelsMixin,
+    OpenAIStreamingClient,
+):
+    @override
     async def chat_completion_create(
         self,
         messages: list[
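Note: the @override decorator (imported from typing_extensions earlier in this diff) is purely a static-analysis guard; a type checker will flag the decorated method if no base class defines a matching one. A minimal illustration:

# Minimal illustration of typing_extensions.override: if Base ever stops
# defining chat_completion_create, type checkers report the subclass.
from typing_extensions import override

class Base:
    async def chat_completion_create(self) -> None: ...

class NonStreaming(Base):
    @override
    async def chat_completion_create(self) -> None:  # checked against Base
        ...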
@@ -1283,46 +1294,11 @@ class OpenAIReasoningStreamingClient(OpenAIStreamingClient):
         if role is ChatCompletionMessageRole.TOOL:
             if tool_call_id is None:
                 raise ValueError("tool_call_id is required for tool messages")
-…
-…
-…
+            return ChatCompletionToolMessageParam(
+                {"content": content, "role": "tool", "tool_call_id": tool_call_id}
+            )
         assert_never(role)
 
-    @staticmethod
-    def _llm_token_counts(usage: "CompletionUsage") -> Iterator[tuple[str, Any]]:
-        yield LLM_TOKEN_COUNT_PROMPT, usage.prompt_tokens
-        yield LLM_TOKEN_COUNT_COMPLETION, usage.completion_tokens
-        yield LLM_TOKEN_COUNT_TOTAL, usage.total_tokens
-
-        if hasattr(usage, "prompt_tokens_details") and usage.prompt_tokens_details is not None:
-            prompt_details = usage.prompt_tokens_details
-            if (
-                hasattr(prompt_details, "cached_tokens")
-                and prompt_details.cached_tokens is not None
-            ):
-                yield LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ, prompt_details.cached_tokens
-            if hasattr(prompt_details, "audio_tokens") and prompt_details.audio_tokens is not None:
-                yield LLM_TOKEN_COUNT_PROMPT_DETAILS_AUDIO, prompt_details.audio_tokens
-
-        if (
-            hasattr(usage, "completion_tokens_details")
-            and usage.completion_tokens_details is not None
-        ):
-            completion_details = usage.completion_tokens_details
-            if (
-                hasattr(completion_details, "reasoning_tokens")
-                and completion_details.reasoning_tokens is not None
-            ):
-                yield (
-                    LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING,
-                    completion_details.reasoning_tokens,
-                )
-            if (
-                hasattr(completion_details, "audio_tokens")
-                and completion_details.audio_tokens is not None
-            ):
-                yield LLM_TOKEN_COUNT_COMPLETION_DETAILS_AUDIO, completion_details.audio_tokens
-
 
 
 @register_llm_client(
     provider_key=GenerativeProviderKey.AZURE_OPENAI,
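Note: the deleted _llm_token_counts staticmethod duplicated logic the class already inherits; the new clients in this release still call self._llm_token_counts. Conceptually it flattens an OpenAI CompletionUsage object into span-attribute pairs. A rough standalone sketch, with the attribute keys written out as plain strings rather than the package's semconv constants:

# Rough sketch (attribute keys are assumptions; the package uses constants
# from openinference.semconv): flatten a CompletionUsage-like object into
# (attribute, value) pairs, as the inherited _llm_token_counts does.
from types import SimpleNamespace
from typing import Any, Iterator

def llm_token_counts(usage: Any) -> Iterator[tuple[str, Any]]:
    yield "llm.token_count.prompt", usage.prompt_tokens
    yield "llm.token_count.completion", usage.completion_tokens
    yield "llm.token_count.total", usage.total_tokens
    details = getattr(usage, "completion_tokens_details", None)
    if details is not None and getattr(details, "reasoning_tokens", None) is not None:
        yield "llm.token_count.completion_details.reasoning", details.reasoning_tokens

usage = SimpleNamespace(
    prompt_tokens=11,
    completion_tokens=7,
    total_tokens=18,
    completion_tokens_details=SimpleNamespace(reasoning_tokens=5),
)
print(dict(llm_token_counts(usage)))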
@@ -1376,6 +1352,113 @@ class AzureOpenAIStreamingClient(OpenAIBaseStreamingClient):
         self._attributes[LLM_SYSTEM] = OpenInferenceLLMSystemValues.OPENAI.value
 
 
+@register_llm_client(
+    provider_key=GenerativeProviderKey.AZURE_OPENAI,
+    model_names=_OPENAI_REASONING_MODELS,
+)
+class AzureOpenAIReasoningNonStreamingClient(
+    OpenAIReasoningReasoningModelsMixin,
+    AzureOpenAIStreamingClient,
+):
+    @override
+    async def chat_completion_create(
+        self,
+        messages: list[
+            tuple[ChatCompletionMessageRole, str, Optional[str], Optional[list[JSONScalarType]]]
+        ],
+        tools: list[JSONScalarType],
+        **invocation_parameters: Any,
+    ) -> AsyncIterator[ChatCompletionChunk]:
+        from openai import NOT_GIVEN
+
+        # Convert standard messages to OpenAI messages
+        openai_messages = []
+        for message in messages:
+            openai_message = self.to_openai_chat_completion_param(*message)
+            if openai_message is not None:
+                openai_messages.append(openai_message)
+
+        throttled_create = self.rate_limiter._alimit(self.client.chat.completions.create)
+        response = await throttled_create(
+            messages=openai_messages,
+            model=self.model_name,
+            stream=False,
+            tools=tools or NOT_GIVEN,
+            **invocation_parameters,
+        )
+
+        if response.usage is not None:
+            self._attributes.update(dict(self._llm_token_counts(response.usage)))
+
+        choice = response.choices[0]
+        if choice.message.content:
+            yield TextChunk(content=choice.message.content)
+
+        if choice.message.tool_calls:
+            for tool_call in choice.message.tool_calls:
+                yield ToolCallChunk(
+                    id=tool_call.id,
+                    function=FunctionCallChunk(
+                        name=tool_call.function.name,
+                        arguments=tool_call.function.arguments,
+                    ),
+                )
+
+    def to_openai_chat_completion_param(
+        self,
+        role: ChatCompletionMessageRole,
+        content: JSONScalarType,
+        tool_call_id: Optional[str] = None,
+        tool_calls: Optional[list[JSONScalarType]] = None,
+    ) -> Optional["ChatCompletionMessageParam"]:
+        from openai.types.chat import (
+            ChatCompletionAssistantMessageParam,
+            ChatCompletionDeveloperMessageParam,
+            ChatCompletionToolMessageParam,
+            ChatCompletionUserMessageParam,
+        )
+
+        if role is ChatCompletionMessageRole.USER:
+            return ChatCompletionUserMessageParam(
+                {
+                    "content": content,
+                    "role": "user",
+                }
+            )
+        if role is ChatCompletionMessageRole.SYSTEM:
+            return ChatCompletionDeveloperMessageParam(
+                {
+                    "content": content,
+                    "role": "developer",
+                }
+            )
+        if role is ChatCompletionMessageRole.AI:
+            if tool_calls is None:
+                return ChatCompletionAssistantMessageParam(
+                    {
+                        "content": content,
+                        "role": "assistant",
+                    }
+                )
+            else:
+                return ChatCompletionAssistantMessageParam(
+                    {
+                        "content": content,
+                        "role": "assistant",
+                        "tool_calls": [
+                            self.to_openai_tool_call_param(tool_call) for tool_call in tool_calls
+                        ],
+                    }
+                )
+        if role is ChatCompletionMessageRole.TOOL:
+            if tool_call_id is None:
+                raise ValueError("tool_call_id is required for tool messages")
+            return ChatCompletionToolMessageParam(
+                {"content": content, "role": "tool", "tool_call_id": tool_call_id}
+            )
+        assert_never(role)
+
+
 @register_llm_client(
     provider_key=GenerativeProviderKey.ANTHROPIC,
     model_names=[
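Two details of the new Azure client worth noting: SYSTEM messages are sent with the "developer" role, which OpenAI-style reasoning models expect in place of "system", and a single non-streaming response is adapted into the playground's chunk events by an async generator. A toy version of that adaptation, with a hypothetical TextChunk type:

# Toy sketch (hypothetical names): a non-streaming call re-emitted as chunk
# events so streaming and non-streaming clients share one consumer interface.
import asyncio
from dataclasses import dataclass
from typing import AsyncIterator

@dataclass
class TextChunk:
    content: str

async def fake_non_streaming_call() -> str:
    # Stand-in for the single awaited chat.completions.create(stream=False) call.
    return "final answer"

async def chat_completion_create() -> AsyncIterator[TextChunk]:
    response_text = await fake_non_streaming_call()
    # The whole response arrives at once; it is re-emitted as one chunk.
    yield TextChunk(content=response_text)

async def main() -> None:
    async for chunk in chat_completion_create():
        print(chunk.content)

asyncio.run(main())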
phoenix/server/api/helpers/playground_registry.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any, Callable, Optional, Union
+from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Union
 
 from phoenix.server.api.types.GenerativeProvider import GenerativeProviderKey
 
@@ -59,7 +59,7 @@ PLAYGROUND_CLIENT_REGISTRY: PlaygroundClientRegistry = PlaygroundClientRegistry(
 
 def register_llm_client(
     provider_key: GenerativeProviderKey,
-    model_names: …
+    model_names: Sequence[ModelName],
 ) -> Callable[[type["PlaygroundStreamingClient"]], type["PlaygroundStreamingClient"]]:
     def decorator(cls: type["PlaygroundStreamingClient"]) -> type["PlaygroundStreamingClient"]:
         provider_registry = PLAYGROUND_CLIENT_REGISTRY._registry.setdefault(provider_key, {})
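Note: accepting Sequence[ModelName] rather than a concrete container type means the shared _OPENAI_REASONING_MODELS constant (a list) and ad-hoc tuples both type-check, since Sequence is read-only and covariant. A small sketch:

# Small sketch: Sequence accepts lists and tuples alike, so one shared
# constant can feed several register_llm_client decorations.
from typing import Sequence

def register(model_names: Sequence[str]) -> None:
    for name in model_names:
        print(name)

SHARED_MODELS = ["o1", "o3-mini"]  # a list...
register(SHARED_MODELS)
register(("o4-mini",))             # ...and a tuple both satisfy Sequence[str]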
phoenix/version.py
CHANGED

@@ -1 +1 @@
-__version__ = "11.10.0"
+__version__ = "11.10.1"
{arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/WHEEL
File without changes

{arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/entry_points.txt
File without changes

{arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/licenses/IP_NOTICE
File without changes

{arize_phoenix-11.10.0.dist-info → arize_phoenix-11.10.1.dist-info}/licenses/LICENSE
File without changes