arize-phoenix 7.8.1__py3-none-any.whl → 7.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arize-phoenix might be problematic. Click here for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: arize-phoenix
3
- Version: 7.8.1
3
+ Version: 7.9.0
4
4
  Summary: AI Observability and Evaluation
5
5
  Project-URL: Documentation, https://docs.arize.com/phoenix/
6
6
  Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -127,7 +127,7 @@ Description-Content-Type: text/markdown
127
127
 
128
128
  <p align="center">
129
129
  <a target="_blank" href="https://phoenix.arize.com" style="background:none">
130
- <img alt="phoenix banner" src="https://github.com/Arize-ai/phoenix-assets/blob/main/images/socal/github-large-banner-phoenix.jpg?raw=true" width="auto" height="auto"></img>
130
+ <img alt="phoenix banner" src="https://github.com/Arize-ai/phoenix-assets/blob/main/images/socal/github-large-banner-phoenix-v2.jpg?raw=true" width="auto" height="auto"></img>
131
131
  </a>
132
132
  <br/>
133
133
  <br/>
@@ -160,6 +160,7 @@ Phoenix is an open-source AI observability platform designed for experimentation
160
160
  - [**_Evaluation_**](https://docs.arize.com/phoenix/evaluation/llm-evals) - Leverage LLMs to benchmark your application's performance using response and retrieval evals.
161
161
  - [**_Datasets_**](https://docs.arize.com/phoenix/datasets-and-experiments/overview-datasets) - Create versioned datasets of examples for experimentation, evaluation, and fine-tuning.
162
162
  - [**_Experiments_**](https://docs.arize.com/phoenix/datasets-and-experiments/overview-datasets#experiments) - Track and evaluate changes to prompts, LLMs, and retrieval.
163
+ - [**_Playground_**](https://docs.arize.com/phoenix/prompt-engineering/overview-prompts) - Optimize prompts, compare models, adjust parameters, and replay traced LLM calls.
163
164
 
164
165
  Phoenix is vendor and language agnostic with out-of-the-box support for popular frameworks (🦙[LlamaIndex](https://docs.arize.com/phoenix/tracing/integrations-tracing/llamaindex), 🦜⛓[LangChain](https://docs.arize.com/phoenix/tracing/integrations-tracing/langchain), [Haystack](https://docs.arize.com/phoenix/tracing/integrations-tracing/haystack), 🧩[DSPy](https://docs.arize.com/phoenix/tracing/integrations-tracing/dspy)) and LLM providers ([OpenAI](https://docs.arize.com/phoenix/tracing/integrations-tracing/openai), [Bedrock](https://docs.arize.com/phoenix/tracing/integrations-tracing/bedrock), [MistralAI](https://docs.arize.com/phoenix/tracing/integrations-tracing/mistralai), [VertexAI](https://docs.arize.com/phoenix/tracing/integrations-tracing/vertexai), [LiteLLM](https://docs.arize.com/phoenix/tracing/integrations-tracing/litellm), and more). For details on auto-instrumentation, check out the [OpenInference](https://github.com/Arize-ai/openinference) project.
165
166
 
@@ -225,6 +226,7 @@ Phoenix is built on top of OpenTelemetry and is vendor, language, and framework
225
226
  | [Groq](https://docs.arize.com/phoenix/tracing/integrations-tracing/groq) | `openinference-instrumentation-groq` | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-groq.svg)](https://pypi.python.org/pypi/openinference-instrumentation-groq) |
226
227
  | [Instructor](https://docs.arize.com/phoenix/tracing/integrations-tracing/instructor) | `openinference-instrumentation-instructor` | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-instructor.svg)](https://pypi.python.org/pypi/openinference-instrumentation-instructor) |
227
228
  | [Anthropic](https://docs.arize.com/phoenix/tracing/integrations-tracing/anthropic) | `openinference-instrumentation-anthropic` | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-anthropic.svg)](https://pypi.python.org/pypi/openinference-instrumentation-anthropic) |
229
+ | [Smolagents](https://huggingface.co/docs/smolagents/en/tutorials/inspect_runs) | `openinference-instrumentation-smolagents` | [![PyPI Version](https://img.shields.io/pypi/v/openinference-instrumentation-smolagents.svg)](https://pypi.python.org/pypi/openinference-instrumentation-smolagents) |
228
230
 
229
231
  ### JavaScript
230
232
 
@@ -6,7 +6,7 @@ phoenix/exceptions.py,sha256=n2L2KKuecrdflB9MsCdAYCiSEvGJptIsfRkXMoJle7A,169
6
6
  phoenix/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
7
7
  phoenix/services.py,sha256=kpW1WL0kiB8XJsO6XycvZVJ-lBkNoenhQ7atCvBoSe8,5365
8
8
  phoenix/settings.py,sha256=ht-0oN-sMV6SPXrk7Tu1EZlngpAYkGNLYPhO8DyrdQI,661
9
- phoenix/version.py,sha256=q9HCYvd_KKYPNhrFJZNCqDLkLzTKEm20j2_jzE7u-Lw,22
9
+ phoenix/version.py,sha256=cR9GZGw5gRT-1yq69vN7Bfk48p6j47mGqQA4xv9soHw,22
10
10
  phoenix/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
11
  phoenix/core/embedding_dimension.py,sha256=zKGbcvwOXgLf-yrJBpQyKtd-LEOPRKHnUToyAU8Owis,87
12
12
  phoenix/core/model.py,sha256=qBFraOtmwCCnWJltKNP18DDG0mULXigytlFsa6YOz6k,4837
@@ -135,7 +135,7 @@ phoenix/server/api/dataloaders/cache/two_tier_cache.py,sha256=cmo8FUT3E91R139IEz
135
135
  phoenix/server/api/helpers/__init__.py,sha256=m2-xaSPqUiSs91k62JaRDjFNfl-1byxBfY-m_Vxw16U,272
136
136
  phoenix/server/api/helpers/dataset_helpers.py,sha256=AMlKY9_e0wnTrTSSQemM5NHfnpwARSytx-m9YK6f6bY,8421
137
137
  phoenix/server/api/helpers/experiment_run_filters.py,sha256=DOnVwrmn39eAkk2mwuZP8kIcAnR5jrOgllEwWSjsw94,29893
138
- phoenix/server/api/helpers/playground_clients.py,sha256=RJNl2WbeAXzK_Am_g9RJsacnLqmC0J0w-TV5cjfq0cg,36832
138
+ phoenix/server/api/helpers/playground_clients.py,sha256=JU42O_Og_LgpjJyH1kndae9xUqHezoOs34Mb2RSkaRo,38827
139
139
  phoenix/server/api/helpers/playground_registry.py,sha256=CPLMziFB2wmr-dfbx7VbzO2f8YIG_k5RftzvGXYGQ1w,2570
140
140
  phoenix/server/api/helpers/playground_spans.py,sha256=qGk7V7IZK7EkRE1mvZyROpLN5kgOahOZifFzUWmqYFc,16546
141
141
  phoenix/server/api/input_types/AddExamplesToDatasetInput.py,sha256=mIQz0S_z8YdrktKIY6RCvtNJ2yZF9pYvTGgasUsI-54,430
@@ -160,7 +160,7 @@ phoenix/server/api/input_types/DimensionFilter.py,sha256=eBYcn7ECSJQlEePvbStqkHB
160
160
  phoenix/server/api/input_types/DimensionInput.py,sha256=Vfx5FmiMKey4-EHDQsQRPzSAMRJMN5oVMLDUl4NKAa8,164
161
161
  phoenix/server/api/input_types/GenerativeModelInput.py,sha256=h_9dNkz-LBgOLKQ5_ijch4UNGiDb4x5CCC96WyverSg,551
162
162
  phoenix/server/api/input_types/Granularity.py,sha256=PiOFWmSMs_w7ZZMNfEWPzIY6S6guI83Vy4NXXSEewGo,2310
163
- phoenix/server/api/input_types/InvocationParameters.py,sha256=Lg6-SKsx72Lb-jzjJLSfPfmyt8bPz_8JtLpv5-TDcZQ,5154
163
+ phoenix/server/api/input_types/InvocationParameters.py,sha256=I_FvPvY2sXxNd-kFSB96-1DOW7lo-VbqVWzppU_5hy8,5196
164
164
  phoenix/server/api/input_types/PatchAnnotationInput.py,sha256=NWhkcbcGNPwfOYsN3wm5YFNNrSc5T-8Y5my74RK99HE,520
165
165
  phoenix/server/api/input_types/PatchDatasetExamplesInput.py,sha256=_uMqkAInhLDvzUSASl6HgLNulTsekMcYzyd5J6LF90I,884
166
166
  phoenix/server/api/input_types/PatchDatasetInput.py,sha256=OURtTVY8Z_oFEDtKwT1LCMaOK5D4QYo5TVQ6mDrex-g,328
@@ -337,9 +337,9 @@ phoenix/utilities/project.py,sha256=auVpARXkDb-JgeX5f2aStyFIkeKvGwN9l7qrFeJMVxI,
337
337
  phoenix/utilities/re.py,sha256=x8Xbk-Wa6qDMAtUd_7JtZvKtrYEuMY-bchB0n163_5c,2006
338
338
  phoenix/utilities/span_store.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
339
339
  phoenix/utilities/template_formatters.py,sha256=gh9PJD6WEGw7TEYXfSst1UR4pWWwmjxMLrDVQ_CkpkQ,2779
340
- arize_phoenix-7.8.1.dist-info/METADATA,sha256=saLGcoxnbaN2fat0opLf_ayMiIPtYHf7LgmOW-4RBjk,23052
341
- arize_phoenix-7.8.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
342
- arize_phoenix-7.8.1.dist-info/entry_points.txt,sha256=Pgpn8Upxx9P8z8joPXZWl2LlnAlGc3gcQoVchb06X1Q,94
343
- arize_phoenix-7.8.1.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
344
- arize_phoenix-7.8.1.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
345
- arize_phoenix-7.8.1.dist-info/RECORD,,
340
+ arize_phoenix-7.9.0.dist-info/METADATA,sha256=Dm7BoR78lSCEm7pc2Lar9DzCeqHvudTrG4veoMMrOSI,23522
341
+ arize_phoenix-7.9.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
342
+ arize_phoenix-7.9.0.dist-info/entry_points.txt,sha256=Pgpn8Upxx9P8z8joPXZWl2LlnAlGc3gcQoVchb06X1Q,94
343
+ arize_phoenix-7.9.0.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
344
+ arize_phoenix-7.9.0.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
345
+ arize_phoenix-7.9.0.dist-info/RECORD,,
@@ -37,6 +37,7 @@ from phoenix.server.api.input_types.InvocationParameters import (
37
37
  InvocationParameter,
38
38
  InvocationParameterInput,
39
39
  JSONInvocationParameter,
40
+ StringInvocationParameter,
40
41
  StringListInvocationParameter,
41
42
  extract_parameter,
42
43
  validate_invocation_parameters,
@@ -501,6 +502,11 @@ class OpenAIO1StreamingClient(OpenAIStreamingClient):
501
502
  @classmethod
502
503
  def supported_invocation_parameters(cls) -> list[InvocationParameter]:
503
504
  return [
505
+ StringInvocationParameter(
506
+ invocation_name="reasoning_effort",
507
+ label="Reasoning Effort",
508
+ canonical_name=CanonicalParameterName.REASONING_EFFORT,
509
+ ),
504
510
  IntInvocationParameter(
505
511
  invocation_name="max_completion_tokens",
506
512
  canonical_name=CanonicalParameterName.MAX_COMPLETION_TOKENS,
@@ -523,6 +529,49 @@ class OpenAIO1StreamingClient(OpenAIStreamingClient):
523
529
  ),
524
530
  ]
525
531
 
532
+ async def chat_completion_create(
533
+ self,
534
+ messages: list[
535
+ tuple[ChatCompletionMessageRole, str, Optional[str], Optional[list[JSONScalarType]]]
536
+ ],
537
+ tools: list[JSONScalarType],
538
+ **invocation_parameters: Any,
539
+ ) -> AsyncIterator[ChatCompletionChunk]:
540
+ from openai import NOT_GIVEN
541
+
542
+ # Convert standard messages to OpenAI messages
543
+ openai_messages = []
544
+ for message in messages:
545
+ openai_message = self.to_openai_chat_completion_param(*message)
546
+ if openai_message is not None:
547
+ openai_messages.append(openai_message)
548
+
549
+ throttled_create = self.rate_limiter._alimit(self.client.chat.completions.create)
550
+ response = await throttled_create(
551
+ messages=openai_messages,
552
+ model=self.model_name,
553
+ stream=False,
554
+ tools=tools or NOT_GIVEN,
555
+ **invocation_parameters,
556
+ )
557
+
558
+ if response.usage is not None:
559
+ self._attributes.update(dict(self._llm_token_counts(response.usage)))
560
+
561
+ choice = response.choices[0]
562
+ if choice.message.content:
563
+ yield TextChunk(content=choice.message.content)
564
+
565
+ if choice.message.tool_calls:
566
+ for tool_call in choice.message.tool_calls:
567
+ yield ToolCallChunk(
568
+ id=tool_call.id,
569
+ function=FunctionCallChunk(
570
+ name=tool_call.function.name,
571
+ arguments=tool_call.function.arguments,
572
+ ),
573
+ )
574
+
526
575
  def to_openai_chat_completion_param(
527
576
  self,
528
577
  role: ChatCompletionMessageRole,
@@ -532,6 +581,7 @@ class OpenAIO1StreamingClient(OpenAIStreamingClient):
532
581
  ) -> Optional["ChatCompletionMessageParam"]:
533
582
  from openai.types.chat import (
534
583
  ChatCompletionAssistantMessageParam,
584
+ ChatCompletionDeveloperMessageParam,
535
585
  ChatCompletionToolMessageParam,
536
586
  ChatCompletionUserMessageParam,
537
587
  )
@@ -544,7 +594,12 @@ class OpenAIO1StreamingClient(OpenAIStreamingClient):
544
594
  }
545
595
  )
546
596
  if role is ChatCompletionMessageRole.SYSTEM:
547
- return None # System messages are not supported for o1 models
597
+ return ChatCompletionDeveloperMessageParam(
598
+ {
599
+ "content": content,
600
+ "role": "developer",
601
+ }
602
+ )
548
603
  if role is ChatCompletionMessageRole.AI:
549
604
  if tool_calls is None:
550
605
  return ChatCompletionAssistantMessageParam(
@@ -15,6 +15,7 @@ class CanonicalParameterName(str, Enum):
15
15
  RANDOM_SEED = "random_seed"
16
16
  TOOL_CHOICE = "tool_choice"
17
17
  RESPONSE_FORMAT = "response_format"
18
+ REASONING_EFFORT = "reasoning_effort"
18
19
 
19
20
 
20
21
  @strawberry.enum
phoenix/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "7.8.1"
1
+ __version__ = "7.9.0"