arize-phoenix 3.0.3__py3-none-any.whl → 3.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arize-phoenix might be problematic; consult the registry's advisory page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: arize-phoenix
3
- Version: 3.0.3
3
+ Version: 3.1.1
4
4
  Summary: ML Observability in your notebook
5
5
  Project-URL: Documentation, https://docs.arize.com/phoenix/
6
6
  Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -51,7 +51,7 @@ Requires-Dist: hatch; extra == 'dev'
51
51
  Requires-Dist: jupyter; extra == 'dev'
52
52
  Requires-Dist: langchain>=0.0.334; extra == 'dev'
53
53
  Requires-Dist: litellm>=1.0.3; extra == 'dev'
54
- Requires-Dist: llama-index<0.10.0; extra == 'dev'
54
+ Requires-Dist: llama-index>=0.10.3; extra == 'dev'
55
55
  Requires-Dist: nbqa; extra == 'dev'
56
56
  Requires-Dist: pandas-stubs<=2.0.2.230605; extra == 'dev'
57
57
  Requires-Dist: pre-commit; extra == 'dev'
@@ -64,8 +64,9 @@ Requires-Dist: strawberry-graphql[debug-server]==0.208.2; extra == 'dev'
64
64
  Provides-Extra: experimental
65
65
  Requires-Dist: tenacity; extra == 'experimental'
66
66
  Provides-Extra: llama-index
67
- Requires-Dist: llama-index==0.9.45; extra == 'llama-index'
68
- Requires-Dist: openinference-instrumentation-llama-index==0.1.3; extra == 'llama-index'
67
+ Requires-Dist: llama-index-callbacks-arize-phoenix>=0.1.2; extra == 'llama-index'
68
+ Requires-Dist: llama-index==0.10.3; extra == 'llama-index'
69
+ Requires-Dist: openinference-instrumentation-llama-index>=1.0.0; extra == 'llama-index'
69
70
  Description-Content-Type: text/markdown
70
71
 
71
72
  <p align="center">
@@ -4,7 +4,7 @@ phoenix/datetime_utils.py,sha256=D955QLrkgrrSdUM6NyqbCeAu2SMsjhR5rHVQEsVUdng,277
4
4
  phoenix/exceptions.py,sha256=X5k9ipUDfwSCwZB-H5zFJLas86Gf9tAx0W4l5TZxp5k,108
5
5
  phoenix/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
6
6
  phoenix/services.py,sha256=f6AeyKTuOpy9RCcTCjVH3gx5nYZhbTMFOuv1WSUOB5o,4992
7
- phoenix/version.py,sha256=3PslnGRHLeT8kAWbhtBM110cQkzH_QzfQO5_B6lHOuU,22
7
+ phoenix/version.py,sha256=14eImCCNxRh4pWMIfkKe4h5OCS1ICfRjHSj2AfgEXa0,22
8
8
  phoenix/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
9
  phoenix/core/embedding_dimension.py,sha256=zKGbcvwOXgLf-yrJBpQyKtd-LEOPRKHnUToyAU8Owis,87
10
10
  phoenix/core/evals.py,sha256=gJyqQzpud5YjtoY8h4pgXvHDsdubGfqmEewLuZHPPmQ,10224
@@ -31,11 +31,11 @@ phoenix/experimental/evals/models/__init__.py,sha256=j1N7DhiOPbcaemtVBONcQ0miNnG
31
31
  phoenix/experimental/evals/models/anthropic.py,sha256=BZmLvepkSMj_opCWsZoL34a3yAwRdl7qbJB86DFR84E,6688
32
32
  phoenix/experimental/evals/models/base.py,sha256=RWz_Jzj3Z1fENl2WUXIz-4eMsk6HfYXc0K8IZ-BJss4,6306
33
33
  phoenix/experimental/evals/models/bedrock.py,sha256=nVOXRZr-iDwHEINozpO2bqZR2KEeDHNyj6jgQPONQYs,8565
34
- phoenix/experimental/evals/models/litellm.py,sha256=0c-eJFsx41W0MsqeUd4UPquLBKSZp3BRNhKhX2uFCAs,4123
35
- phoenix/experimental/evals/models/openai.py,sha256=R5DXvIVcASLsoOGQBOevOfpUx4j0dOIVPS3voE8CuuY,17367
34
+ phoenix/experimental/evals/models/litellm.py,sha256=Xo415fJehxIj32zpzVtvzQXADJURPUvNZJHmc_FAKvE,4759
35
+ phoenix/experimental/evals/models/openai.py,sha256=v2qkdFZc-u0ZAfxnV4v5UMWkMc9P3k4Gx1XaTnVFTz4,17922
36
36
  phoenix/experimental/evals/models/rate_limiters.py,sha256=5GVN0RQKt36Przg3-9jLgocRmyg-tbeO-cdbuLIx89w,10160
37
- phoenix/experimental/evals/models/vertex.py,sha256=1VAGJNoiUm56pP8G9Qvnf-4_Rl9u9NI7ToOKbWFNtpk,6226
38
- phoenix/experimental/evals/models/vertexai.py,sha256=_txsOP2RHyR3AnugeJRFUNvYm3xXvfMbWpULxTko4OA,4821
37
+ phoenix/experimental/evals/models/vertex.py,sha256=3kj0tdwyCcgs39x1XnMgFoSbZwXvvBAPL7AwHYe-qIE,6236
38
+ phoenix/experimental/evals/models/vertexai.py,sha256=nFN5isv1GPNsvUA4OxSnQd1hTMi-3BcxfDz1y7QcoA0,6189
39
39
  phoenix/experimental/evals/templates/__init__.py,sha256=GSJSoWJ4jwyoUANniidmWMUtXQhNQYbTJbfFqCvuYuo,1470
40
40
  phoenix/experimental/evals/templates/default_templates.py,sha256=dVKmoLwqgAyGcRuezz9WKnXSHhw7-qk1R8j6wSmqh0s,20722
41
41
  phoenix/experimental/evals/templates/template.py,sha256=ImFSaTPo9oalPNwq7cNdOCndrvuwLuIyIFKsgDVcoJE,6715
@@ -135,22 +135,22 @@ phoenix/session/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
135
135
  phoenix/session/client.py,sha256=lTV9Q4ljpV9wqasya4u3x7BCijrfAWKDm5akzrg0424,4885
136
136
  phoenix/session/data_extractor.py,sha256=V4ntT2FcmbV_-zobcbPR51HKWaDyMnXHj4Pmu7MZ0OQ,1664
137
137
  phoenix/session/evaluation.py,sha256=DaAtA0XYJbXRJO_StGywa-9APlz2ORSmCXzxrtn3rvI,4997
138
- phoenix/session/session.py,sha256=mrTAQkUFMGjN65ApYqRjhGXExbfxu2ODJcJ14MSDOLE,21719
138
+ phoenix/session/session.py,sha256=R83kPeiqLxEJTD6cTjJqqMjMV-iQzQFKw6rMXnbIxnc,21800
139
139
  phoenix/trace/__init__.py,sha256=4d_MqzUIFmlY9WWcFeTONJ4xL5mPGoWZaPM2TJ0ZDBQ,266
140
140
  phoenix/trace/errors.py,sha256=wB1z8qdPckngdfU-TORToekvg3344oNFAA83_hC2yFY,180
141
141
  phoenix/trace/evaluation_conventions.py,sha256=t8jydM3U0-T5YpiQKRJ3tWdWGlHtzKyttYdw-ddvPOk,1048
142
142
  phoenix/trace/exporter.py,sha256=O-9Arn-S_B9Me-jy4Qa84y6lvxKNoa8pczrsamPl3_Q,4871
143
- phoenix/trace/fixtures.py,sha256=LokNedhbGYxpzXznteO4m5QehvNYjzvoh231-CMJQeY,7113
143
+ phoenix/trace/fixtures.py,sha256=HOjuYOB_xtR7JUeLz7WpEroiGj4E5_SxVLSjBYUy8RQ,7055
144
144
  phoenix/trace/otel.py,sha256=naFWTwrqyIoeGJ5YMT9Fyhd44CEtQvpscVluDOXD6Fo,16744
145
145
  phoenix/trace/schemas.py,sha256=zlAY8v-PLgEDqTgbmDxY3NJlCAmzj_3ZCJFebQOBz3M,6028
146
146
  phoenix/trace/span_evaluations.py,sha256=FvY9YRnKuYIzCa-H9P5SuDaI2DeqGnVCjNgclC2v3HA,12909
147
147
  phoenix/trace/span_json_decoder.py,sha256=IAFakPRqSMYxTPKYFMiXYxm7U-FipdN8_xbvapDS0Qc,3131
148
148
  phoenix/trace/span_json_encoder.py,sha256=hIDd1I6xm01kaNmeKjHOHyxUGI3uTg5J_Os1kXtAb6g,1755
149
- phoenix/trace/trace_dataset.py,sha256=DF4JH3lq7ULsw6sGo6c4SULChoxNSRJA4knQXfSrTR8,13485
149
+ phoenix/trace/trace_dataset.py,sha256=RpHIfZLbMmULOIb-fKXJkQLhIdC0sJlAOTjlyJppMYA,13776
150
150
  phoenix/trace/tracer.py,sha256=JDKlyvjy6AsQmaA60ycJ1hKXoUQU61jqPx3nvYr8xUc,3647
151
151
  phoenix/trace/utils.py,sha256=7LurVGXn245cjj4MJsc7v6jq4DSJkpK6YGBfIaSywuw,1307
152
152
  phoenix/trace/dsl/__init__.py,sha256=WIQIjJg362XD3s50OsPJJ0xbDsGp41bSv7vDllLrPuA,144
153
- phoenix/trace/dsl/filter.py,sha256=7m1v9RtZsAzg-E8iXKLvVSlCJLPtnUapzEIb-nrygsM,13208
153
+ phoenix/trace/dsl/filter.py,sha256=fRPUIsucxXGJcbtei86ApSIjEP5PmdsONmYvT7EBu6w,14240
154
154
  phoenix/trace/dsl/helpers.py,sha256=U71HmCecJQ_zHq0g2mFKbxrRuiJDkopskWMpFoBNP-Y,1722
155
155
  phoenix/trace/dsl/missing.py,sha256=BWPOHr2_tBkPDgVeq8GVXXVbNbJiBelu4NtwHBg6mTE,1435
156
156
  phoenix/trace/dsl/query.py,sha256=XoFwKEALzGqUERy7B5fgD-n0s87zN6jRVrZgW6-jqRo,14819
@@ -167,8 +167,8 @@ phoenix/trace/v1/evaluation_pb2.pyi,sha256=cCbbx06gwQmaH14s3J1X25TtaARh-k1abbxQd
167
167
  phoenix/utilities/__init__.py,sha256=3TVirVnjIGyaCFuJCqeZO4tjlzQ_chZgYM0itIwsEpE,656
168
168
  phoenix/utilities/error_handling.py,sha256=7b5rpGFj9EWZ8yrZK1IHvxB89suWk3lggDayUQcvZds,1946
169
169
  phoenix/utilities/logging.py,sha256=lDXd6EGaamBNcQxL4vP1au9-i_SXe0OraUDiJOcszSw,222
170
- arize_phoenix-3.0.3.dist-info/METADATA,sha256=LZ4BcSMrV3NFDcOfufwwutdTnKMYt6cBBB7t_BJ02b8,28693
171
- arize_phoenix-3.0.3.dist-info/WHEEL,sha256=TJPnKdtrSue7xZ_AVGkp9YXcvDrobsjBds1du3Nx6dc,87
172
- arize_phoenix-3.0.3.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
173
- arize_phoenix-3.0.3.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
174
- arize_phoenix-3.0.3.dist-info/RECORD,,
170
+ arize_phoenix-3.1.1.dist-info/METADATA,sha256=EHZnYONC0nNQi_jXlGsztc4cFwfizBY27qolo8uA0ys,28776
171
+ arize_phoenix-3.1.1.dist-info/WHEEL,sha256=TJPnKdtrSue7xZ_AVGkp9YXcvDrobsjBds1du3Nx6dc,87
172
+ arize_phoenix-3.1.1.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
173
+ arize_phoenix-3.1.1.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
174
+ arize_phoenix-3.1.1.dist-info/RECORD,,
@@ -1,4 +1,5 @@
1
1
  import logging
2
+ import warnings
2
3
  from dataclasses import dataclass, field
3
4
  from typing import TYPE_CHECKING, Any, Dict, List, Optional
4
5
 
@@ -12,7 +13,7 @@ logger = logging.getLogger(__name__)
12
13
 
13
14
  @dataclass
14
15
  class LiteLLMModel(BaseEvalModel):
15
- model_name: str = "gpt-3.5-turbo"
16
+ model: str = "gpt-3.5-turbo"
16
17
  """The model name to use."""
17
18
  temperature: float = 0.0
18
19
  """What sampling temperature to use."""
@@ -34,22 +35,42 @@ class LiteLLMModel(BaseEvalModel):
34
35
  max_content_size: Optional[int] = None
35
36
  """If you're using a fine-tuned model, set this to the maximum content size"""
36
37
 
38
+ # Deprecated fields
39
+ model_name: Optional[str] = None
40
+ """
41
+ .. deprecated:: 3.0.0
42
+ use `model` instead. This will be removed in a future release.
43
+ """
44
+
37
45
  def __post_init__(self) -> None:
46
+ self._migrate_model_name()
38
47
  self._init_environment()
39
48
  self._init_model_encoding()
40
49
 
50
+ def _migrate_model_name(self) -> None:
51
+ if self.model_name is not None:
52
+ warning_message = "The `model_name` field is deprecated. Use `model` instead. \
53
+ This will be removed in a future release."
54
+ warnings.warn(
55
+ warning_message,
56
+ DeprecationWarning,
57
+ )
58
+ print(warning_message)
59
+ self.model = self.model_name
60
+ self.model_name = None
61
+
41
62
  def _init_environment(self) -> None:
42
63
  try:
43
64
  import litellm
44
65
  from litellm import validate_environment
45
66
 
46
67
  self._litellm = litellm
47
- env_info = validate_environment(self._litellm.utils.get_llm_provider(self.model_name))
68
+ env_info = validate_environment(self._litellm.utils.get_llm_provider(self.model))
48
69
 
49
70
  if not env_info["keys_in_environment"]:
50
71
  raise RuntimeError(
51
72
  f"Missing environment variable(s): '{str(env_info['missing_keys'])}', for "
52
- f"model: {self.model_name}. \nFor additional information about the right "
73
+ f"model: {self.model}. \nFor additional information about the right "
53
74
  "environment variables for specific model providers:\n"
54
75
  "https://docs.litellm.ai/docs/completion/input#provider-specific-params."
55
76
  )
@@ -67,14 +88,14 @@ class LiteLLMModel(BaseEvalModel):
67
88
 
68
89
  @property
69
90
  def max_context_size(self) -> int:
70
- context_size = self.max_content_size or self._litellm.get_max_tokens(self.model_name).get(
91
+ context_size = self.max_content_size or self._litellm.get_max_tokens(self.model).get(
71
92
  "max_tokens", None
72
93
  )
73
94
 
74
95
  if context_size is None:
75
96
  raise ValueError(
76
- "Can't determine maximum context size. An unknown model name was "
77
- + f"used: {self.model_name}."
97
+ "Can't determine maximum context size. An unknown model was "
98
+ + f"used: {self.model}."
78
99
  )
79
100
 
80
101
  return context_size
@@ -84,11 +105,11 @@ class LiteLLMModel(BaseEvalModel):
84
105
  raise NotImplementedError
85
106
 
86
107
  def get_tokens_from_text(self, text: str) -> List[int]:
87
- result: List[int] = self._encoding(model=self.model_name, text=text)
108
+ result: List[int] = self._encoding(model=self.model, text=text)
88
109
  return result
89
110
 
90
111
  def get_text_from_tokens(self, tokens: List[int]) -> str:
91
- return str(self._decoding(model=self.model_name, tokens=tokens))
112
+ return str(self._decoding(model=self.model, tokens=tokens))
92
113
 
93
114
  async def _async_generate(self, prompt: str, **kwargs: Dict[str, Any]) -> str:
94
115
  return self._generate(prompt, **kwargs)
@@ -96,7 +117,7 @@ class LiteLLMModel(BaseEvalModel):
96
117
  def _generate(self, prompt: str, **kwargs: Dict[str, Any]) -> str:
97
118
  messages = self._get_messages_from_prompt(prompt)
98
119
  response = self._litellm.completion(
99
- model=self.model_name,
120
+ model=self.model,
100
121
  messages=messages,
101
122
  temperature=self.temperature,
102
123
  max_tokens=self.max_tokens,
@@ -1,5 +1,6 @@
1
1
  import logging
2
2
  import os
3
+ import warnings
3
4
  from dataclasses import dataclass, field, fields
4
5
  from typing import (
5
6
  TYPE_CHECKING,
@@ -64,8 +65,10 @@ class OpenAIModel(BaseEvalModel):
64
65
  An optional base URL to use for the OpenAI API. If not provided, will default
65
66
  to what's configured in OpenAI
66
67
  """
67
- model_name: str = "gpt-4"
68
- """Model name to use. In of azure, this is the deployment name such as gpt-35-instant"""
68
+ model: str = "gpt-4"
69
+ """
70
+ Model name to use. In of azure, this is the deployment name such as gpt-35-instant
71
+ """
69
72
  temperature: float = 0.0
70
73
  """What sampling temperature to use."""
71
74
  max_tokens: int = 256
@@ -106,7 +109,15 @@ class OpenAIModel(BaseEvalModel):
106
109
  azure_ad_token: Optional[str] = field(default=None)
107
110
  azure_ad_token_provider: Optional[Callable[[], str]] = field(default=None)
108
111
 
112
+ # Deprecated fields
113
+ model_name: Optional[str] = field(default=None)
114
+ """
115
+ .. deprecated:: 3.0.0
116
+ use `model` instead. This will be removed
117
+ """
118
+
109
119
  def __post_init__(self) -> None:
120
+ self._migrate_model_name()
110
121
  self._init_environment()
111
122
  self._init_open_ai()
112
123
  self._init_tiktoken()
@@ -115,6 +126,17 @@ class OpenAIModel(BaseEvalModel):
115
126
  def reload_client(self) -> None:
116
127
  self._init_open_ai()
117
128
 
129
+ def _migrate_model_name(self) -> None:
130
+ if self.model_name:
131
+ warning_message = "The `model_name` field is deprecated. Use `model` instead. \
132
+ This will be removed in a future release."
133
+ print(
134
+ warning_message,
135
+ )
136
+ warnings.warn(warning_message, DeprecationWarning)
137
+ self.model = self.model_name
138
+ self.model_name = None
139
+
118
140
  def _init_environment(self) -> None:
119
141
  try:
120
142
  import openai
@@ -141,9 +163,7 @@ class OpenAIModel(BaseEvalModel):
141
163
  # For Azure, you need to provide the endpoint and the endpoint
142
164
  self._is_azure = bool(self.azure_endpoint)
143
165
 
144
- self._model_uses_legacy_completion_api = self.model_name.startswith(
145
- LEGACY_COMPLETION_API_MODELS
146
- )
166
+ self._model_uses_legacy_completion_api = self.model.startswith(LEGACY_COMPLETION_API_MODELS)
147
167
  if self.api_key is None:
148
168
  api_key = os.getenv(OPENAI_API_KEY_ENVVAR_NAME)
149
169
  if api_key is None:
@@ -203,7 +223,7 @@ class OpenAIModel(BaseEvalModel):
203
223
 
204
224
  def _init_tiktoken(self) -> None:
205
225
  try:
206
- encoding = self._tiktoken.encoding_for_model(self.model_name)
226
+ encoding = self._tiktoken.encoding_for_model(self.model)
207
227
  except KeyError:
208
228
  encoding = self._tiktoken.get_encoding("cl100k_base")
209
229
  self._tiktoken_encoding = encoding
@@ -333,20 +353,20 @@ class OpenAIModel(BaseEvalModel):
333
353
 
334
354
  @property
335
355
  def max_context_size(self) -> int:
336
- model_name = self.model_name
356
+ model = self.model
337
357
  # handling finetuned models
338
- if "ft-" in model_name:
339
- model_name = self.model_name.split(":")[0]
340
- if model_name == "gpt-4":
358
+ if "ft-" in model:
359
+ model = self.model.split(":")[0]
360
+ if model == "gpt-4":
341
361
  # Map gpt-4 to the current default
342
- model_name = "gpt-4-0613"
362
+ model = "gpt-4-0613"
343
363
 
344
- context_size = MODEL_TOKEN_LIMIT_MAPPING.get(model_name, None)
364
+ context_size = MODEL_TOKEN_LIMIT_MAPPING.get(model, None)
345
365
 
346
366
  if context_size is None:
347
367
  raise ValueError(
348
368
  "Can't determine maximum context size. An unknown model name was "
349
- f"used: {model_name}. Please provide a valid OpenAI model name. "
369
+ f"used: {model}. Please provide a valid OpenAI model name. "
350
370
  "Known models are: " + ", ".join(MODEL_TOKEN_LIMIT_MAPPING.keys())
351
371
  )
352
372
 
@@ -355,7 +375,7 @@ class OpenAIModel(BaseEvalModel):
355
375
  @property
356
376
  def public_invocation_params(self) -> Dict[str, Any]:
357
377
  return {
358
- **({"model": self.model_name}),
378
+ **({"model": self.model}),
359
379
  **self._default_params,
360
380
  **self.model_kwargs,
361
381
  }
@@ -388,8 +408,8 @@ class OpenAIModel(BaseEvalModel):
388
408
 
389
409
  Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
390
410
  """ # noqa
391
- model_name = self.model_name
392
- if model_name == "gpt-3.5-turbo-0301":
411
+ model = self.model
412
+ if model == "gpt-3.5-turbo-0301":
393
413
  tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
394
414
  tokens_per_name = -1 # if there's a name, the role is omitted
395
415
  else:
@@ -158,14 +158,14 @@ class GeminiModel(BaseEvalModel):
158
158
  printif(
159
159
  self._verbose, "The 'candidates' object does not have a 'text' attribute."
160
160
  )
161
- printif(self._verbose, response.candidates[0])
161
+ printif(self._verbose, str(response.candidates[0]))
162
162
  candidate = ""
163
163
  else:
164
164
  printif(
165
165
  self._verbose,
166
166
  "The 'candidates' attribute of 'response' is either not a list or is empty.",
167
167
  )
168
- printif(self._verbose, response)
168
+ printif(self._verbose, str(response))
169
169
  candidate = ""
170
170
  else:
171
171
  printif(self._verbose, "The 'response' object does not have a 'candidates' attribute.")
@@ -1,3 +1,5 @@
1
+ import logging
2
+ import warnings
1
3
  from dataclasses import dataclass
2
4
  from typing import TYPE_CHECKING, Any, Dict, List, Optional
3
5
 
@@ -6,6 +8,7 @@ from phoenix.experimental.evals.models.base import BaseEvalModel
6
8
  if TYPE_CHECKING:
7
9
  from google.auth.credentials import Credentials # type:ignore
8
10
 
11
+ logger = logging.getLogger(__name__)
9
12
 
10
13
  MINIMUM_VERTEX_AI_VERSION = "1.33.0"
11
14
 
@@ -18,9 +21,9 @@ class VertexAIModel(BaseEvalModel):
18
21
  "location (str): The default location to use when making API calls. If not "
19
22
  "set defaults to us-central-1."
20
23
  credentials: Optional["Credentials"] = None
21
- model_name: str = "text-bison"
22
- tuned_model_name: Optional[str] = None
23
- "The name of a tuned model. If provided, model_name is ignored."
24
+ model: str = "text-bison"
25
+ tuned_model: Optional[str] = None
26
+ "The name of a tuned model. If provided, model is ignored."
24
27
  max_retries: int = 6
25
28
  """Maximum number of retries to make when generating."""
26
29
  retry_min_seconds: int = 10
@@ -40,11 +43,50 @@ class VertexAIModel(BaseEvalModel):
40
43
  "How the model selects tokens for output, the next token is selected from "
41
44
  "among the top-k most probable tokens. Top-k is ignored for Codey models."
42
45
 
46
+ # Deprecated fields
47
+ model_name: Optional[str] = None
48
+ """
49
+ .. deprecated:: 3.0.0
50
+ use `model` instead. This will be removed in a future release.
51
+ """
52
+ tuned_model_name: Optional[str] = None
53
+ """
54
+ .. deprecated:: 3.0.0
55
+ use `tuned_model` instead. This will be removed in a future release.
56
+ """
57
+
43
58
  def __post_init__(self) -> None:
59
+ self._migrate_model_name()
44
60
  self._init_environment()
45
61
  self._init_vertex_ai()
46
62
  self._instantiate_model()
47
63
 
64
+ def _migrate_model_name(self) -> None:
65
+ if self.model_name is not None:
66
+ warning_message = (
67
+ "The `model_name` field is deprecated. Use `model` instead. "
68
+ + "This will be removed in a future release."
69
+ )
70
+ warnings.warn(
71
+ warning_message,
72
+ DeprecationWarning,
73
+ )
74
+ print(warning_message)
75
+ self.model = self.model_name
76
+ self.model_name = None
77
+ if self.tuned_model_name is not None:
78
+ warning_message = (
79
+ "`tuned_model_name` field is deprecated. Use `tuned_model` instead. "
80
+ + "This will be removed in a future release."
81
+ )
82
+ warnings.warn(
83
+ warning_message,
84
+ DeprecationWarning,
85
+ )
86
+ print(warning_message)
87
+ self.tuned_model = self.tuned_model_name
88
+ self.tuned_model_name = None
89
+
48
90
  def _init_environment(self) -> None:
49
91
  try:
50
92
  import google.api_core.exceptions as google_exceptions # type:ignore
@@ -72,10 +114,10 @@ class VertexAIModel(BaseEvalModel):
72
114
 
73
115
  model = TextGenerationModel
74
116
 
75
- if self.tuned_model_name:
76
- self._model = model.get_tuned_model(self.tuned_model_name)
117
+ if self.tuned_model:
118
+ self._model = model.get_tuned_model(self.tuned_model)
77
119
  else:
78
- self._model = model.from_pretrained(self.model_name)
120
+ self._model = model.from_pretrained(self.model)
79
121
 
80
122
  def verbose_generation_info(self) -> str:
81
123
  return f"VertexAI invocation parameters: {self.invocation_params}"
@@ -93,7 +135,7 @@ class VertexAIModel(BaseEvalModel):
93
135
 
94
136
  @property
95
137
  def is_codey_model(self) -> bool:
96
- return is_codey_model(self.tuned_model_name or self.model_name)
138
+ return is_codey_model(self.tuned_model or self.model)
97
139
 
98
140
  @property
99
141
  def _init_params(self) -> Dict[str, Any]:
@@ -477,6 +477,7 @@ def launch_app(
477
477
  f"port {port} is not occupied by another process) or file an issue "
478
478
  f"with us at https://github.com/Arize-ai/phoenix"
479
479
  )
480
+ _session = None
480
481
  return None
481
482
 
482
483
  print(f"🌍 To view the Phoenix app in your browser, visit {_session.url}")
@@ -489,7 +490,9 @@ def active_session() -> Optional[Session]:
489
490
  """
490
491
  Returns the active session if one exists, otherwise returns None
491
492
  """
492
- return _session
493
+ if _session and _session.active:
494
+ return _session
495
+ return None
493
496
 
494
497
 
495
498
  def close_app() -> None:
@@ -166,8 +166,14 @@ class _Translator(ast.NodeTransformer):
166
166
  # In Python 3.9+, we can use `ast.unparse(node)` (no need for `source`).
167
167
  self._source = source
168
168
 
169
+ def visit_Subscript(self, node: ast.Subscript) -> Any:
170
+ if _is_metadata(node) and (key := _get_subscript_key(node)):
171
+ return _ast_metadata_subscript(key)
172
+ source_segment: str = cast(str, ast.get_source_segment(self._source, node))
173
+ raise SyntaxError(f"invalid expression: {source_segment}") # TODO: add details
174
+
169
175
  def visit_Attribute(self, node: ast.Attribute) -> Any:
170
- if _is_eval(node.value) and (eval_name := _get_eval_name(node.value)):
176
+ if _is_eval(node.value) and (eval_name := _get_subscript_key(node.value)):
171
177
  # e.g. `evals["name"].score`
172
178
  return _ast_evaluation_result_value(eval_name, node.attr)
173
179
  source_segment: str = cast(str, ast.get_source_segment(self._source, node))
@@ -209,9 +215,11 @@ def _validate_expression(
209
215
  if i == 0:
210
216
  if isinstance(node, (ast.BoolOp, ast.Compare)):
211
217
  continue
218
+ elif _is_metadata(node):
219
+ continue
212
220
  elif _is_eval(node):
213
221
  # e.g. `evals["name"]`
214
- if not (eval_name := _get_eval_name(node)) or (
222
+ if not (eval_name := _get_subscript_key(node)) or (
215
223
  valid_eval_names is not None and eval_name not in valid_eval_names
216
224
  ):
217
225
  source_segment = cast(str, ast.get_source_segment(source, node))
@@ -296,6 +304,19 @@ def _ast_evaluation_result_value(name: str, attr: str) -> ast.expr:
296
304
  return ast.parse(source, mode="eval").body
297
305
 
298
306
 
307
+ def _ast_metadata_subscript(key: str) -> ast.expr:
308
+ source = (
309
+ f"_MISSING if ("
310
+ f" _MD := span.attributes.get('metadata')"
311
+ f") is None else ("
312
+ f" _MISSING if not hasattr(_MD, 'get') or ("
313
+ f" _VALUE := _MD.get('{key}')"
314
+ f" ) is None else _VALUE"
315
+ f")"
316
+ )
317
+ return ast.parse(source, mode="eval").body
318
+
319
+
299
320
  def _is_eval(node: Any) -> TypeGuard[ast.Subscript]:
300
321
  # e.g. `evals["name"]`
301
322
  return (
@@ -305,7 +326,16 @@ def _is_eval(node: Any) -> TypeGuard[ast.Subscript]:
305
326
  )
306
327
 
307
328
 
308
- def _get_eval_name(node: ast.Subscript) -> Optional[str]:
329
+ def _is_metadata(node: Any) -> TypeGuard[ast.Subscript]:
330
+ # e.g. `metadata["name"]`
331
+ return (
332
+ isinstance(node, ast.Subscript)
333
+ and isinstance(value := node.value, ast.Name)
334
+ and value.id == "metadata"
335
+ )
336
+
337
+
338
+ def _get_subscript_key(node: ast.Subscript) -> Optional[str]:
309
339
  if sys.version_info < (3, 9):
310
340
  # Note that `ast.Index` is deprecated in Python 3.9+, but is necessary
311
341
  # for Python 3.8 as part of `ast.Subscript`.
phoenix/trace/fixtures.py CHANGED
@@ -137,8 +137,6 @@ def _download_traces_fixture(
137
137
  def load_example_traces(use_case: str) -> TraceDataset:
138
138
  """
139
139
  Loads a trace dataframe by name.
140
-
141
- NB: this functionality is under active construction.
142
140
  """
143
141
  fixture = _get_trace_fixture_by_name(use_case)
144
142
  return TraceDataset(json_lines_to_df(_download_traces_fixture(fixture)))
@@ -101,9 +101,12 @@ class TraceDataset:
101
101
  """
102
102
 
103
103
  name: str
104
+ """
105
+ A human readable name for the dataset.
106
+ """
104
107
  dataframe: pd.DataFrame
105
108
  evaluations: List[Evaluations] = []
106
- _id: UUID = uuid4()
109
+ _id: UUID
107
110
  _data_file_name: str = "data.parquet"
108
111
 
109
112
  def __init__(
@@ -131,8 +134,10 @@ class TraceDataset:
131
134
  raise ValueError(
132
135
  f"The dataframe is missing some required columns: {', '.join(missing_columns)}"
133
136
  )
137
+ self._id = uuid4()
134
138
  self.dataframe = normalize_dataframe(dataframe)
135
- self.name = name or f"{GENERATED_DATASET_NAME_PREFIX}{str(uuid4())}"
139
+ # TODO: This is not used in any meaningful way. Should remove
140
+ self.name = name or f"{GENERATED_DATASET_NAME_PREFIX}{str(self._id)}"
136
141
  self.evaluations = list(evaluations)
137
142
 
138
143
  @classmethod
@@ -249,6 +254,8 @@ class TraceDataset:
249
254
  }
250
255
  )
251
256
  parquet.write_table(table, path)
257
+ print(f"💾 Trace dataset saved to under ID: {self._id}")
258
+ print(f"📂 Trace dataset path: {path}")
252
259
  return self._id
253
260
 
254
261
  @classmethod
@@ -288,7 +295,7 @@ class TraceDataset:
288
295
  warn(f'Failed to load evaluations with id: "{eval_id}"')
289
296
  table = parquet.read_table(path)
290
297
  dataframe = table.to_pandas()
291
- ds = cls(dataframe, dataset_name, evaluations)
298
+ ds = cls(dataframe=dataframe, name=dataset_name, evaluations=evaluations)
292
299
  ds._id = dataset_id
293
300
  return ds
294
301
 
phoenix/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "3.0.3"
1
+ __version__ = "3.1.1"