arize-phoenix 2.1.0__py3-none-any.whl → 2.2.0rc0__py3-none-any.whl

This diff represents the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of arize-phoenix might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: arize-phoenix
-Version: 2.1.0
+Version: 2.2.0rc0
 Summary: ML Observability in your notebook
 Project-URL: Documentation, https://docs.arize.com/phoenix/
 Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -38,8 +38,10 @@ Requires-Dist: umap-learn
 Requires-Dist: uvicorn
 Requires-Dist: wrapt
 Provides-Extra: dev
+Requires-Dist: anthropic; extra == 'dev'
 Requires-Dist: arize[autoembeddings,llm-evaluation]; extra == 'dev'
 Requires-Dist: gcsfs; extra == 'dev'
+Requires-Dist: google-cloud-aiplatform>=1.3; extra == 'dev'
 Requires-Dist: hatch; extra == 'dev'
 Requires-Dist: jupyter; extra == 'dev'
 Requires-Dist: langchain>=0.0.334; extra == 'dev'
@@ -1,9 +1,10 @@
-phoenix/__init__.py,sha256=VtlQlins6lDwQzHwmklebwg43hXwQA8wl9Qcu9iQ67E,1373
+phoenix/__init__.py,sha256=EEh0vZGRQS8686h34GQ64OjQoZ7neKYO_iO5j6Oa9Jw,1402
 phoenix/config.py,sha256=ErvGg22SSiuqPJtIX1WZE5KcM2lt6XOGZ__HwRg3JqA,2390
 phoenix/datetime_utils.py,sha256=D955QLrkgrrSdUM6NyqbCeAu2SMsjhR5rHVQEsVUdng,2773
 phoenix/exceptions.py,sha256=igIWGAg3m8jm5YwQDeCY1p8ml_60A7zaGVXJ1yZhY9s,44
 phoenix/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 phoenix/services.py,sha256=slL4Uu___QQSKEssgD738-WAld-kzVQnpW92uKLxV4E,4886
+phoenix/version.py,sha256=pG4VqwySwU54SQ_mHFbajVD0oK3-38mb_fLPwWYLdoc,25
 phoenix/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 phoenix/core/embedding_dimension.py,sha256=zKGbcvwOXgLf-yrJBpQyKtd-LEOPRKHnUToyAU8Owis,87
 phoenix/core/evals.py,sha256=OrHeYlh804rpcZIXTA6kan2mzSZMfgpphNNQdPMpNoM,7597
@@ -23,15 +24,17 @@ phoenix/experimental/evals/evaluators.py,sha256=rLvvXBK2H_cjJyRMBQStTlMYntTJI3Rt
 phoenix/experimental/evals/retrievals.py,sha256=o3fqrsYbYZjyGj_jWkN_9VQVyXjLkDKDw5Ws7l8bwdI,3828
 phoenix/experimental/evals/functions/__init__.py,sha256=3FMGrjmgxegXAwgDV_RpaN-73cFVyBiO8YwZvml5P9c,156
 phoenix/experimental/evals/functions/classify.py,sha256=uCTZR_ctQorzS0Abcwxzsza0g-4q_91DHiObjJISIXE,18177
-phoenix/experimental/evals/functions/executor.py,sha256=TSw2lVhkl6-VBYcXSlUl3E0U4OuJWnhwhed7NmFgbF8,13376
+phoenix/experimental/evals/functions/executor.py,sha256=bM7PI2rcPukQQzZ2rWqN_-Kfo_a935YJj0bh1Red8Ps,13406
 phoenix/experimental/evals/functions/generate.py,sha256=sdr6TeXn5JLEKM0NqYtvq01Lq48Q7uatb0fsq5zQgVY,5310
 phoenix/experimental/evals/functions/processing.py,sha256=F4xtLsulLV4a8CkuLldRddsCim75dSTIShEJUYN6I6w,1823
 phoenix/experimental/evals/models/__init__.py,sha256=j1N7DhiOPbcaemtVBONcQ0miNnGQwEXz4u3P3Vwe6-4,320
+phoenix/experimental/evals/models/anthropic.py,sha256=Tcv8R-vTyY8sLAv1wIHeZdMCBtqhyayqMPJXRDc7blI,6267
 phoenix/experimental/evals/models/base.py,sha256=aSE3Al3MsLvzNKuN2e-z6O-RB5mgpisH4UQqwNQcqp0,7734
 phoenix/experimental/evals/models/bedrock.py,sha256=CRPmBuSLc_nRnKKWLHhGMxdWEISIKUJM1tzIlOQ_qWM,7927
 phoenix/experimental/evals/models/litellm.py,sha256=jrRlph22xWxMXMUabUWjIO2e-sHxQzlQwSM-SnAACFQ,4714
 phoenix/experimental/evals/models/openai.py,sha256=Kl2uES3HRcZGFqblfBQZ6D1BpDffuLZDAqVTjhrSXXQ,17101
 phoenix/experimental/evals/models/rate_limiters.py,sha256=5GVN0RQKt36Przg3-9jLgocRmyg-tbeO-cdbuLIx89w,10160
+phoenix/experimental/evals/models/vertex.py,sha256=nwTIjVn4gGFfoKfGqUGwPD9GLJaBM4HLXDnMNs9hSrw,5407
 phoenix/experimental/evals/models/vertexai.py,sha256=NfBpQq0l7XzP-wDEDsK27IRiQBzA1GXEdfwlAf8leX4,5609
 phoenix/experimental/evals/templates/__init__.py,sha256=GSJSoWJ4jwyoUANniidmWMUtXQhNQYbTJbfFqCvuYuo,1470
 phoenix/experimental/evals/templates/default_templates.py,sha256=_VVxuhPsY8fkasA9XMNNM_fvftltkIfVCfElSdFbsQY,21056
@@ -133,7 +136,7 @@ phoenix/trace/__init__.py,sha256=lnuxATMemAqjURYqOfIo_HyCo5oIWIVTy98XAsiS1d8,215
 phoenix/trace/evaluation_conventions.py,sha256=t8jydM3U0-T5YpiQKRJ3tWdWGlHtzKyttYdw-ddvPOk,1048
 phoenix/trace/exporter.py,sha256=z3xrGJhIRh7XMy4Q1FkR3KmFZym-GX0XxLTZ6eSnN0Q,4347
 phoenix/trace/fixtures.py,sha256=lFuhPe-di54BmCT-RxS95m3e2-z1YBOo8CxsqYSgDD8,6341
-phoenix/trace/otel.py,sha256=cYD93sW2otPcfwMWMaAn7QeWgwn2o4Y0fdo7IyEpkZY,13838
+phoenix/trace/otel.py,sha256=4cJ85O_y_S6C_kkVvYh8d1d1mzKpGk_eQKjg688PI0k,13899
 phoenix/trace/schemas.py,sha256=m1wVlYFT6qL3FovD3TtTYsEgN6OHvv52gNdJkoPCmuY,5400
 phoenix/trace/semantic_conventions.py,sha256=u6NG85ZhbreriZr8cqJaddldM_jUcew7JilszY7JUk8,4652
 phoenix/trace/span_evaluations.py,sha256=9RTJ8BFhXDJNtqErWRlMj65FG7wJiI41YTgB7vYLqcY,8429
@@ -162,8 +165,8 @@ phoenix/trace/v1/evaluation_pb2.pyi,sha256=cCbbx06gwQmaH14s3J1X25TtaARh-k1abbxQd
 phoenix/utilities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 phoenix/utilities/error_handling.py,sha256=7b5rpGFj9EWZ8yrZK1IHvxB89suWk3lggDayUQcvZds,1946
 phoenix/utilities/logging.py,sha256=lDXd6EGaamBNcQxL4vP1au9-i_SXe0OraUDiJOcszSw,222
-arize_phoenix-2.1.0.dist-info/METADATA,sha256=GHS_5V3vwFzP_F--H0UJ9YNnvDcyH79gLiTJ_lG6CMk,26378
-arize_phoenix-2.1.0.dist-info/WHEEL,sha256=mRYSEL3Ih6g5a_CVMIcwiF__0Ae4_gLYh01YFNwiq1k,87
-arize_phoenix-2.1.0.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
-arize_phoenix-2.1.0.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
-arize_phoenix-2.1.0.dist-info/RECORD,,
+arize_phoenix-2.2.0rc0.dist-info/METADATA,sha256=cV0tw8Sq1yms7yncqdyAkIoUOMVBIfDQkS_X9wlmgKY,26482
+arize_phoenix-2.2.0rc0.dist-info/WHEEL,sha256=mRYSEL3Ih6g5a_CVMIcwiF__0Ae4_gLYh01YFNwiq1k,87
+arize_phoenix-2.2.0rc0.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
+arize_phoenix-2.2.0rc0.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
+arize_phoenix-2.2.0rc0.dist-info/RECORD,,
phoenix/__init__.py CHANGED
@@ -5,8 +5,7 @@ from .session.evaluation import log_evaluations
 from .session.session import NotebookEnvironment, Session, active_session, close_app, launch_app
 from .trace.fixtures import load_example_traces
 from .trace.trace_dataset import TraceDataset
-
-__version__ = "2.1.0"
+from .version import __version__

 # module level doc-string
 __doc__ = """
@@ -25,6 +24,7 @@ Here are just a few of the things that phoenix does well:
 """

 __all__ = [
+    "__version__",
     "Dataset",
     "EmbeddingColumnNames",
     "RetrievalEmbeddingColumnNames",
phoenix/experimental/evals/functions/executor.py CHANGED
@@ -275,6 +275,7 @@ class SyncExecutor(Executor):
                     result = self.generate(input)
                     outputs[index] = result
                     progress_bar.update()
+                    break
                 except Exception as exc:
                     is_phoenix_exception = isinstance(exc, PhoenixException)
                     if attempt >= self.max_retries or is_phoenix_exception:
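The added `break` is the substance of this fix: without it, a successful call to `self.generate` would still fall through to the remaining retry attempts and re-run the task. A minimal stand-alone sketch of the restored pattern (hypothetical names, not the actual SyncExecutor code):

# Hypothetical sketch of the retry loop the fix restores.
def run_with_retries(generate, inputs, max_retries=3):
    outputs = [None] * len(inputs)
    for index, item in enumerate(inputs):
        for attempt in range(max_retries + 1):
            try:
                outputs[index] = generate(item)
                break  # success: stop retrying this input
            except Exception:
                if attempt >= max_retries:
                    raise  # retries exhausted; surface the error
    return outputs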
phoenix/experimental/evals/models/anthropic.py ADDED
@@ -0,0 +1,171 @@
+import logging
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from phoenix.experimental.evals.models.base import BaseEvalModel
+from phoenix.experimental.evals.models.rate_limiters import RateLimiter
+
+if TYPE_CHECKING:
+    from tiktoken import Encoding
+
+logger = logging.getLogger(__name__)
+
+MODEL_TOKEN_LIMIT_MAPPING = {
+    "claude-2.1": 200000,
+    "claude-2.0": 100000,
+    "claude-instant-1.2": 100000,
+}
+
+
+@dataclass
+class AnthropicModel(BaseEvalModel):
+    model: str = "claude-2.1"
+    """The model name to use."""
+    temperature: float = 0.0
+    """What sampling temperature to use."""
+    max_tokens: int = 256
+    """The maximum number of tokens to generate in the completion."""
+    top_p: float = 1
+    """Total probability mass of tokens to consider at each step."""
+    top_k: int = 256
+    """The cutoff where the model no longer selects the words"""
+    stop_sequences: List[str] = field(default_factory=list)
+    """If the model encounters a stop sequence, it stops generating further tokens. """
+    max_retries: int = 6
+    """Maximum number of retries to make when generating."""
+    retry_min_seconds: int = 10
+    """Minimum number of seconds to wait when retrying."""
+    retry_max_seconds: int = 60
+    """Maximum number of seconds to wait when retrying."""
+    extra_parameters: Dict[str, Any] = field(default_factory=dict)
+    """Any extra parameters to add to the request body (e.g., countPenalty for a21 models)"""
+    max_content_size: Optional[int] = None
+    """If you're using a fine-tuned model, set this to the maximum content size"""
+
+    def __post_init__(self) -> None:
+        self._init_environment()
+        self._init_client()
+        self._init_tiktoken()
+        self._init_rate_limiter()
+        self.retry = self._retry(
+            error_types=[],  # default to catching all errors
+            min_seconds=self.retry_min_seconds,
+            max_seconds=self.retry_max_seconds,
+            max_retries=self.max_retries,
+        )
+
+    def _init_environment(self) -> None:
+        try:
+            import tiktoken
+
+            self._tiktoken = tiktoken
+        except ImportError:
+            self._raise_import_error(
+                package_name="tiktoken",
+            )
+
+    def _init_client(self) -> None:
+        try:
+            import anthropic  # type:ignore
+
+            self._anthropic = anthropic
+            self.client = self._anthropic.Anthropic()
+            self.async_client = self._anthropic.AsyncAnthropic()
+        except ImportError:
+            self._raise_import_error(
+                package_name="anthropic",
+            )
+
+    def _init_tiktoken(self) -> None:
+        try:
+            encoding = self._tiktoken.encoding_for_model(self.model)
+        except KeyError:
+            logger.warning("Warning: model not found. Using cl100k_base encoding.")
+            encoding = self._tiktoken.get_encoding("cl100k_base")
+        self._tiktoken_encoding = encoding
+
+    def _init_rate_limiter(self) -> None:
+        self._rate_limiter = RateLimiter(
+            rate_limit_error=self._anthropic.RateLimitError,
+            max_rate_limit_retries=10,
+            initial_per_second_request_rate=1,
+            maximum_per_second_request_rate=20,
+            enforcement_window_minutes=1,
+        )
+
+    def invocation_parameters(self) -> Dict[str, Any]:
+        return {
+            "max_tokens_to_sample": self.max_tokens,
+            "stop_sequences": self.stop_sequences,
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+            "top_k": self.top_k,
+        }
+
+    @property
+    def encoder(self) -> "Encoding":
+        return self._tiktoken_encoding
+
+    def get_tokens_from_text(self, text: str) -> List[int]:
+        return self.encoder.encode(text)
+
+    def get_text_from_tokens(self, tokens: List[int]) -> str:
+        return self.encoder.decode(tokens)
+
+    @property
+    def max_context_size(self) -> int:
+        context_size = self.max_content_size or MODEL_TOKEN_LIMIT_MAPPING.get(self.model, None)
+
+        if context_size is None:
+            raise ValueError(
+                "Can't determine maximum context size. An unknown model name was "
+                + f"used: {self.model}. Please set the `max_content_size` argument"
+                + "when using fine-tuned models. "
+            )
+
+        return context_size
+
+    def _generate(self, prompt: str, **kwargs: Dict[str, Any]) -> str:
+        # instruction is an invalid input to Anthropic models, it is passed in by
+        # BaseEvalModel.__call__ and needs to be removed
+        kwargs.pop("instruction", None)
+        invocation_parameters = self.invocation_parameters()
+        invocation_parameters.update(kwargs)
+        response = self._generate_with_retry(
+            model=self.model,
+            prompt=self._format_prompt_for_claude(prompt),
+            **invocation_parameters,
+        )
+
+        return str(response)
+
+    def _generate_with_retry(self, **kwargs: Any) -> Any:
+        @self.retry
+        @self._rate_limiter.limit
+        def _completion_with_retry(**kwargs: Any) -> Any:
+            response = self.client.completions.create(**kwargs)
+            return response.completion
+
+        return _completion_with_retry(**kwargs)
+
+    async def _async_generate(self, prompt: str, **kwargs: Dict[str, Any]) -> str:
+        invocation_parameters = self.invocation_parameters()
+        invocation_parameters.update(kwargs)
+        response = await self._async_generate_with_retry(
+            model=self.model, prompt=self._format_prompt_for_claude(prompt), **invocation_parameters
+        )
+
+        return str(response)
+
+    async def _async_generate_with_retry(self, **kwargs: Any) -> Any:
+        @self.retry
+        @self._rate_limiter.alimit
+        async def _async_completion_with_retry(**kwargs: Any) -> Any:
+            response = await self.async_client.completions.create(**kwargs)
+            return response.completion
+
+        return await _async_completion_with_retry(**kwargs)
+
+    def _format_prompt_for_claude(self, prompt: str) -> str:
+        # Claude requires prompt in the format of Human: ... Assistant:
+        return f"{self._anthropic.HUMAN_PROMPT} {prompt} {self._anthropic.AI_PROMPT}"
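For orientation, a minimal usage sketch of the new wrapper (hypothetical example, not part of the diff; it assumes the `anthropic` and `tiktoken` packages are installed, `ANTHROPIC_API_KEY` is set, and instances are callable via the `BaseEvalModel.__call__` referenced in `_generate` above):

# Hypothetical usage sketch; requires `anthropic` and `tiktoken` plus an
# ANTHROPIC_API_KEY in the environment.
from phoenix.experimental.evals.models.anthropic import AnthropicModel

model = AnthropicModel(model="claude-2.1", temperature=0.0)
# The prompt is wrapped as "Human: ... Assistant:" by _format_prompt_for_claude.
print(model("Answer in one word: is Phoenix an observability tool?"))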
phoenix/experimental/evals/models/vertex.py ADDED
@@ -0,0 +1,155 @@
+import logging
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Any, Dict, List
+
+from phoenix.experimental.evals.models.base import BaseEvalModel
+from phoenix.experimental.evals.models.rate_limiters import RateLimiter
+
+if TYPE_CHECKING:
+    from tiktoken import Encoding
+
+logger = logging.getLogger(__name__)
+
+
+# https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models
+MODEL_TOKEN_LIMIT_MAPPING = {
+    "gemini-pro": 32760,
+    "gemini-pro-vision": 16384,
+}
+
+
+@dataclass
+class GeminiModel(BaseEvalModel):
+    model: str = "gemini-pro"
+    """The model name to use."""
+    temperature: float = 0.0
+    """What sampling temperature to use."""
+    max_tokens: int = 256
+    """The maximum number of tokens to generate in the completion."""
+    top_p: float = 1
+    """Total probability mass of tokens to consider at each step."""
+    top_k: int = 32
+    """The cutoff where the model no longer selects the words"""
+    stop_sequences: List[str] = field(default_factory=list)
+    """If the model encounters a stop sequence, it stops generating further tokens. """
+    max_retries: int = 6
+    """Maximum number of retries to make when generating."""
+    retry_min_seconds: int = 10
+    """Minimum number of seconds to wait when retrying."""
+    retry_max_seconds: int = 60
+    """Maximum number of seconds to wait when retrying."""
+
+    def __post_init__(self) -> None:
+        self._init_client()
+        self._init_rate_limiter()
+        self.retry = self._retry(
+            error_types=[],  # default to catching all errors
+            min_seconds=self.retry_min_seconds,
+            max_seconds=self.retry_max_seconds,
+            max_retries=self.max_retries,
+        )
+
+    def _init_client(self) -> None:
+        try:
+            from google.api_core import exceptions  # type:ignore
+            from vertexai.preview import generative_models as vertex  # type:ignore
+
+            self._vertex = vertex
+            self._gcp_exceptions = exceptions
+            self._model = self._vertex.GenerativeModel(self.model)
+        except ImportError:
+            self._raise_import_error(
+                package_name="vertexai",
+            )
+
+    def _init_rate_limiter(self) -> None:
+        self._rate_limiter = RateLimiter(
+            rate_limit_error=self._gcp_exceptions.ResourceExhausted,
+            max_rate_limit_retries=10,
+            initial_per_second_request_rate=1,
+            maximum_per_second_request_rate=20,
+            enforcement_window_minutes=1,
+        )
+
+    @property
+    def encoder(self) -> "Encoding":
+        raise TypeError("Gemini models contain their own token counting")
+
+    def get_tokens_from_text(self, text: str) -> List[int]:
+        raise NotImplementedError
+
+    def get_text_from_tokens(self, tokens: List[int]) -> str:
+        raise NotImplementedError
+
+    @property
+    def max_context_size(self) -> int:
+        context_size = MODEL_TOKEN_LIMIT_MAPPING.get(self.model, None)
+
+        if context_size is None:
+            raise ValueError(
+                "Can't determine maximum context size. An unknown model name was "
+                + f"used: {self.model}. Please set the `max_content_size` argument"
+                + "when using fine-tuned models. "
+            )
+
+        return context_size
+
+    @property
+    def generation_config(self) -> Dict[str, Any]:
+        return {
+            "temperature": self.temperature,
+            "max_output_tokens": self.max_tokens,
+            "top_p": self.top_p,
+            "top_k": self.top_k,
+            "stop_sequences": self.stop_sequences,
+        }
+
+    def _generate(self, prompt: str, **kwargs: Dict[str, Any]) -> str:
+        # instruction is an invalid input to Gemini models, it is passed in by
+        # BaseEvalModel.__call__ and needs to be removed
+        kwargs.pop("instruction", None)
+        response = self._generate_with_retry(
+            prompt=prompt,
+            generation_config=self.generation_config,
+            **kwargs,
+        )
+
+        return str(response)
+
+    def _generate_with_retry(
+        self, prompt: str, generation_config: Dict[str, Any], **kwargs: Any
+    ) -> Any:
+        @self.retry
+        @self._rate_limiter.limit
+        def _completion_with_retry(**kwargs: Any) -> Any:
+            response = self._model.generate_content(
+                contents=prompt, generation_config=generation_config, **kwargs
+            )
+            candidate = response.candidates[0]
+            return candidate.text
+
+        return _completion_with_retry(**kwargs)
+
+    async def _async_generate(self, prompt: str, **kwargs: Dict[str, Any]) -> str:
+        kwargs.pop("instruction", None)
+        response = await self._async_generate_with_retry(
+            prompt=prompt,
+            generation_config=self.generation_config,
+            **kwargs,
+        )
+
+        return str(response)
+
+    async def _async_generate_with_retry(
+        self, prompt: str, generation_config: Dict[str, Any], **kwargs: Any
+    ) -> Any:
+        @self.retry
+        @self._rate_limiter.limit
+        async def _completion_with_retry(**kwargs: Any) -> Any:
+            response = await self._model.generate_content_async(
+                contents=prompt, generation_config=generation_config, **kwargs
+            )
+            candidate = response.candidates[0]
+            return candidate.text
+
+        return await _completion_with_retry(**kwargs)
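A similar hypothetical sketch for the new Gemini wrapper (assumes `google-cloud-aiplatform>=1.3` is installed, matching the new dev requirement, and that Vertex AI credentials are configured; the `vertexai.init` project and location values are illustrative):

# Hypothetical usage sketch; requires google-cloud-aiplatform>=1.3 and
# configured GCP credentials for Vertex AI.
import vertexai
from phoenix.experimental.evals.models.vertex import GeminiModel

vertexai.init(project="my-gcp-project", location="us-central1")  # illustrative values
model = GeminiModel(model="gemini-pro", temperature=0.0)
print(model("Answer in one word: is Phoenix an observability tool?"))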
phoenix/trace/otel.py CHANGED
@@ -396,11 +396,11 @@ def _encode_value(value: AttributeValue) -> AnyValue:
         return AnyValue(int_value=value)
     if isinstance(value, float):
         return AnyValue(double_value=value)
-    if isinstance(value, Sequence):
-        return AnyValue(array_value=ArrayValue(values=map(_encode_value, value)))
     if isinstance(value, bytes):
         return AnyValue(bytes_value=value)
-    assert_never(value)
+    if isinstance(value, Sequence):
+        return AnyValue(array_value=ArrayValue(values=map(_encode_value, value)))
+    raise ValueError(f"Unexpected attribute value {value} with type {type(value)}.")


__all__ = [
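The branch reorder matters because `bytes` satisfies the `Sequence` ABC: with the old order, a bytes value matched `isinstance(value, Sequence)` first and was encoded element by element as an array, leaving the `bytes_value` branch unreachable. The fallback also changes from `assert_never` (a type-checker-only exhaustiveness guard) to an explicit runtime `ValueError`. The underlying Python behavior can be checked directly:

# bytes is registered as a Sequence, so the bytes check must precede
# the Sequence check for the bytes_value branch to be reachable.
from collections.abc import Sequence

assert isinstance(b"raw-bytes", Sequence)
assert isinstance(b"raw-bytes", bytes)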
phoenix/version.py ADDED
@@ -0,0 +1 @@
+__version__ = "2.2.0rc0"
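Since `phoenix/__init__.py` now re-exports this value (see the `from .version import __version__` change above), the public attribute is unchanged for callers; a quick sketch:

# The version moves to its own module, but the public attribute still works.
import phoenix

print(phoenix.__version__)  # "2.2.0rc0" for this release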