arize-phoenix 5.5.2__py3-none-any.whl → 5.7.0__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Note: this version of arize-phoenix has been flagged as potentially problematic.
Files changed (186)
  1. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/METADATA +4 -7
  2. arize_phoenix-5.7.0.dist-info/RECORD +330 -0
  3. phoenix/config.py +50 -8
  4. phoenix/core/model.py +3 -3
  5. phoenix/core/model_schema.py +41 -50
  6. phoenix/core/model_schema_adapter.py +17 -16
  7. phoenix/datetime_utils.py +2 -2
  8. phoenix/db/bulk_inserter.py +10 -20
  9. phoenix/db/engines.py +2 -1
  10. phoenix/db/enums.py +2 -2
  11. phoenix/db/helpers.py +8 -7
  12. phoenix/db/insertion/dataset.py +9 -19
  13. phoenix/db/insertion/document_annotation.py +14 -13
  14. phoenix/db/insertion/helpers.py +6 -16
  15. phoenix/db/insertion/span_annotation.py +14 -13
  16. phoenix/db/insertion/trace_annotation.py +14 -13
  17. phoenix/db/insertion/types.py +19 -30
  18. phoenix/db/migrations/versions/3be8647b87d8_add_token_columns_to_spans_table.py +8 -8
  19. phoenix/db/models.py +28 -28
  20. phoenix/experiments/evaluators/base.py +2 -1
  21. phoenix/experiments/evaluators/code_evaluators.py +4 -5
  22. phoenix/experiments/evaluators/llm_evaluators.py +157 -4
  23. phoenix/experiments/evaluators/utils.py +3 -2
  24. phoenix/experiments/functions.py +10 -21
  25. phoenix/experiments/tracing.py +2 -1
  26. phoenix/experiments/types.py +20 -29
  27. phoenix/experiments/utils.py +2 -1
  28. phoenix/inferences/errors.py +6 -5
  29. phoenix/inferences/fixtures.py +6 -5
  30. phoenix/inferences/inferences.py +37 -37
  31. phoenix/inferences/schema.py +11 -10
  32. phoenix/inferences/validation.py +13 -14
  33. phoenix/logging/_formatter.py +3 -3
  34. phoenix/metrics/__init__.py +5 -4
  35. phoenix/metrics/binning.py +2 -1
  36. phoenix/metrics/metrics.py +2 -1
  37. phoenix/metrics/mixins.py +7 -6
  38. phoenix/metrics/retrieval_metrics.py +2 -1
  39. phoenix/metrics/timeseries.py +5 -4
  40. phoenix/metrics/wrappers.py +2 -2
  41. phoenix/pointcloud/clustering.py +3 -4
  42. phoenix/pointcloud/pointcloud.py +7 -5
  43. phoenix/pointcloud/umap_parameters.py +2 -1
  44. phoenix/server/api/dataloaders/annotation_summaries.py +12 -19
  45. phoenix/server/api/dataloaders/average_experiment_run_latency.py +2 -2
  46. phoenix/server/api/dataloaders/cache/two_tier_cache.py +3 -2
  47. phoenix/server/api/dataloaders/dataset_example_revisions.py +3 -8
  48. phoenix/server/api/dataloaders/dataset_example_spans.py +2 -5
  49. phoenix/server/api/dataloaders/document_evaluation_summaries.py +12 -18
  50. phoenix/server/api/dataloaders/document_evaluations.py +3 -7
  51. phoenix/server/api/dataloaders/document_retrieval_metrics.py +6 -13
  52. phoenix/server/api/dataloaders/experiment_annotation_summaries.py +4 -8
  53. phoenix/server/api/dataloaders/experiment_error_rates.py +2 -5
  54. phoenix/server/api/dataloaders/experiment_run_annotations.py +3 -7
  55. phoenix/server/api/dataloaders/experiment_run_counts.py +1 -5
  56. phoenix/server/api/dataloaders/experiment_sequence_number.py +2 -5
  57. phoenix/server/api/dataloaders/latency_ms_quantile.py +21 -30
  58. phoenix/server/api/dataloaders/min_start_or_max_end_times.py +7 -13
  59. phoenix/server/api/dataloaders/project_by_name.py +3 -3
  60. phoenix/server/api/dataloaders/record_counts.py +11 -18
  61. phoenix/server/api/dataloaders/span_annotations.py +3 -7
  62. phoenix/server/api/dataloaders/span_dataset_examples.py +3 -8
  63. phoenix/server/api/dataloaders/span_descendants.py +3 -7
  64. phoenix/server/api/dataloaders/span_projects.py +2 -2
  65. phoenix/server/api/dataloaders/token_counts.py +12 -19
  66. phoenix/server/api/dataloaders/trace_row_ids.py +3 -7
  67. phoenix/server/api/dataloaders/user_roles.py +3 -3
  68. phoenix/server/api/dataloaders/users.py +3 -3
  69. phoenix/server/api/helpers/__init__.py +4 -3
  70. phoenix/server/api/helpers/dataset_helpers.py +10 -9
  71. phoenix/server/api/helpers/playground_clients.py +671 -0
  72. phoenix/server/api/helpers/playground_registry.py +70 -0
  73. phoenix/server/api/helpers/playground_spans.py +325 -0
  74. phoenix/server/api/input_types/AddExamplesToDatasetInput.py +2 -2
  75. phoenix/server/api/input_types/AddSpansToDatasetInput.py +2 -2
  76. phoenix/server/api/input_types/ChatCompletionInput.py +38 -0
  77. phoenix/server/api/input_types/ChatCompletionMessageInput.py +13 -1
  78. phoenix/server/api/input_types/ClusterInput.py +2 -2
  79. phoenix/server/api/input_types/DeleteAnnotationsInput.py +1 -3
  80. phoenix/server/api/input_types/DeleteDatasetExamplesInput.py +2 -2
  81. phoenix/server/api/input_types/DeleteExperimentsInput.py +1 -3
  82. phoenix/server/api/input_types/DimensionFilter.py +4 -4
  83. phoenix/server/api/input_types/GenerativeModelInput.py +17 -0
  84. phoenix/server/api/input_types/Granularity.py +1 -1
  85. phoenix/server/api/input_types/InvocationParameters.py +156 -13
  86. phoenix/server/api/input_types/PatchDatasetExamplesInput.py +2 -2
  87. phoenix/server/api/input_types/TemplateOptions.py +10 -0
  88. phoenix/server/api/mutations/__init__.py +4 -0
  89. phoenix/server/api/mutations/chat_mutations.py +374 -0
  90. phoenix/server/api/mutations/dataset_mutations.py +4 -4
  91. phoenix/server/api/mutations/experiment_mutations.py +1 -2
  92. phoenix/server/api/mutations/export_events_mutations.py +7 -7
  93. phoenix/server/api/mutations/span_annotations_mutations.py +4 -4
  94. phoenix/server/api/mutations/trace_annotations_mutations.py +4 -4
  95. phoenix/server/api/mutations/user_mutations.py +4 -4
  96. phoenix/server/api/openapi/schema.py +2 -2
  97. phoenix/server/api/queries.py +61 -72
  98. phoenix/server/api/routers/oauth2.py +4 -4
  99. phoenix/server/api/routers/v1/datasets.py +22 -36
  100. phoenix/server/api/routers/v1/evaluations.py +6 -5
  101. phoenix/server/api/routers/v1/experiment_evaluations.py +2 -2
  102. phoenix/server/api/routers/v1/experiment_runs.py +2 -2
  103. phoenix/server/api/routers/v1/experiments.py +4 -4
  104. phoenix/server/api/routers/v1/spans.py +13 -12
  105. phoenix/server/api/routers/v1/traces.py +5 -5
  106. phoenix/server/api/routers/v1/utils.py +5 -5
  107. phoenix/server/api/schema.py +42 -10
  108. phoenix/server/api/subscriptions.py +347 -494
  109. phoenix/server/api/types/AnnotationSummary.py +3 -3
  110. phoenix/server/api/types/ChatCompletionSubscriptionPayload.py +44 -0
  111. phoenix/server/api/types/Cluster.py +8 -7
  112. phoenix/server/api/types/Dataset.py +5 -4
  113. phoenix/server/api/types/Dimension.py +3 -3
  114. phoenix/server/api/types/DocumentEvaluationSummary.py +8 -7
  115. phoenix/server/api/types/EmbeddingDimension.py +6 -5
  116. phoenix/server/api/types/EvaluationSummary.py +3 -3
  117. phoenix/server/api/types/Event.py +7 -7
  118. phoenix/server/api/types/Experiment.py +3 -3
  119. phoenix/server/api/types/ExperimentComparison.py +2 -4
  120. phoenix/server/api/types/GenerativeProvider.py +27 -3
  121. phoenix/server/api/types/Inferences.py +9 -8
  122. phoenix/server/api/types/InferencesRole.py +2 -2
  123. phoenix/server/api/types/Model.py +2 -2
  124. phoenix/server/api/types/Project.py +11 -18
  125. phoenix/server/api/types/Segments.py +3 -3
  126. phoenix/server/api/types/Span.py +45 -7
  127. phoenix/server/api/types/TemplateLanguage.py +9 -0
  128. phoenix/server/api/types/TimeSeries.py +8 -7
  129. phoenix/server/api/types/Trace.py +2 -2
  130. phoenix/server/api/types/UMAPPoints.py +6 -6
  131. phoenix/server/api/types/User.py +3 -3
  132. phoenix/server/api/types/node.py +1 -3
  133. phoenix/server/api/types/pagination.py +4 -4
  134. phoenix/server/api/utils.py +2 -4
  135. phoenix/server/app.py +76 -37
  136. phoenix/server/bearer_auth.py +4 -10
  137. phoenix/server/dml_event.py +3 -3
  138. phoenix/server/dml_event_handler.py +10 -24
  139. phoenix/server/grpc_server.py +3 -2
  140. phoenix/server/jwt_store.py +22 -21
  141. phoenix/server/main.py +17 -4
  142. phoenix/server/oauth2.py +3 -2
  143. phoenix/server/rate_limiters.py +5 -8
  144. phoenix/server/static/.vite/manifest.json +31 -31
  145. phoenix/server/static/assets/components-Csu8UKOs.js +1612 -0
  146. phoenix/server/static/assets/{index-DCzakdJq.js → index-Bk5C9EA7.js} +2 -2
  147. phoenix/server/static/assets/{pages-CAL1FDMt.js → pages-UeWaKXNs.js} +337 -442
  148. phoenix/server/static/assets/{vendor-6IcPAw_j.js → vendor-CtqfhlbC.js} +6 -6
  149. phoenix/server/static/assets/{vendor-arizeai-DRZuoyuF.js → vendor-arizeai-C_3SBz56.js} +2 -2
  150. phoenix/server/static/assets/{vendor-codemirror-DVE2_WBr.js → vendor-codemirror-wfdk9cjp.js} +1 -1
  151. phoenix/server/static/assets/{vendor-recharts-DwrexFA4.js → vendor-recharts-BiVnSv90.js} +1 -1
  152. phoenix/server/templates/index.html +1 -0
  153. phoenix/server/thread_server.py +1 -1
  154. phoenix/server/types.py +17 -29
  155. phoenix/services.py +8 -3
  156. phoenix/session/client.py +12 -24
  157. phoenix/session/data_extractor.py +3 -3
  158. phoenix/session/evaluation.py +1 -2
  159. phoenix/session/session.py +26 -21
  160. phoenix/trace/attributes.py +16 -28
  161. phoenix/trace/dsl/filter.py +17 -21
  162. phoenix/trace/dsl/helpers.py +3 -3
  163. phoenix/trace/dsl/query.py +13 -22
  164. phoenix/trace/fixtures.py +11 -17
  165. phoenix/trace/otel.py +5 -15
  166. phoenix/trace/projects.py +3 -2
  167. phoenix/trace/schemas.py +2 -2
  168. phoenix/trace/span_evaluations.py +9 -8
  169. phoenix/trace/span_json_decoder.py +3 -3
  170. phoenix/trace/span_json_encoder.py +2 -2
  171. phoenix/trace/trace_dataset.py +6 -5
  172. phoenix/trace/utils.py +6 -6
  173. phoenix/utilities/deprecation.py +3 -2
  174. phoenix/utilities/error_handling.py +3 -2
  175. phoenix/utilities/json.py +2 -1
  176. phoenix/utilities/logging.py +2 -2
  177. phoenix/utilities/project.py +1 -1
  178. phoenix/utilities/re.py +3 -4
  179. phoenix/utilities/template_formatters.py +16 -5
  180. phoenix/version.py +1 -1
  181. arize_phoenix-5.5.2.dist-info/RECORD +0 -321
  182. phoenix/server/static/assets/components-hX0LgYz3.js +0 -1428
  183. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/WHEEL +0 -0
  184. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/entry_points.txt +0 -0
  185. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/licenses/IP_NOTICE +0 -0
  186. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/licenses/LICENSE +0 -0
--- a/phoenix/session/data_extractor.py
+++ b/phoenix/session/data_extractor.py
@@ -1,6 +1,6 @@
 from abc import ABC, abstractmethod
 from datetime import datetime
-from typing import List, Optional, Union, cast
+from typing import Optional, Union, cast
 
 import pandas as pd
 
@@ -28,7 +28,7 @@ class TraceDataExtractor(ABC):
         root_spans_only: Optional[bool] = None,
         project_name: Optional[str] = None,
         timeout: Optional[int] = DEFAULT_TIMEOUT_IN_SECONDS,
-    ) -> Optional[Union[pd.DataFrame, List[pd.DataFrame]]]: ...
+    ) -> Optional[Union[pd.DataFrame, list[pd.DataFrame]]]: ...
 
     def get_spans_dataframe(
         self,
@@ -58,7 +58,7 @@ class TraceDataExtractor(ABC):
     def get_evaluations(
         self,
         project_name: Optional[str] = None,
-    ) -> List[Evaluations]: ...
+    ) -> list[Evaluations]: ...
 
     def get_trace_dataset(
         self,
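
The pattern in this file repeats throughout the release: typing.List, typing.Dict, typing.Tuple, typing.Set, typing.FrozenSet, and typing.DefaultDict give way to the built-in generics standardized by PEP 585 (Python 3.9+). A minimal sketch of the equivalence, with illustrative names that are not from the phoenix codebase:

# PEP 585 migration sketch (Python 3.9+); names are illustrative only.
from collections.abc import Iterable  # replaces the deprecated typing.Iterable


def count_pairs(pairs: Iterable[tuple[str, int]]) -> dict[str, int]:
    # Built-in list/tuple/dict/set/frozenset accept subscripts directly
    # on Python 3.9+, so the typing.* aliases are no longer needed.
    totals: dict[str, int] = {}
    for key, value in pairs:
        totals[key] = totals.get(key, 0) + value
    return totals


print(count_pairs([("a", 1), ("a", 2), ("b", 3)]))  # {'a': 3, 'b': 3}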
--- a/phoenix/session/evaluation.py
+++ b/phoenix/session/evaluation.py
@@ -13,7 +13,6 @@ from typing import (
     Iterator,
     Optional,
     Sequence,
-    Tuple,
     Union,
     cast,
 )
@@ -44,7 +43,7 @@ def encode_evaluations(evaluations: Evaluations) -> Iterator[pb.Evaluation]:
     for index, row in dataframe.iterrows():
         subject_id = _extract_subject_id_from_index(
             index_names,
-            cast(Union[str, Tuple[Any]], index),
+            cast(Union[str, tuple[Any]], index),
         )
         if (result := _extract_result(row)) is None:
             continue
--- a/phoenix/session/session.py
+++ b/phoenix/session/session.py
@@ -5,23 +5,14 @@ import shutil
 import warnings
 from abc import ABC, abstractmethod
 from collections import UserList
+from collections.abc import Iterable, Mapping
 from datetime import datetime
 from enum import Enum
 from importlib.util import find_spec
 from itertools import chain
 from pathlib import Path
 from tempfile import TemporaryDirectory
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Iterable,
-    List,
-    Mapping,
-    NamedTuple,
-    Optional,
-    Set,
-    Union,
-)
+from typing import TYPE_CHECKING, Any, NamedTuple, Optional, Union
 from urllib.parse import urljoin
 
 import pandas as pd
@@ -33,6 +24,7 @@ from phoenix.config import (
     ENV_PHOENIX_PORT,
     ensure_working_dir,
     get_env_database_connection_str,
+    get_env_enable_websockets,
     get_env_host,
     get_env_port,
     get_exported_files,
@@ -88,8 +80,8 @@ class NotebookEnvironment(Enum):
 
 class ExportedData(_BaseList):
     def __init__(self) -> None:
-        self.paths: Set[Path] = set()
-        self.names: List[str] = []
+        self.paths: set[Path] = set()
+        self.names: list[str] = []
         super().__init__()
 
     def __repr__(self) -> str:
@@ -112,7 +104,7 @@ class Session(TraceDataExtractor, ABC):
     notebook_env: NotebookEnvironment
     """The notebook environment that the session is running in."""
 
-    def __dir__(self) -> List[str]:
+    def __dir__(self) -> list[str]:
         return ["exports", "view", "url"]
 
     def __init__(
@@ -157,7 +149,7 @@ class Session(TraceDataExtractor, ABC):
         # Deprecated fields
         stop_time: Optional[datetime] = None,
         timeout: Optional[int] = DEFAULT_TIMEOUT_IN_SECONDS,
-    ) -> Optional[Union[pd.DataFrame, List[pd.DataFrame]]]:
+    ) -> Optional[Union[pd.DataFrame, list[pd.DataFrame]]]:
         """
         Queries the spans in the project based on the provided parameters.
 
@@ -203,7 +195,7 @@ class Session(TraceDataExtractor, ABC):
     def get_evaluations(
         self,
         project_name: Optional[str] = None,
-    ) -> List[Evaluations]:
+    ) -> list[Evaluations]:
         """
         Get the evaluations for a project.
 
@@ -216,7 +208,7 @@ class Session(TraceDataExtractor, ABC):
 
         Returns
         -------
-        evaluations : List[Evaluations]
+        evaluations : list[Evaluations]
             A list of evaluations for the specified project.
 
         """
@@ -278,6 +270,7 @@ class ProcessSession(Session):
         self,
         database_url: str,
         primary_inferences: Inferences,
+        enable_websockets: bool,
         reference_inferences: Optional[Inferences] = None,
         corpus_inferences: Optional[Inferences] = None,
         trace_dataset: Optional[TraceDataset] = None,
@@ -328,6 +321,7 @@ class ProcessSession(Session):
             trace_dataset_name=(
                 self.trace_dataset.name if self.trace_dataset is not None else None
             ),
+            enable_websockets=enable_websockets,
         )
 
     @property
@@ -344,6 +338,7 @@ class ThreadSession(Session):
         self,
         database_url: str,
         primary_inferences: Inferences,
+        enable_websockets: bool,
         reference_inferences: Optional[Inferences] = None,
         corpus_inferences: Optional[Inferences] = None,
         trace_dataset: Optional[TraceDataset] = None,
@@ -384,6 +379,7 @@ class ThreadSession(Session):
             export_path=self.export_path,
             model=self.model,
             authentication_enabled=False,
+            enable_websockets=enable_websockets,
             corpus=self.corpus,
             umap_params=self.umap_parameters,
             initial_spans=trace_dataset.to_spans() if trace_dataset else None,
@@ -447,6 +443,7 @@ def launch_app(
     run_in_thread: bool = True,
     notebook_environment: Optional[Union[NotebookEnvironment, str]] = None,
     use_temp_dir: bool = True,
+    enable_websockets: Optional[bool] = None,
 ) -> Optional[Session]:
     """
     Launches the phoenix application and returns a session to interact with.
@@ -471,7 +468,7 @@ def launch_app(
         Defaults to 6006.
     run_in_thread: bool, optional, default=True
         Whether the server should run in a Thread or Process.
-    default_umap_parameters: Dict[str, Union[int, float]], optional, default=None
+    default_umap_parameters: dict[str, Union[int, float]], optional, default=None
         User specified default UMAP parameters
         eg: {"n_neighbors": 10, "n_samples": 5, "min_dist": 0.5}
     notebook_environment: str, optional, default=None
@@ -481,7 +478,8 @@ def launch_app(
     use_temp_dir: bool, optional, default=True
         Whether to use a temporary directory to store the data. If set to False, the data will be
         stored in the directory specified by PHOENIX_WORKING_DIR environment variable via SQLite.
-
+    enable_websockets: bool, optional, default=False
+        Whether to enable websockets.
 
     Returns
     -------
@@ -562,10 +560,16 @@ def launch_app(
     else:
         database_url = get_env_database_connection_str()
 
+    enable_websockets_env = get_env_enable_websockets() or False
+    enable_websockets = (
+        enable_websockets if enable_websockets is not None else enable_websockets_env
+    )
+
     if run_in_thread:
         _session = ThreadSession(
             database_url,
             primary,
+            enable_websockets,
             reference,
             corpus,
             trace,
@@ -579,6 +583,7 @@ def launch_app(
         _session = ProcessSession(
             database_url,
             primary,
+            enable_websockets,
             reference,
             corpus,
             trace,
@@ -638,7 +643,7 @@ def close_app(delete_data: bool = False) -> None:
 def _get_url(host: str, port: int, notebook_env: NotebookEnvironment) -> str:
     """Determines the IFrame URL based on whether this is in a Colab or in a local notebook"""
     if notebook_env == NotebookEnvironment.COLAB:
-        from google.colab.output import eval_js  # type: ignore
+        from google.colab.output import eval_js
 
         return str(eval_js(f"google.colab.kernel.proxyPort({port}, {{'cache': true}})"))
     if notebook_env == NotebookEnvironment.SAGEMAKER:
@@ -656,7 +661,7 @@ def _get_url(host: str, port: int, notebook_env: NotebookEnvironment) -> str:
 def _is_colab() -> bool:
     """Determines whether this is in a Colab"""
     try:
-        import google.colab  # type: ignore # noqa: F401
+        import google.colab  # noqa: F401
     except ImportError:
         return False
     try:
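
The hunks above add an enable_websockets flag that threads from launch_app() into both session types, with an environment fallback via get_env_enable_websockets(). A usage sketch; the environment variable name is an assumption (the authoritative name is defined in phoenix/config.py):

# Usage sketch for the new flag, inferred from the launch_app() hunks above.
# PHOENIX_ENABLE_WEBSOCKETS is an assumed variable name; check
# phoenix/config.py (get_env_enable_websockets) for the real one.
import os

import phoenix as px

# Explicit opt-in takes precedence over the environment fallback:
session = px.launch_app(enable_websockets=True)

# Or leave the argument as None and rely on the environment:
os.environ["PHOENIX_ENABLE_WEBSOCKETS"] = "true"
session = px.launch_app()  # enable_websockets=None -> env fallback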
--- a/phoenix/trace/attributes.py
+++ b/phoenix/trace/attributes.py
@@ -17,21 +17,9 @@ them into a nested list of dictionaries i.e.
 
 import inspect
 import json
-from typing import (
-    Any,
-    DefaultDict,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    Set,
-    Tuple,
-    Union,
-    cast,
-)
+from collections import defaultdict
+from collections.abc import Iterable, Iterator, Mapping, Sequence
+from typing import Any, Optional, Union, cast
 
 import numpy as np
 from openinference.semconv import trace
@@ -51,7 +39,7 @@ JSON_STRING_ATTRIBUTES = (
     TOOL_PARAMETERS,
 )
 
-SEMANTIC_CONVENTIONS: List[str] = sorted(
+SEMANTIC_CONVENTIONS: list[str] = sorted(
     # e.g. "input.value", "llm.token_count.total", etc.
     (
         cast(str, getattr(klass, attr))
@@ -66,11 +54,11 @@ SEMANTIC_CONVENTIONS: List[str] = sorted(
 
 
 def unflatten(
-    key_value_pairs: Iterable[Tuple[str, Any]],
+    key_value_pairs: Iterable[tuple[str, Any]],
     *,
     prefix_exclusions: Sequence[str] = (),
     separator: str = ".",
-) -> Dict[str, Any]:
+) -> dict[str, Any]:
     # `prefix_exclusions` is intended to contain the semantic conventions
     trie = _build_trie(key_value_pairs, separator=separator, prefix_exclusions=prefix_exclusions)
     return dict(_walk(trie, separator=separator))
@@ -83,7 +71,7 @@ def flatten(
     separator: str = ".",
     recurse_on_sequence: bool = False,
     json_string_attributes: Optional[Sequence[str]] = None,
-) -> Iterator[Tuple[str, Any]]:
+) -> Iterator[tuple[str, Any]]:
     """
     Flatten a nested dictionary or a sequence of dictionaries into a list of
     key value pairs. If `recurse_on_sequence` is True, then the function will
@@ -149,7 +137,7 @@ def get_attribute_value(
     return attributes.get(sub_keys[-1])
 
 
-def load_json_strings(key_values: Iterable[Tuple[str, Any]]) -> Iterator[Tuple[str, Any]]:
+def load_json_strings(key_values: Iterable[tuple[str, Any]]) -> Iterator[tuple[str, Any]]:
     for key, value in key_values:
         if key.endswith(JSON_STRING_ATTRIBUTES):
             try:
@@ -167,7 +155,7 @@ def _partition_with_prefix_exclusion(
     key: str,
     separator: str = ".",
     prefix_exclusions: Sequence[str] = (),
-) -> Tuple[str, str, str]:
+) -> tuple[str, str, str]:
     """
     Partition `key` by `separator`, but exclude prefixes in `prefix_exclusions`,
     which is usually the list of semantic conventions. `prefix_exclusions` should
@@ -181,7 +169,7 @@ def _partition_with_prefix_exclusion(
     return key.partition(separator)
 
 
-class _Trie(DefaultDict[Union[str, int], "_Trie"]):
+class _Trie(defaultdict[Union[str, int], "_Trie"]):
     """
     Prefix Tree with special handling for indices (i.e. all-digit keys). Indices
     represent the position of an element in a nested list, while branches represent
@@ -191,8 +179,8 @@ class _Trie(DefaultDict[Union[str, int], "_Trie"]):
     def __init__(self) -> None:
         super().__init__(_Trie)
         self.value: Any = None
-        self.indices: Set[int] = set()
-        self.branches: Set[Union[str, int]] = set()
+        self.indices: set[int] = set()
+        self.branches: set[Union[str, int]] = set()
 
     def set_value(self, value: Any) -> None:
         self.value = value
@@ -215,7 +203,7 @@ class _Trie(DefaultDict[Union[str, int], "_Trie"]):
 
 
 def _build_trie(
-    key_value_pairs: Iterable[Tuple[str, Any]],
+    key_value_pairs: Iterable[tuple[str, Any]],
     *,
     prefix_exclusions: Sequence[str] = (),
     separator: str = ".",
@@ -254,7 +242,7 @@ def _walk(
     *,
     prefix: str = "",
     separator: str = ".",
-) -> Iterator[Tuple[str, Any]]:
+) -> Iterator[tuple[str, Any]]:
     """
     Walk the Trie and yield key value pairs. If the Trie node has a value, then
     yield the prefix and the value. If the Trie node has indices, then yield the
@@ -286,7 +274,7 @@ def _flatten_mapping(
     recurse_on_sequence: bool = False,
     json_string_attributes: Optional[Sequence[str]] = None,
     separator: str = ".",
-) -> Iterator[Tuple[str, Any]]:
+) -> Iterator[tuple[str, Any]]:
     """
     Flatten a nested dictionary into a list of key value pairs. If `recurse_on_sequence`
     is True, then the function will also recursively flatten nested sequences of dictionaries.
@@ -327,7 +315,7 @@ def _flatten_sequence(
     recurse_on_sequence: bool = False,
     json_string_attributes: Optional[Sequence[str]] = None,
     separator: str = ".",
-) -> Iterator[Tuple[str, Any]]:
+) -> Iterator[tuple[str, Any]]:
     """
     Flatten a sequence of dictionaries into a list of key value pairs. If `recurse_on_sequence`
     is True, then the function will also recursively flatten nested sequences of dictionaries.
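
For orientation: unflatten() is the inverse of flatten(), rebuilding nested structure from dotted keys, with all-digit path segments treated as list indices (per the _Trie docstring above). The following is an independent, simplified illustration of that idea, not the package's implementation (which also handles prefix_exclusions and custom separators):

# Simplified sketch of the unflatten idea: dotted keys -> nested dicts,
# all-digit segments -> list indices. Independent of phoenix's code.
from typing import Any


def unflatten_sketch(pairs: list[tuple[str, Any]]) -> Any:
    root: dict[str, Any] = {}
    for key, value in pairs:
        node = root
        parts = key.split(".")
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value

    def listify(obj: Any) -> Any:
        # Turn {"0": ..., "1": ...} nodes into lists, recursively.
        if not isinstance(obj, dict):
            return obj
        out = {k: listify(v) for k, v in obj.items()}
        if out and all(k.isdigit() for k in out):
            return [out[k] for k in sorted(out, key=int)]
        return out

    return listify(root)


pairs = [
    ("retrieval.documents.0.document.content", "A"),
    ("retrieval.documents.1.document.content", "B"),
]
print(unflatten_sketch(pairs))
# {'retrieval': {'documents': [{'document': {'content': 'A'}},
#                              {'document': {'content': 'B'}}]}}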
--- a/phoenix/trace/dsl/filter.py
+++ b/phoenix/trace/dsl/filter.py
@@ -17,7 +17,7 @@ from typing_extensions import TypeAlias, TypeGuard, assert_never
 import phoenix.trace.v1 as pb
 from phoenix.db import models
 
-_VALID_EVAL_ATTRIBUTES: typing.Tuple[str, ...] = tuple(
+_VALID_EVAL_ATTRIBUTES: tuple[str, ...] = tuple(
     field.name for field in pb.Evaluation.Result.DESCRIPTOR.fields
 )
 
@@ -59,7 +59,7 @@ class AliasedAnnotationRelation:
         object.__setattr__(self, "table", table)
 
     @property
-    def attributes(self) -> typing.Iterator[typing.Tuple[str, Mapped[typing.Any]]]:
+    def attributes(self) -> typing.Iterator[tuple[str, Mapped[typing.Any]]]:
         """
         Alias names and attributes (i.e., columns) of the `span_annotation`
         relation.
@@ -80,7 +80,7 @@
 
 # Because postgresql is strongly typed, we cast JSON values to string
 # by default unless it's hinted otherwise as done here.
-_FLOAT_ATTRIBUTES: typing.FrozenSet[str] = frozenset(
+_FLOAT_ATTRIBUTES: frozenset[str] = frozenset(
     {
         "llm.token_count.completion",
         "llm.token_count.prompt",
@@ -142,12 +142,8 @@ class SpanFilter:
     valid_eval_names: typing.Optional[typing.Sequence[str]] = None
     translated: ast.Expression = field(init=False, repr=False)
     compiled: typing.Any = field(init=False, repr=False)
-    _aliased_annotation_relations: typing.Tuple[AliasedAnnotationRelation] = field(
-        init=False, repr=False
-    )
-    _aliased_annotation_attributes: typing.Dict[str, Mapped[typing.Any]] = field(
-        init=False, repr=False
-    )
+    _aliased_annotation_relations: tuple[AliasedAnnotationRelation] = field(init=False, repr=False)
+    _aliased_annotation_attributes: dict[str, Mapped[typing.Any]] = field(init=False, repr=False)
 
     def __bool__(self) -> bool:
         return bool(self.condition)
@@ -198,7 +194,7 @@ class SpanFilter:
             )
         )
 
-    def to_dict(self) -> typing.Dict[str, typing.Any]:
+    def to_dict(self) -> dict[str, typing.Any]:
         return {"condition": self.condition}
 
     @classmethod
@@ -439,7 +435,7 @@ class _ProjectionTranslator(ast.NodeTransformer):
 class _FilterTranslator(_ProjectionTranslator):
     def visit_Compare(self, node: ast.Compare) -> typing.Any:
         if len(node.comparators) > 1:
-            args: typing.List[typing.Any] = []
+            args: list[typing.Any] = []
             left = node.left
             for i, (op, comparator) in enumerate(zip(node.ops, node.comparators)):
                 args.append(self.visit(ast.Compare(left=left, ops=[op], comparators=[comparator])))
@@ -540,7 +536,7 @@ class _FilterTranslator(_ProjectionTranslator):
 def _validate_expression(
     expression: ast.Expression,
     valid_eval_names: typing.Optional[typing.Sequence[str]] = None,
-    valid_eval_attributes: typing.Tuple[str, ...] = _VALID_EVAL_ATTRIBUTES,
+    valid_eval_attributes: tuple[str, ...] = _VALID_EVAL_ATTRIBUTES,
 ) -> None:
     """
     Validate primarily the structural (i.e. not semantic) characteristics of an
@@ -638,7 +634,7 @@ def _validate_expression(
 
 
 def _as_attribute(
-    keys: typing.List[ast.Constant],
+    keys: list[ast.Constant],
     # as_float: typing.Optional[bool] = None,
 ) -> ast.Subscript:
     return ast.Subscript(
@@ -675,14 +671,14 @@ def _is_subscript(
 
 def _get_attribute_keys_list(
     node: typing.Any,
-) -> typing.Optional[typing.List[ast.Constant]]:
+) -> typing.Optional[list[ast.Constant]]:
     # e.g. `attributes["key"]` -> `["key"]`
     # e.g. `attributes["a"]["b.c"][["d"]]` -> `["a", "b.c", "d"]`
     # e.g. `attributes["a"][["b.c", "d"]]` -> `["a", "b.c", "d"]`
     # e.g. `metadata["key"]` -> `["metadata", "key"]`
     # e.g. `metadata["a"]["b.c"][["d"]]` -> `["metadata", "a", "b.c", "d"]`
     # e.g. `metadata["a"][["b.c", "d"]]` -> `["metadata", "a", "b.c", "d"]`
-    keys: typing.List[ast.Constant] = []
+    keys: list[ast.Constant] = []
     if isinstance(node, ast.Attribute):
         while isinstance(node, ast.Attribute):
             keys.append(ast.Constant(value=node.attr, kind=None))
@@ -707,7 +703,7 @@ def _get_attribute_keys_list(
 
 def _get_subscript_keys_list(
     node: ast.Subscript,
-) -> typing.Optional[typing.List[ast.Constant]]:
+) -> typing.Optional[list[ast.Constant]]:
     child = node.slice
     if isinstance(child, ast.Constant):
         if not isinstance(child.value, (str, int)) or isinstance(child.value, bool):
@@ -751,7 +747,7 @@ def _disjunction(choices: typing.Sequence[str]) -> str:
 
 def _find_best_match(
     source: str, choices: typing.Iterable[str]
-) -> typing.Tuple[typing.Optional[str], float]:
+) -> tuple[typing.Optional[str], float]:
     best_choice, best_score = None, 0.0
     for choice in choices:
         score = SequenceMatcher(None, source, choice).ratio()
@@ -762,9 +758,9 @@ def _find_best_match(
 
 def _apply_eval_aliasing(
     source: str,
-) -> typing.Tuple[
+) -> tuple[
     str,
-    typing.Tuple[AliasedAnnotationRelation, ...],
+    tuple[AliasedAnnotationRelation, ...],
 ]:
     """
     Substitutes `evals[<eval-name>].<attribute>` with aliases. Returns the
@@ -784,7 +780,7 @@ def _apply_eval_aliasing(
     span_annotation_0_label_123 == 'correct' or span_annotation_0_score_456 < 0.5
     ```
     """
-    eval_aliases: typing.Dict[AnnotationName, AliasedAnnotationRelation] = {}
+    eval_aliases: dict[AnnotationName, AliasedAnnotationRelation] = {}
     for (
         annotation_expression,
         annotation_type,
@@ -802,7 +798,7 @@ def _apply_eval_aliasing(
 def _parse_annotation_expressions_and_names(
     source: str,
 ) -> typing.Iterator[
-    typing.Tuple[AnnotationExpression, AnnotationType, AnnotationName, AnnotationAttribute]
+    tuple[AnnotationExpression, AnnotationType, AnnotationName, AnnotationAttribute]
 ]:
     """
     Parses filter conditions for evaluation expressions of the form:
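
To make the aliasing concrete: a filter such as evals['quality'].label == 'correct' cannot be compiled against the ORM directly, so each evals[...].attribute reference is rewritten to a uniquely named column alias before the expression is parsed. Below is a toy version of that rewrite; the real parser is more careful (it reuses one relation per eval name, appends a random suffix, and validates attributes against _VALID_EVAL_ATTRIBUTES), so the regex and alias format here are assumptions:

# Toy illustration of the eval-aliasing rewrite described above; not
# phoenix's actual patterns.
import re

_EVAL_REF = re.compile(r"""evals\[(['"])(?P<name>.+?)\1\]\.(?P<attr>\w+)""")


def alias_evals(source: str) -> tuple[str, dict[str, tuple[str, str]]]:
    aliases: dict[str, tuple[str, str]] = {}

    def repl(match: "re.Match[str]") -> str:
        name, attr = match.group("name"), match.group("attr")
        alias = f"span_annotation_{len(aliases)}_{attr}"
        aliases[alias] = (name, attr)  # remember which eval/attr it stands for
        return alias

    return _EVAL_REF.sub(repl, source), aliases


source = "evals['quality'].label == 'correct' or evals['quality'].score < 0.5"
rewritten, aliases = alias_evals(source)
print(rewritten)
# span_annotation_0_label == 'correct' or span_annotation_1_score < 0.5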
--- a/phoenix/trace/dsl/helpers.py
+++ b/phoenix/trace/dsl/helpers.py
@@ -1,6 +1,6 @@
 import warnings
 from datetime import datetime
-from typing import List, Optional, Protocol, Tuple, Union, cast
+from typing import Optional, Protocol, Union, cast
 
 import pandas as pd
 from openinference.semconv.trace import DocumentAttributes, SpanAttributes
@@ -31,7 +31,7 @@ class CanQuerySpans(Protocol):
         start_time: Optional[datetime] = None,
         end_time: Optional[datetime] = None,
         project_name: Optional[str] = None,
-    ) -> Optional[Union[pd.DataFrame, List[pd.DataFrame]]]: ...
+    ) -> Optional[Union[pd.DataFrame, list[pd.DataFrame]]]: ...
 
 
 def get_retrieved_documents(
@@ -94,7 +94,7 @@ def get_qa_with_reference(
         .with_index("trace_id")
     )
     df_qa, df_docs = cast(
-        Tuple[pd.DataFrame, pd.DataFrame],
+        tuple[pd.DataFrame, pd.DataFrame],
         obj.query_spans(
             qa_query,
             docs_query,
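
The cast() above reflects the multi-query behavior implied by the CanQuerySpans protocol: one query returns a single DataFrame, several queries return one DataFrame per query, hence the Optional[Union[pd.DataFrame, list[pd.DataFrame]]] return type. A usage sketch under that assumption, against a running Phoenix server:

# Sketch of querying spans with multiple SpanQuery objects, mirroring
# the cast(...) in get_qa_with_reference. Assumes a reachable server.
import phoenix as px
from phoenix.trace.dsl import SpanQuery

client = px.Client()

qa_query = SpanQuery().where("span_kind == 'LLM'").select("input.value", "output.value")
docs_query = SpanQuery().where("span_kind == 'RETRIEVER'").explode("retrieval.documents")

# Two queries in -> two DataFrames out.
df_qa, df_docs = client.query_spans(qa_query, docs_query, project_name="default")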
--- a/phoenix/trace/dsl/query.py
+++ b/phoenix/trace/dsl/query.py
@@ -1,22 +1,13 @@
 import warnings
 from collections import defaultdict
+from collections.abc import Iterable, Mapping, Sequence
 from dataclasses import dataclass, field, replace
 from datetime import datetime
 from functools import cached_property
 from itertools import chain
 from random import randint, random
 from types import MappingProxyType
-from typing import (
-    Any,
-    DefaultDict,
-    Dict,
-    Iterable,
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    cast,
-)
+from typing import Any, Optional, cast
 
 import pandas as pd
 from openinference.semconv.trace import SpanAttributes
@@ -83,7 +74,7 @@ class Projection(_Base):
     def __call__(self) -> SQLColumnExpression[Any]:
         return self._projector()
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {"key": self.key}
 
     @classmethod
@@ -138,7 +129,7 @@ class Explosion(_HasTmpSuffix, Projection):
         object.__setattr__(self, "_array_tmp_col_label", f"__array_tmp_col_{random()}")
 
     @cached_property
-    def index_keys(self) -> List[str]:
+    def index_keys(self) -> list[str]:
         return [self._primary_index.key, f"{self._position_prefix}position"]
 
     def with_primary_index_key(self, _: str) -> "Explosion":
@@ -215,7 +206,7 @@ class Explosion(_HasTmpSuffix, Projection):
         # Because sqlite doesn't support `WITH ORDINALITY`, the order of
         # the returned (table) values is not guaranteed. So we resort to
         # post hoc processing using pandas.
-        def _extract_values(array: List[Any]) -> List[Dict[str, Any]]:
+        def _extract_values(array: list[Any]) -> list[dict[str, Any]]:
             if not isinstance(array, Iterable):
                 return []
             if not self.kwargs:
@@ -227,11 +218,11 @@ class Explosion(_HasTmpSuffix, Projection):
                 for i, obj in enumerate(array)
                 if isinstance(obj, Mapping)
             ]
-            res: List[Dict[str, Any]] = []
+            res: list[dict[str, Any]] = []
             for i, obj in enumerate(array):
                 if not isinstance(obj, Mapping):
                     continue
-                values: Dict[str, Any] = {f"{self._position_prefix}position": i}
+                values: dict[str, Any] = {f"{self._position_prefix}position": i}
                 for name, key in self.kwargs.items():
                     if (value := get_attribute_value(obj, key)) is not None:
                         values[name] = value
@@ -261,7 +252,7 @@ class Explosion(_HasTmpSuffix, Projection):
         df = df.set_index(self.index_keys)
         return df
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             **super().to_dict(),
             **({"kwargs": dict(self.kwargs)} if self.kwargs else {}),
@@ -384,12 +375,12 @@ class Concatenation(_HasTmpSuffix, Projection):
         # Because SQLite doesn't support `WITH ORDINALITY`, the order of
         # the returned table-values is not guaranteed. So we resort to
         # post hoc processing using pandas.
-        def _concat_values(array: List[Any]) -> Dict[str, Any]:
+        def _concat_values(array: list[Any]) -> dict[str, Any]:
             if not isinstance(array, Iterable):
                 return {}
             if not self.kwargs:
                 return {self.key: self.separator.join(str(obj) for obj in array)}
-            values: DefaultDict[str, List[str]] = defaultdict(list)
+            values: defaultdict[str, list[str]] = defaultdict(list)
             for i, obj in enumerate(array):
                 if not isinstance(obj, Mapping):
                     continue
@@ -407,7 +398,7 @@ class Concatenation(_HasTmpSuffix, Projection):
             assert_never(dialect)
         return df
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             **super().to_dict(),
             **({"kwargs": dict(self.kwargs)} if self.kwargs else {}),
@@ -619,7 +610,7 @@ class SpanQuery(_HasTmpSuffix):
         df = df.rename(self._rename, axis=1, errors="ignore")
         return df
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             **(
                 {"select": {name: proj.to_dict() for name, proj in self._select.items()}}
@@ -771,7 +762,7 @@ def _outer_join(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
     return df
 
 
-def _flatten_semantic_conventions(attributes: Mapping[str, Any]) -> Dict[str, Any]:
+def _flatten_semantic_conventions(attributes: Mapping[str, Any]) -> dict[str, Any]:
     # This may be inefficient, but is needed to preserve backward-compatibility.
     # For example, custom attributes do not get flattened.
     ans = unflatten(
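
One detail worth noting in the DefaultDict changes here and in _Trie earlier: since Python 3.9, collections.defaultdict is itself subscriptable, both in annotations and as a generic base class, which is what lets these files drop typing.DefaultDict entirely. A minimal standalone sketch:

# Minimal sketch: defaultdict as a parameterized base class (PEP 585),
# echoing _Trie(defaultdict[Union[str, int], "_Trie"]) above.
from collections import defaultdict
from typing import Union


class Trie(defaultdict[Union[str, int], "Trie"]):
    def __init__(self) -> None:
        super().__init__(Trie)  # missing keys create child nodes on access
        self.value: object = None


root = Trie()
root["a"][0]["b"].value = 42  # intermediate nodes spring into existence
print(root["a"][0]["b"].value)  # 42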