arize-phoenix 3.4.1__tar.gz → 3.5.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arize-phoenix might be problematic. Click here for more details.

Files changed (177) hide show
  1. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/PKG-INFO +31 -31
  2. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/README.md +29 -29
  3. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/pyproject.toml +12 -14
  4. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/core/evals.py +9 -9
  5. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/core/model.py +18 -18
  6. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/core/model_schema.py +22 -44
  7. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/functions/executor.py +1 -2
  8. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/functions/processing.py +33 -1
  9. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/models/base.py +4 -8
  10. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/models/litellm.py +1 -1
  11. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/models/rate_limiters.py +1 -2
  12. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/metrics/__init__.py +2 -4
  13. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/metrics/binning.py +3 -6
  14. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/metrics/mixins.py +1 -0
  15. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/metrics/wrappers.py +1 -0
  16. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/pointcloud/pointcloud.py +2 -4
  17. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/SpanSort.py +1 -2
  18. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/interceptor.py +1 -2
  19. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/routers/trace_handler.py +1 -2
  20. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/schema.py +20 -3
  21. arize_phoenix-3.5.0/src/phoenix/server/api/types/Project.py +72 -0
  22. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Segments.py +2 -4
  23. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Span.py +18 -0
  24. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/app.py +4 -0
  25. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/main.py +35 -2
  26. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/index.js +534 -494
  27. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/templates/index.html +2 -1
  28. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/session/data_extractor.py +2 -4
  29. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/session/evaluation.py +1 -0
  30. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/dsl/filter.py +1 -2
  31. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/dsl/helpers.py +3 -2
  32. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/dsl/query.py +3 -7
  33. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/langchain/tracer.py +1 -0
  34. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/span_evaluations.py +1 -2
  35. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/span_json_encoder.py +13 -3
  36. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/tracer.py +2 -2
  37. arize_phoenix-3.5.0/src/phoenix/version.py +1 -0
  38. arize_phoenix-3.4.1/src/phoenix/version.py +0 -1
  39. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/.gitignore +0 -0
  40. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/IP_NOTICE +0 -0
  41. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/LICENSE +0 -0
  42. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/__init__.py +0 -0
  43. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/config.py +0 -0
  44. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/core/__init__.py +0 -0
  45. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/core/embedding_dimension.py +0 -0
  46. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/core/model_schema_adapter.py +0 -0
  47. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/core/traces.py +0 -0
  48. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/datasets/__init__.py +0 -0
  49. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/datasets/dataset.py +0 -0
  50. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/datasets/errors.py +0 -0
  51. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/datasets/fixtures.py +0 -0
  52. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/datasets/schema.py +0 -0
  53. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/datasets/validation.py +0 -0
  54. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/datetime_utils.py +0 -0
  55. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/exceptions.py +0 -0
  56. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/__init__.py +0 -0
  57. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/__init__.py +0 -0
  58. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/evaluators.py +0 -0
  59. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/functions/__init__.py +0 -0
  60. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/functions/classify.py +0 -0
  61. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/functions/generate.py +0 -0
  62. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/models/__init__.py +0 -0
  63. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/models/anthropic.py +0 -0
  64. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/models/bedrock.py +0 -0
  65. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/models/openai.py +0 -0
  66. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/models/vertex.py +0 -0
  67. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/models/vertexai.py +0 -0
  68. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/retrievals.py +0 -0
  69. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/templates/__init__.py +0 -0
  70. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/templates/default_templates.py +0 -0
  71. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/templates/template.py +0 -0
  72. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/utils/__init__.py +0 -0
  73. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/experimental/evals/utils/threads.py +0 -0
  74. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/metrics/README.md +0 -0
  75. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/metrics/metrics.py +0 -0
  76. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/metrics/retrieval_metrics.py +0 -0
  77. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/metrics/timeseries.py +0 -0
  78. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/pointcloud/__init__.py +0 -0
  79. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/pointcloud/clustering.py +0 -0
  80. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/pointcloud/projectors.py +0 -0
  81. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/pointcloud/umap_parameters.py +0 -0
  82. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/py.typed +0 -0
  83. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/__init__.py +0 -0
  84. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/__init__.py +0 -0
  85. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/context.py +0 -0
  86. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/helpers.py +0 -0
  87. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/ClusterInput.py +0 -0
  88. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/Coordinates.py +0 -0
  89. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/DataQualityMetricInput.py +0 -0
  90. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/DimensionFilter.py +0 -0
  91. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/DimensionInput.py +0 -0
  92. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/Granularity.py +0 -0
  93. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/PerformanceMetricInput.py +0 -0
  94. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/TimeRange.py +0 -0
  95. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/input_types/__init__.py +0 -0
  96. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/routers/__init__.py +0 -0
  97. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/routers/evaluation_handler.py +0 -0
  98. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/routers/span_handler.py +0 -0
  99. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/routers/utils.py +0 -0
  100. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Cluster.py +0 -0
  101. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DataQualityMetric.py +0 -0
  102. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Dataset.py +0 -0
  103. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DatasetInfo.py +0 -0
  104. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DatasetRole.py +0 -0
  105. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DatasetValues.py +0 -0
  106. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Dimension.py +0 -0
  107. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DimensionDataType.py +0 -0
  108. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DimensionShape.py +0 -0
  109. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DimensionType.py +0 -0
  110. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DimensionWithValue.py +0 -0
  111. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DocumentEvaluationSummary.py +0 -0
  112. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/DocumentRetrievalMetrics.py +0 -0
  113. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/EmbeddingDimension.py +0 -0
  114. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/EmbeddingMetadata.py +0 -0
  115. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Evaluation.py +0 -0
  116. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/EvaluationSummary.py +0 -0
  117. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Event.py +0 -0
  118. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/EventMetadata.py +0 -0
  119. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/ExportEventsMutation.py +0 -0
  120. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/ExportedFile.py +0 -0
  121. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Functionality.py +0 -0
  122. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/MimeType.py +0 -0
  123. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Model.py +0 -0
  124. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/NumericRange.py +0 -0
  125. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/PerformanceMetric.py +0 -0
  126. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/PromptResponse.py +0 -0
  127. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/Retrieval.py +0 -0
  128. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/ScalarDriftMetricEnum.py +0 -0
  129. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/SortDir.py +0 -0
  130. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/TimeSeries.py +0 -0
  131. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/UMAPPoints.py +0 -0
  132. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/ValidationResult.py +0 -0
  133. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/VectorDriftMetricEnum.py +0 -0
  134. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/__init__.py +0 -0
  135. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/node.py +0 -0
  136. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/api/types/pagination.py +0 -0
  137. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/apple-touch-icon-114x114.png +0 -0
  138. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/apple-touch-icon-120x120.png +0 -0
  139. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/apple-touch-icon-144x144.png +0 -0
  140. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/apple-touch-icon-152x152.png +0 -0
  141. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/apple-touch-icon-180x180.png +0 -0
  142. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/apple-touch-icon-72x72.png +0 -0
  143. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/apple-touch-icon-76x76.png +0 -0
  144. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/apple-touch-icon.png +0 -0
  145. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/favicon.ico +0 -0
  146. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/index.css +0 -0
  147. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/static/modernizr.js +0 -0
  148. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/templates/__init__.py +0 -0
  149. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/server/thread_server.py +0 -0
  150. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/services.py +0 -0
  151. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/session/__init__.py +0 -0
  152. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/session/client.py +0 -0
  153. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/session/session.py +0 -0
  154. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/__init__.py +0 -0
  155. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/dsl/__init__.py +0 -0
  156. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/dsl/missing.py +0 -0
  157. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/errors.py +0 -0
  158. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/evaluation_conventions.py +0 -0
  159. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/exporter.py +0 -0
  160. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/fixtures.py +0 -0
  161. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/langchain/__init__.py +0 -0
  162. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/langchain/instrumentor.py +0 -0
  163. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/llama_index/__init__.py +0 -0
  164. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/llama_index/callback.py +0 -0
  165. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/openai/__init__.py +0 -0
  166. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/openai/instrumentor.py +0 -0
  167. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/otel.py +0 -0
  168. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/schemas.py +0 -0
  169. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/span_json_decoder.py +0 -0
  170. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/trace_dataset.py +0 -0
  171. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/utils.py +0 -0
  172. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/v1/__init__.py +0 -0
  173. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/v1/evaluation_pb2.py +0 -0
  174. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/trace/v1/evaluation_pb2.pyi +0 -0
  175. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/utilities/__init__.py +0 -0
  176. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/utilities/error_handling.py +0 -0
  177. {arize_phoenix-3.4.1 → arize_phoenix-3.5.0}/src/phoenix/utilities/logging.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: arize-phoenix
3
- Version: 3.4.1
3
+ Version: 3.5.0
4
4
  Summary: ML Observability in your notebook
5
5
  Project-URL: Documentation, https://docs.arize.com/phoenix/
6
6
  Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -59,7 +59,7 @@ Requires-Dist: pytest-asyncio; extra == 'dev'
59
59
  Requires-Dist: pytest-cov; extra == 'dev'
60
60
  Requires-Dist: pytest-lazy-fixture; extra == 'dev'
61
61
  Requires-Dist: pytest==7.4.4; extra == 'dev'
62
- Requires-Dist: ruff==0.1.5; extra == 'dev'
62
+ Requires-Dist: ruff==0.3.0; extra == 'dev'
63
63
  Requires-Dist: strawberry-graphql[debug-server]==0.208.2; extra == 'dev'
64
64
  Provides-Extra: evals
65
65
  Requires-Dist: arize-phoenix-evals>=0.0.3; extra == 'evals'
@@ -134,14 +134,11 @@ Phoenix provides MLOps and LLMOps insights at lightning speed with zero-config o
134
134
  Install Phoenix via `pip` or `conda` as well as any of its subpackages.
135
135
 
136
136
  ```shell
137
- pip install arize-phoenix
137
+ pip install arize-phoenix[evals]
138
138
  ```
139
139
 
140
- Some functionality such as LLM evals are under the `experimental` subpackage.
141
-
142
- ```shell
143
- pip install arize-phoenix[experimental]
144
- ```
140
+ > [!NOTE]
141
+ > The above will install Phoenix and its `evals` subpackage. To just install phoenix's evaluation package, you can run `pip install arize-phoenix-evals` instead.
145
142
 
146
143
  ## LLM Traces
147
144
 
@@ -159,39 +156,44 @@ To extract traces from your LlamaIndex application, you will have to add Phoenix
159
156
 
160
157
  ```shell
161
158
  # Install phoenix as well as llama_index and your LLM of choice
162
- pip install arize-phoenix llama-index openai
163
-
159
+ pip install "arize-phoenix[evals]" "openai>=1" "llama-index>=0.10.3" "openinference-instrumentation-llama-index>=1.0.0" "llama-index-callbacks-arize-phoenix>=0.1.2" llama-index-llms-openai
164
160
  ```
165
161
 
166
162
  Launch Phoenix in a notebook and view the traces of your LlamaIndex application in the Phoenix UI.
167
163
 
168
164
  ```python
165
+ import os
169
166
  import phoenix as px
167
+ from llama_index.core import (
168
+ Settings,
169
+ VectorStoreIndex,
170
+ SimpleDirectoryReader,
171
+ set_global_handler,
172
+ )
173
+ from llama_index.embeddings.openai import OpenAIEmbedding
174
+ from llama_index.llms.openai import OpenAI
175
+
176
+ os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"
170
177
 
171
178
  # To view traces in Phoenix, you will first have to start a Phoenix server. You can do this by running the following:
172
179
  session = px.launch_app()
173
180
 
174
181
 
175
- # Once you have started a Phoenix server, you can start your LlamaIndex application with the `OpenInferenceTraceCallback` as a callback. To do this, you will have to add the callback to the initialization of your LlamaIndex application:
182
+ # Once you have started a Phoenix server, you can start your LlamaIndex application and configure it to send traces to Phoenix. To do this, you will have to configure Phoenix as the global handler.
176
183
 
177
- from phoenix.trace.llama_index import (
178
- OpenInferenceTraceCallbackHandler,
179
- )
184
+ set_global_handler("arize_phoenix")
180
185
 
181
- # Initialize the callback handler
182
- callback_handler = OpenInferenceTraceCallbackHandler()
183
186
 
184
187
  # LlamaIndex application initialization may vary
185
188
  # depending on your application
186
- service_context = ServiceContext.from_defaults(
187
- llm_predictor=LLMPredictor(llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)),
188
- embed_model=OpenAIEmbedding(model="text-embedding-ada-002"),
189
- callback_manager=CallbackManager(handlers=[callback_handler]),
190
- )
191
- index = load_index_from_storage(
192
- storage_context,
193
- service_context=service_context,
194
- )
189
+ Settings.llm = OpenAI(model="gpt-4-turbo-preview")
190
+ Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")
191
+
192
+
193
+ # Load your data and create an index. Note you usually want to store your index in a persistent store like a database or the file system
194
+ documents = SimpleDirectoryReader("YOUR_DATA_DIRECTORY").load_data()
195
+ index = VectorStoreIndex.from_documents(documents)
196
+
195
197
  query_engine = index.as_query_engine()
196
198
 
197
199
  # Query your LlamaIndex application
@@ -267,8 +269,6 @@ session.url
267
269
 
268
270
  ## LLM Evals
269
271
 
270
- 🚧 LLM Evals is still under construction under a sub-module `arize-phoenix[experimental]`
271
-
272
272
  [![Open in Colab](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=grey&color=blue&logoColor=orange&label=%20)](https://colab.research.google.com/github/Arize-ai/phoenix/blob/main/tutorials/evals/evaluate_relevance_classifications.ipynb) [![Open in GitHub](https://img.shields.io/static/v1?message=Open%20in%20GitHub&logo=github&labelColor=grey&color=blue&logoColor=white&label=%20)](https://github.com/Arize-ai/phoenix/blob/main/tutorials/evals/evaluate_relevance_classifications.ipynb)
273
273
 
274
274
  Phoenix provides tooling to evaluate LLM applications, including tools to determine the relevance or irrelevance of documents retrieved by a retrieval-augmented generation (RAG) application, whether or not the response is toxic, and much more.
@@ -283,12 +283,12 @@ Phoenix's approach to LLM evals is notable for the following reasons:
283
283
  Here is an example of running the RAG relevance eval on a dataset of Wikipedia questions and answers:
284
284
 
285
285
  ```shell
286
- # Install phoenix as well as the experimental subpackage
287
- pip install arize-phoenix[experimental] ipython matplotlib openai pycm scikit-learn
286
+ # Install phoenix as well as the evals subpackage
287
+ pip install 'arize-phoenix[evals]' ipython matplotlib openai pycm scikit-learn
288
288
  ```
289
289
 
290
290
  ```python
291
- from phoenix.experimental.evals import (
291
+ from phoenix.evals import (
292
292
  RAG_RELEVANCY_PROMPT_TEMPLATE,
293
293
  RAG_RELEVANCY_PROMPT_RAILS_MAP,
294
294
  OpenAIModel,
@@ -324,7 +324,7 @@ y_pred = df["eval_relevance"]
324
324
  precision, recall, f1, support = precision_recall_fscore_support(y_true, y_pred)
325
325
  ```
326
326
 
327
- To learn more about LLM Evals, see the [LLM Evals documentation](https://docs.arize.com/phoenix/concepts/llm-evals/).
327
+ To learn more about LLM Evals, see the [Evals documentation](https://docs.arize.com/phoenix/concepts/llm-evals/).
328
328
 
329
329
  ## Embedding Analysis
330
330
 
@@ -61,14 +61,11 @@ Phoenix provides MLOps and LLMOps insights at lightning speed with zero-config o
61
61
  Install Phoenix via `pip` or `conda` as well as any of its subpackages.
62
62
 
63
63
  ```shell
64
- pip install arize-phoenix
64
+ pip install arize-phoenix[evals]
65
65
  ```
66
66
 
67
- Some functionality such as LLM evals are under the `experimental` subpackage.
68
-
69
- ```shell
70
- pip install arize-phoenix[experimental]
71
- ```
67
+ > [!NOTE]
68
+ > The above will install Phoenix and its `evals` subpackage. To just install phoenix's evaluation package, you can run `pip install arize-phoenix-evals` instead.
72
69
 
73
70
  ## LLM Traces
74
71
 
@@ -86,39 +83,44 @@ To extract traces from your LlamaIndex application, you will have to add Phoenix
86
83
 
87
84
  ```shell
88
85
  # Install phoenix as well as llama_index and your LLM of choice
89
- pip install arize-phoenix llama-index openai
90
-
86
+ pip install "arize-phoenix[evals]" "openai>=1" "llama-index>=0.10.3" "openinference-instrumentation-llama-index>=1.0.0" "llama-index-callbacks-arize-phoenix>=0.1.2" llama-index-llms-openai
91
87
  ```
92
88
 
93
89
  Launch Phoenix in a notebook and view the traces of your LlamaIndex application in the Phoenix UI.
94
90
 
95
91
  ```python
92
+ import os
96
93
  import phoenix as px
94
+ from llama_index.core import (
95
+ Settings,
96
+ VectorStoreIndex,
97
+ SimpleDirectoryReader,
98
+ set_global_handler,
99
+ )
100
+ from llama_index.embeddings.openai import OpenAIEmbedding
101
+ from llama_index.llms.openai import OpenAI
102
+
103
+ os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"
97
104
 
98
105
  # To view traces in Phoenix, you will first have to start a Phoenix server. You can do this by running the following:
99
106
  session = px.launch_app()
100
107
 
101
108
 
102
- # Once you have started a Phoenix server, you can start your LlamaIndex application with the `OpenInferenceTraceCallback` as a callback. To do this, you will have to add the callback to the initialization of your LlamaIndex application:
109
+ # Once you have started a Phoenix server, you can start your LlamaIndex application and configure it to send traces to Phoenix. To do this, you will have to configure Phoenix as the global handler.
103
110
 
104
- from phoenix.trace.llama_index import (
105
- OpenInferenceTraceCallbackHandler,
106
- )
111
+ set_global_handler("arize_phoenix")
107
112
 
108
- # Initialize the callback handler
109
- callback_handler = OpenInferenceTraceCallbackHandler()
110
113
 
111
114
  # LlamaIndex application initialization may vary
112
115
  # depending on your application
113
- service_context = ServiceContext.from_defaults(
114
- llm_predictor=LLMPredictor(llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)),
115
- embed_model=OpenAIEmbedding(model="text-embedding-ada-002"),
116
- callback_manager=CallbackManager(handlers=[callback_handler]),
117
- )
118
- index = load_index_from_storage(
119
- storage_context,
120
- service_context=service_context,
121
- )
116
+ Settings.llm = OpenAI(model="gpt-4-turbo-preview")
117
+ Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")
118
+
119
+
120
+ # Load your data and create an index. Note you usually want to store your index in a persistent store like a database or the file system
121
+ documents = SimpleDirectoryReader("YOUR_DATA_DIRECTORY").load_data()
122
+ index = VectorStoreIndex.from_documents(documents)
123
+
122
124
  query_engine = index.as_query_engine()
123
125
 
124
126
  # Query your LlamaIndex application
@@ -194,8 +196,6 @@ session.url
194
196
 
195
197
  ## LLM Evals
196
198
 
197
- 🚧 LLM Evals is still under construction under a sub-module `arize-phoenix[experimental]`
198
-
199
199
  [![Open in Colab](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=grey&color=blue&logoColor=orange&label=%20)](https://colab.research.google.com/github/Arize-ai/phoenix/blob/main/tutorials/evals/evaluate_relevance_classifications.ipynb) [![Open in GitHub](https://img.shields.io/static/v1?message=Open%20in%20GitHub&logo=github&labelColor=grey&color=blue&logoColor=white&label=%20)](https://github.com/Arize-ai/phoenix/blob/main/tutorials/evals/evaluate_relevance_classifications.ipynb)
200
200
 
201
201
  Phoenix provides tooling to evaluate LLM applications, including tools to determine the relevance or irrelevance of documents retrieved by a retrieval-augmented generation (RAG) application, whether or not the response is toxic, and much more.
@@ -210,12 +210,12 @@ Phoenix's approach to LLM evals is notable for the following reasons:
210
210
  Here is an example of running the RAG relevance eval on a dataset of Wikipedia questions and answers:
211
211
 
212
212
  ```shell
213
- # Install phoenix as well as the experimental subpackage
214
- pip install arize-phoenix[experimental] ipython matplotlib openai pycm scikit-learn
213
+ # Install phoenix as well as the evals subpackage
214
+ pip install 'arize-phoenix[evals]' ipython matplotlib openai pycm scikit-learn
215
215
  ```
216
216
 
217
217
  ```python
218
- from phoenix.experimental.evals import (
218
+ from phoenix.evals import (
219
219
  RAG_RELEVANCY_PROMPT_TEMPLATE,
220
220
  RAG_RELEVANCY_PROMPT_RAILS_MAP,
221
221
  OpenAIModel,
@@ -251,7 +251,7 @@ y_pred = df["eval_relevance"]
251
251
  precision, recall, f1, support = precision_recall_fscore_support(y_true, y_pred)
252
252
  ```
253
253
 
254
- To learn more about LLM Evals, see the [LLM Evals documentation](https://docs.arize.com/phoenix/concepts/llm-evals/).
254
+ To learn more about LLM Evals, see the [Evals documentation](https://docs.arize.com/phoenix/concepts/llm-evals/).
255
255
 
256
256
  ## Embedding Analysis
257
257
 
@@ -56,7 +56,7 @@ dev = [
56
56
  "hatch",
57
57
  "jupyter",
58
58
  "nbqa",
59
- "ruff==0.1.5",
59
+ "ruff==0.3.0",
60
60
  "pandas-stubs<=2.0.2.230605", # version 2.0.3.230814 is causing a dependency conflict.
61
61
  "pytest==7.4.4",
62
62
  "pytest-asyncio",
@@ -115,14 +115,9 @@ dependencies = [
115
115
  "pytest-cov",
116
116
  "pytest-lazy-fixture",
117
117
  "arize",
118
- "langchain>=0.0.334",
119
118
  "litellm>=1.0.3",
120
- "llama-index>=0.10.3",
121
119
  "openai>=1.0.0",
122
120
  "tenacity",
123
- "nltk==3.8.1",
124
- "sentence-transformers==2.2.2",
125
- "pydantic<2", # for @root_validator in llama-index
126
121
  "requests",
127
122
  "protobuf==3.20", # version minimum (for tests)
128
123
  "responses",
@@ -136,13 +131,13 @@ dependencies = [
136
131
  [tool.hatch.envs.type]
137
132
  dependencies = [
138
133
  "mypy==1.5.1",
139
- "pydantic==v1.10.14", # for mypy
140
- "llama-index<0.10.0",
134
+ "tenacity",
141
135
  "pandas-stubs<=2.0.2.230605", # version 2.0.3.230814 is causing a dependency conflict.
142
136
  "types-psutil",
143
137
  "types-tqdm",
144
138
  "types-requests",
145
139
  "types-protobuf",
140
+ "types-setuptools",
146
141
  "openai>=1.0.0",
147
142
  "litellm>=1.0.3",
148
143
  ]
@@ -150,7 +145,7 @@ dependencies = [
150
145
  [tool.hatch.envs.style]
151
146
  detached = true
152
147
  dependencies = [
153
- "ruff~=0.1.5",
148
+ "ruff==0.3.0",
154
149
  ]
155
150
 
156
151
  [tool.hatch.envs.notebooks]
@@ -207,12 +202,12 @@ check = [
207
202
 
208
203
  [tool.hatch.envs.style.scripts]
209
204
  check = [
210
- "ruff .",
211
205
  "ruff format --check --diff .",
206
+ "ruff check .",
212
207
  ]
213
208
  fix = [
214
209
  "ruff format .",
215
- "ruff --fix .",
210
+ "ruff check --fix .",
216
211
  ]
217
212
 
218
213
  [tool.hatch.envs.notebooks.scripts]
@@ -315,6 +310,7 @@ ignore_missing_imports = true
315
310
  exclude = [
316
311
  "packages",
317
312
  "src/phoenix/evals/",
313
+ "dist/",
318
314
  ".git",
319
315
  "__pycache__",
320
316
  "docs/source/conf.py",
@@ -322,13 +318,15 @@ exclude = [
322
318
  "*.pyi",
323
319
  ]
324
320
  extend-include = ["*.ipynb"]
325
- ignore-init-module-imports = true
326
321
  line-length = 100
327
- select = ["E", "F", "W", "I"]
328
322
  target-version = "py38"
329
323
 
330
324
  [tool.ruff.lint.per-file-ignores]
331
325
  "*.ipynb" = ["E402", "E501"]
332
326
 
333
- [tool.ruff.isort]
327
+ [tool.ruff.lint]
328
+ ignore-init-module-imports = true
329
+ select = ["E", "F", "W", "I"]
330
+
331
+ [tool.ruff.lint.isort]
334
332
  force-single-line = false
@@ -33,15 +33,15 @@ class Evals:
33
33
  self._trace_evaluations_by_name: DefaultDict[
34
34
  EvaluationName, Dict[TraceID, pb.Evaluation]
35
35
  ] = defaultdict(dict)
36
- self._evaluations_by_trace_id: DefaultDict[
37
- TraceID, Dict[EvaluationName, pb.Evaluation]
38
- ] = defaultdict(dict)
39
- self._span_evaluations_by_name: DefaultDict[
40
- EvaluationName, Dict[SpanID, pb.Evaluation]
41
- ] = defaultdict(dict)
42
- self._evaluations_by_span_id: DefaultDict[
43
- SpanID, Dict[EvaluationName, pb.Evaluation]
44
- ] = defaultdict(dict)
36
+ self._evaluations_by_trace_id: DefaultDict[TraceID, Dict[EvaluationName, pb.Evaluation]] = (
37
+ defaultdict(dict)
38
+ )
39
+ self._span_evaluations_by_name: DefaultDict[EvaluationName, Dict[SpanID, pb.Evaluation]] = (
40
+ defaultdict(dict)
41
+ )
42
+ self._evaluations_by_span_id: DefaultDict[SpanID, Dict[EvaluationName, pb.Evaluation]] = (
43
+ defaultdict(dict)
44
+ )
45
45
  self._span_evaluation_labels: DefaultDict[EvaluationName, Set[str]] = defaultdict(set)
46
46
  self._document_evaluations_by_span_id: DefaultDict[
47
47
  SpanID, DefaultDict[EvaluationName, Dict[DocumentPosition, pb.Evaluation]]
@@ -12,36 +12,36 @@ def _get_embedding_dimensions(
12
12
  embedding_dimensions: List[EmbeddingDimension] = []
13
13
  embedding_features: EmbeddingFeatures = {}
14
14
 
15
- primary_embedding_features: Optional[
16
- EmbeddingFeatures
17
- ] = primary_dataset.schema.embedding_feature_column_names
15
+ primary_embedding_features: Optional[EmbeddingFeatures] = (
16
+ primary_dataset.schema.embedding_feature_column_names
17
+ )
18
18
  if primary_embedding_features is not None:
19
19
  embedding_features.update(primary_embedding_features)
20
- primary_prompt_column_names: Optional[
21
- EmbeddingColumnNames
22
- ] = primary_dataset.schema.prompt_column_names
20
+ primary_prompt_column_names: Optional[EmbeddingColumnNames] = (
21
+ primary_dataset.schema.prompt_column_names
22
+ )
23
23
  if primary_prompt_column_names is not None:
24
24
  embedding_features.update({"prompt": primary_prompt_column_names})
25
- primary_response_column_names: Optional[
26
- Union[str, EmbeddingColumnNames]
27
- ] = primary_dataset.schema.response_column_names
25
+ primary_response_column_names: Optional[Union[str, EmbeddingColumnNames]] = (
26
+ primary_dataset.schema.response_column_names
27
+ )
28
28
  if isinstance(primary_response_column_names, EmbeddingColumnNames):
29
29
  embedding_features.update({"response": primary_response_column_names})
30
30
 
31
31
  if reference_dataset is not None:
32
- reference_embedding_features: Optional[
33
- EmbeddingFeatures
34
- ] = reference_dataset.schema.embedding_feature_column_names
32
+ reference_embedding_features: Optional[EmbeddingFeatures] = (
33
+ reference_dataset.schema.embedding_feature_column_names
34
+ )
35
35
  if reference_embedding_features is not None:
36
36
  embedding_features.update(reference_embedding_features)
37
- reference_prompt_column_names: Optional[
38
- EmbeddingColumnNames
39
- ] = reference_dataset.schema.prompt_column_names
37
+ reference_prompt_column_names: Optional[EmbeddingColumnNames] = (
38
+ reference_dataset.schema.prompt_column_names
39
+ )
40
40
  if reference_prompt_column_names is not None:
41
41
  embedding_features.update({"prompt": reference_prompt_column_names})
42
- reference_response_column_names: Optional[
43
- Union[str, EmbeddingColumnNames]
44
- ] = reference_dataset.schema.response_column_names
42
+ reference_response_column_names: Optional[Union[str, EmbeddingColumnNames]] = (
43
+ reference_dataset.schema.response_column_names
44
+ )
45
45
  if isinstance(reference_response_column_names, EmbeddingColumnNames):
46
46
  embedding_features.update({"response": reference_response_column_names})
47
47
 
@@ -52,8 +52,7 @@ from phoenix.config import GENERATED_DATASET_NAME_PREFIX
52
52
  from phoenix.datetime_utils import floor_to_minute
53
53
 
54
54
 
55
- class DimensionRole(IntEnum):
56
- ...
55
+ class DimensionRole(IntEnum): ...
57
56
 
58
57
 
59
58
  @unique
@@ -151,8 +150,7 @@ class CompositeDimensionSpec(SchemaSpec, ABC):
151
150
  ...
152
151
 
153
152
  @abstractmethod
154
- def __iter__(self) -> Iterator[str]:
155
- ...
153
+ def __iter__(self) -> Iterator[str]: ...
156
154
 
157
155
 
158
156
  @dataclass(frozen=True)
@@ -311,12 +309,10 @@ class Column:
311
309
  object.__setattr__(self, "name", _rand_str())
312
310
 
313
311
  @overload
314
- def __call__(self, data: pd.DataFrame) -> "pd.Series[Any]":
315
- ...
312
+ def __call__(self, data: pd.DataFrame) -> "pd.Series[Any]": ...
316
313
 
317
314
  @overload
318
- def __call__(self, data: "pd.Series[Any]") -> Any:
319
- ...
315
+ def __call__(self, data: "pd.Series[Any]") -> Any: ...
320
316
 
321
317
  def __call__(self, data: DataFrameOrSeries) -> Any:
322
318
  """Extracts a value from series, or a series from a dataframe. If
@@ -569,8 +565,7 @@ class ModelData(ObjectProxy, ABC): # type: ignore
569
565
 
570
566
  @property
571
567
  @abstractmethod
572
- def null_value(self) -> Any:
573
- ...
568
+ def null_value(self) -> Any: ...
574
569
 
575
570
  def __getitem__(self, key: Any) -> Any:
576
571
  if _is_column_key(key):
@@ -614,12 +609,10 @@ class Event(ModelData):
614
609
  return np.nan
615
610
 
616
611
  @overload
617
- def __getitem__(self, key: ColumnKey) -> Any:
618
- ...
612
+ def __getitem__(self, key: ColumnKey) -> Any: ...
619
613
 
620
614
  @overload
621
- def __getitem__(self, key: Any) -> Any:
622
- ...
615
+ def __getitem__(self, key: Any) -> Any: ...
623
616
 
624
617
  def __getitem__(self, key: Any) -> Any:
625
618
  return super().__getitem__(key)
@@ -668,12 +661,10 @@ class Events(ModelData):
668
661
  )
669
662
 
670
663
  @overload
671
- def __getitem__(self, key: ColumnKey) -> "pd.Series[Any]":
672
- ...
664
+ def __getitem__(self, key: ColumnKey) -> "pd.Series[Any]": ...
673
665
 
674
666
  @overload
675
- def __getitem__(self, key: List[RowId]) -> "Events":
676
- ...
667
+ def __getitem__(self, key: List[RowId]) -> "Events": ...
677
668
 
678
669
  def __getitem__(self, key: Any) -> Any:
679
670
  if isinstance(key, list):
@@ -728,12 +719,10 @@ class Dataset(Events):
728
719
  return pd.Index(self[PREDICTION_ID])
729
720
 
730
721
  @overload
731
- def __getitem__(self, key: ColumnKey) -> "pd.Series[Any]":
732
- ...
722
+ def __getitem__(self, key: ColumnKey) -> "pd.Series[Any]": ...
733
723
 
734
724
  @overload
735
- def __getitem__(self, key: List[RowId]) -> Events:
736
- ...
725
+ def __getitem__(self, key: List[RowId]) -> Events: ...
737
726
 
738
727
  def __getitem__(self, key: Any) -> Any:
739
728
  if isinstance(key, list):
@@ -1030,32 +1019,25 @@ class Model:
1030
1019
  return ans
1031
1020
 
1032
1021
  @overload
1033
- def __getitem__(self, key: Type[Dataset]) -> Iterator[Dataset]:
1034
- ...
1022
+ def __getitem__(self, key: Type[Dataset]) -> Iterator[Dataset]: ...
1035
1023
 
1036
1024
  @overload
1037
- def __getitem__(self, key: DatasetRole) -> Dataset:
1038
- ...
1025
+ def __getitem__(self, key: DatasetRole) -> Dataset: ...
1039
1026
 
1040
1027
  @overload
1041
- def __getitem__(self, key: ColumnKey) -> Dimension:
1042
- ...
1028
+ def __getitem__(self, key: ColumnKey) -> Dimension: ...
1043
1029
 
1044
1030
  @overload
1045
- def __getitem__(self, key: MultiDimensionKey) -> Iterator[Dimension]:
1046
- ...
1031
+ def __getitem__(self, key: MultiDimensionKey) -> Iterator[Dimension]: ...
1047
1032
 
1048
1033
  @overload
1049
- def __getitem__(self, key: Type[ScalarDimension]) -> Iterator[ScalarDimension]:
1050
- ...
1034
+ def __getitem__(self, key: Type[ScalarDimension]) -> Iterator[ScalarDimension]: ...
1051
1035
 
1052
1036
  @overload
1053
- def __getitem__(self, key: Type[EmbeddingDimension]) -> Iterator[EmbeddingDimension]:
1054
- ...
1037
+ def __getitem__(self, key: Type[EmbeddingDimension]) -> Iterator[EmbeddingDimension]: ...
1055
1038
 
1056
1039
  @overload
1057
- def __getitem__(self, key: Type[Dimension]) -> Iterator[Dimension]:
1058
- ...
1040
+ def __getitem__(self, key: Type[Dimension]) -> Iterator[Dimension]: ...
1059
1041
 
1060
1042
  @overload
1061
1043
  def __getitem__(
@@ -1064,8 +1046,7 @@ class Model:
1064
1046
  MultiDimensionKey,
1065
1047
  Union[Type[ScalarDimension], Type[EmbeddingDimension]],
1066
1048
  ],
1067
- ) -> Iterator[Dimension]:
1068
- ...
1049
+ ) -> Iterator[Dimension]: ...
1069
1050
 
1070
1051
  def __getitem__(self, key: Any) -> Any:
1071
1052
  if key is Dataset:
@@ -1124,8 +1105,7 @@ class Model:
1124
1105
  obj: DimensionRole,
1125
1106
  cls: Type[Dimension] = ScalarDimension,
1126
1107
  **kwargs: Any,
1127
- ) -> Dimension:
1128
- ...
1108
+ ) -> Dimension: ...
1129
1109
 
1130
1110
  @overload
1131
1111
  def _new_dimension(
@@ -1133,16 +1113,14 @@ class Model:
1133
1113
  obj: Name,
1134
1114
  cls: Type[Dimension] = ScalarDimension,
1135
1115
  **kwargs: Any,
1136
- ) -> Dimension:
1137
- ...
1116
+ ) -> Dimension: ...
1138
1117
 
1139
1118
  @overload
1140
1119
  def _new_dimension(
1141
1120
  self,
1142
1121
  obj: Dimension,
1143
1122
  **kwargs: Any,
1144
- ) -> Dimension:
1145
- ...
1123
+ ) -> Dimension: ...
1146
1124
 
1147
1125
  def _new_dimension(
1148
1126
  self, obj: Any, cls: Type[Dimension] = ScalarDimension, **kwargs: Any
@@ -21,8 +21,7 @@ _unset = Unset()
21
21
 
22
22
 
23
23
  class Executor(Protocol):
24
- def run(self, inputs: Sequence[Any]) -> List[Any]:
25
- ...
24
+ def run(self, inputs: Sequence[Any]) -> List[Any]: ...
26
25
 
27
26
 
28
27
  class AsyncExecutor(Executor):
@@ -1,7 +1,22 @@
1
- from typing import List
1
+ """
2
+ Token processing functions for supported models. This module is being deprecated.
3
+ """
4
+
5
+ import logging
6
+ import sys
7
+ from typing import Any, List
2
8
 
3
9
  from ..models import BaseEvalModel
4
10
 
11
+ logger = logging.getLogger(__name__)
12
+
13
+ _DEPRECATION_WARNING = (
14
+ "The processing module is being deprecated. For advanced token processing, please use the "
15
+ "encoding approach recommended by the model provider. For example, OpenAI models can use the "
16
+ "`tiktoken` library to encode and decode text. For other models, please refer to the model "
17
+ "provider's documentation."
18
+ )
19
+
5
20
 
6
21
  def truncate_text_by_model(model: BaseEvalModel, text: str, token_buffer: int = 0) -> str:
7
22
  """Truncates text using a give model token limit.
@@ -42,3 +57,20 @@ def concatenate_and_truncate_chunks(
42
57
  str: _description_
43
58
  """
44
59
  return truncate_text_by_model(model=model, text=" ".join(chunks), token_buffer=token_buffer)
60
+
61
+
62
+ class _DEPRECATED_MODULE:
63
+ __all__ = ("truncate_text_by_model", "concatenate_and_truncate_chunks")
64
+
65
+ def __getattr__(self, name: str) -> Any:
66
+ if name == "truncate_text_by_model":
67
+ logger.warning(_DEPRECATION_WARNING)
68
+ return truncate_text_by_model
69
+ if name == "concatenate_and_truncate_chunks":
70
+ logger.warning(_DEPRECATION_WARNING)
71
+ return concatenate_and_truncate_chunks
72
+ raise AttributeError(f"module {__name__} has no attribute {name}")
73
+
74
+
75
+ # See e.g. https://stackoverflow.com/a/7668273
76
+ sys.modules[__name__] = _DEPRECATED_MODULE() # type: ignore
@@ -158,17 +158,13 @@ class BaseEvalModel(ABC):
158
158
  raise ImportError(msg)
159
159
 
160
160
  @abstractmethod
161
- def get_tokens_from_text(self, text: str) -> List[int]:
162
- ...
161
+ def get_tokens_from_text(self, text: str) -> List[int]: ...
163
162
 
164
163
  @abstractmethod
165
- def get_text_from_tokens(self, tokens: List[int]) -> str:
166
- ...
164
+ def get_text_from_tokens(self, tokens: List[int]) -> str: ...
167
165
 
168
166
  @abstractproperty
169
- def max_context_size(self) -> int:
170
- ...
167
+ def max_context_size(self) -> int: ...
171
168
 
172
169
  @abstractproperty
173
- def encoder(self) -> "Encoding":
174
- ...
170
+ def encoder(self) -> "Encoding": ...
@@ -67,7 +67,7 @@ class LiteLLMModel(BaseEvalModel):
67
67
  self._litellm = litellm
68
68
  env_info = validate_environment(self._litellm.utils.get_llm_provider(self.model))
69
69
 
70
- if not env_info["keys_in_environment"]:
70
+ if not env_info["keys_in_environment"] and env_info["missing_keys"]:
71
71
  raise RuntimeError(
72
72
  f"Missing environment variable(s): '{str(env_info['missing_keys'])}', for "
73
73
  f"model: {self.model}. \nFor additional information about the right "