arize-phoenix 3.0.2__tar.gz → 3.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arize-phoenix might be problematic. See the advisory on the package registry page for more details.

Files changed (180)
  1. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/PKG-INFO +5 -3
  2. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/pyproject.toml +5 -3
  3. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/core/traces.py +14 -9
  4. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/functions/classify.py +5 -1
  5. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/models/litellm.py +30 -9
  6. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/models/openai.py +36 -16
  7. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/models/vertexai.py +49 -7
  8. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/utils/__init__.py +1 -1
  9. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/SpanSort.py +4 -4
  10. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Span.py +13 -14
  11. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/session/session.py +4 -1
  12. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/dsl/filter.py +40 -7
  13. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/dsl/helpers.py +7 -7
  14. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/dsl/query.py +3 -1
  15. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/errors.py +4 -0
  16. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/fixtures.py +0 -2
  17. arize_phoenix-3.1.0/src/phoenix/trace/llama_index/__init__.py +3 -0
  18. arize_phoenix-3.1.0/src/phoenix/trace/llama_index/callback.py +77 -0
  19. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/otel.py +52 -14
  20. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/schemas.py +4 -6
  21. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/span_json_decoder.py +6 -5
  22. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/span_json_encoder.py +1 -6
  23. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/trace_dataset.py +24 -14
  24. arize_phoenix-3.1.0/src/phoenix/version.py +1 -0
  25. arize_phoenix-3.0.2/src/phoenix/trace/llama_index/__init__.py +0 -4
  26. arize_phoenix-3.0.2/src/phoenix/trace/llama_index/callback.py +0 -42
  27. arize_phoenix-3.0.2/src/phoenix/trace/llama_index/debug_callback.py +0 -50
  28. arize_phoenix-3.0.2/src/phoenix/trace/semantic_conventions.py +0 -172
  29. arize_phoenix-3.0.2/src/phoenix/version.py +0 -1
  30. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/.gitignore +0 -0
  31. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/IP_NOTICE +0 -0
  32. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/LICENSE +0 -0
  33. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/README.md +0 -0
  34. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/__init__.py +0 -0
  35. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/config.py +0 -0
  36. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/core/__init__.py +0 -0
  37. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/core/embedding_dimension.py +0 -0
  38. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/core/evals.py +0 -0
  39. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/core/model.py +0 -0
  40. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/core/model_schema.py +0 -0
  41. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/core/model_schema_adapter.py +0 -0
  42. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/datasets/__init__.py +0 -0
  43. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/datasets/dataset.py +0 -0
  44. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/datasets/errors.py +0 -0
  45. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/datasets/fixtures.py +0 -0
  46. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/datasets/schema.py +0 -0
  47. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/datasets/validation.py +0 -0
  48. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/datetime_utils.py +0 -0
  49. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/exceptions.py +0 -0
  50. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/__init__.py +0 -0
  51. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/__init__.py +0 -0
  52. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/evaluators.py +0 -0
  53. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/functions/__init__.py +0 -0
  54. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/functions/executor.py +0 -0
  55. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/functions/generate.py +0 -0
  56. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/functions/processing.py +0 -0
  57. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/models/__init__.py +0 -0
  58. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/models/anthropic.py +0 -0
  59. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/models/base.py +0 -0
  60. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/models/bedrock.py +0 -0
  61. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/models/rate_limiters.py +0 -0
  62. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/models/vertex.py +0 -0
  63. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/retrievals.py +0 -0
  64. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/templates/__init__.py +0 -0
  65. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/templates/default_templates.py +0 -0
  66. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/templates/template.py +0 -0
  67. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/experimental/evals/utils/threads.py +0 -0
  68. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/metrics/README.md +0 -0
  69. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/metrics/__init__.py +0 -0
  70. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/metrics/binning.py +0 -0
  71. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/metrics/metrics.py +0 -0
  72. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/metrics/mixins.py +0 -0
  73. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/metrics/retrieval_metrics.py +0 -0
  74. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/metrics/timeseries.py +0 -0
  75. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/metrics/wrappers.py +0 -0
  76. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/pointcloud/__init__.py +0 -0
  77. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/pointcloud/clustering.py +0 -0
  78. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/pointcloud/pointcloud.py +0 -0
  79. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/pointcloud/projectors.py +0 -0
  80. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/pointcloud/umap_parameters.py +0 -0
  81. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/py.typed +0 -0
  82. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/__init__.py +0 -0
  83. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/__init__.py +0 -0
  84. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/context.py +0 -0
  85. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/helpers.py +0 -0
  86. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/ClusterInput.py +0 -0
  87. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/Coordinates.py +0 -0
  88. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/DataQualityMetricInput.py +0 -0
  89. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/DimensionFilter.py +0 -0
  90. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/DimensionInput.py +0 -0
  91. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/Granularity.py +0 -0
  92. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/PerformanceMetricInput.py +0 -0
  93. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/TimeRange.py +0 -0
  94. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/input_types/__init__.py +0 -0
  95. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/interceptor.py +0 -0
  96. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/routers/__init__.py +0 -0
  97. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/routers/evaluation_handler.py +0 -0
  98. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/routers/span_handler.py +0 -0
  99. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/routers/trace_handler.py +0 -0
  100. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/routers/utils.py +0 -0
  101. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/schema.py +0 -0
  102. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Cluster.py +0 -0
  103. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DataQualityMetric.py +0 -0
  104. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Dataset.py +0 -0
  105. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DatasetInfo.py +0 -0
  106. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DatasetRole.py +0 -0
  107. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DatasetValues.py +0 -0
  108. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Dimension.py +0 -0
  109. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DimensionDataType.py +0 -0
  110. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DimensionShape.py +0 -0
  111. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DimensionType.py +0 -0
  112. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DimensionWithValue.py +0 -0
  113. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DocumentEvaluationSummary.py +0 -0
  114. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/DocumentRetrievalMetrics.py +0 -0
  115. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/EmbeddingDimension.py +0 -0
  116. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/EmbeddingMetadata.py +0 -0
  117. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Evaluation.py +0 -0
  118. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/EvaluationSummary.py +0 -0
  119. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Event.py +0 -0
  120. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/EventMetadata.py +0 -0
  121. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/ExportEventsMutation.py +0 -0
  122. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/ExportedFile.py +0 -0
  123. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Functionality.py +0 -0
  124. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/MimeType.py +0 -0
  125. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Model.py +0 -0
  126. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/NumericRange.py +0 -0
  127. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/PerformanceMetric.py +0 -0
  128. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/PromptResponse.py +0 -0
  129. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Retrieval.py +0 -0
  130. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/ScalarDriftMetricEnum.py +0 -0
  131. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/Segments.py +0 -0
  132. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/SortDir.py +0 -0
  133. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/TimeSeries.py +0 -0
  134. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/UMAPPoints.py +0 -0
  135. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/ValidationResult.py +0 -0
  136. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/VectorDriftMetricEnum.py +0 -0
  137. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/__init__.py +0 -0
  138. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/node.py +0 -0
  139. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/api/types/pagination.py +0 -0
  140. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/app.py +0 -0
  141. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/main.py +0 -0
  142. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/apple-touch-icon-114x114.png +0 -0
  143. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/apple-touch-icon-120x120.png +0 -0
  144. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/apple-touch-icon-144x144.png +0 -0
  145. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/apple-touch-icon-152x152.png +0 -0
  146. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/apple-touch-icon-180x180.png +0 -0
  147. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/apple-touch-icon-72x72.png +0 -0
  148. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/apple-touch-icon-76x76.png +0 -0
  149. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/apple-touch-icon.png +0 -0
  150. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/favicon.ico +0 -0
  151. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/index.css +0 -0
  152. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/index.js +0 -0
  153. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/static/modernizr.js +0 -0
  154. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/templates/__init__.py +0 -0
  155. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/templates/index.html +0 -0
  156. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/server/thread_server.py +0 -0
  157. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/services.py +0 -0
  158. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/session/__init__.py +0 -0
  159. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/session/client.py +0 -0
  160. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/session/data_extractor.py +0 -0
  161. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/session/evaluation.py +0 -0
  162. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/__init__.py +0 -0
  163. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/dsl/__init__.py +0 -0
  164. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/dsl/missing.py +0 -0
  165. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/evaluation_conventions.py +0 -0
  166. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/exporter.py +0 -0
  167. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/langchain/__init__.py +0 -0
  168. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/langchain/instrumentor.py +0 -0
  169. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/langchain/tracer.py +0 -0
  170. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/openai/__init__.py +0 -0
  171. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/openai/instrumentor.py +0 -0
  172. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/span_evaluations.py +0 -0
  173. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/tracer.py +0 -0
  174. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/utils.py +0 -0
  175. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/v1/__init__.py +0 -0
  176. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/v1/evaluation_pb2.py +0 -0
  177. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/trace/v1/evaluation_pb2.pyi +0 -0
  178. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/utilities/__init__.py +0 -0
  179. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/utilities/error_handling.py +0 -0
  180. {arize_phoenix-3.0.2 → arize_phoenix-3.1.0}/src/phoenix/utilities/logging.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: arize-phoenix
3
- Version: 3.0.2
3
+ Version: 3.1.0
4
4
  Summary: ML Observability in your notebook
5
5
  Project-URL: Documentation, https://docs.arize.com/phoenix/
6
6
  Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -23,6 +23,7 @@ Requires-Dist: numpy
23
23
  Requires-Dist: openinference-instrumentation-langchain
24
24
  Requires-Dist: openinference-instrumentation-llama-index
25
25
  Requires-Dist: openinference-instrumentation-openai
26
+ Requires-Dist: openinference-semantic-conventions
26
27
  Requires-Dist: opentelemetry-exporter-otlp
27
28
  Requires-Dist: opentelemetry-proto
28
29
  Requires-Dist: opentelemetry-sdk
@@ -50,7 +51,7 @@ Requires-Dist: hatch; extra == 'dev'
50
51
  Requires-Dist: jupyter; extra == 'dev'
51
52
  Requires-Dist: langchain>=0.0.334; extra == 'dev'
52
53
  Requires-Dist: litellm>=1.0.3; extra == 'dev'
53
- Requires-Dist: llama-index<0.10.0; extra == 'dev'
54
+ Requires-Dist: llama-index>=0.10.3; extra == 'dev'
54
55
  Requires-Dist: nbqa; extra == 'dev'
55
56
  Requires-Dist: pandas-stubs<=2.0.2.230605; extra == 'dev'
56
57
  Requires-Dist: pre-commit; extra == 'dev'
@@ -63,7 +64,8 @@ Requires-Dist: strawberry-graphql[debug-server]==0.208.2; extra == 'dev'
63
64
  Provides-Extra: experimental
64
65
  Requires-Dist: tenacity; extra == 'experimental'
65
66
  Provides-Extra: llama-index
66
- Requires-Dist: llama-index==0.9.45; extra == 'llama-index'
67
+ Requires-Dist: llama-index==0.10.3; extra == 'llama-index'
68
+ Requires-Dist: openinference-instrumentation-llama-index>=1.0.0; extra == 'llama-index'
67
69
  Description-Content-Type: text/markdown
68
70
 
69
71
  <p align="center">
@@ -43,6 +43,7 @@ dependencies = [
43
43
  "opentelemetry-sdk",
44
44
  "opentelemetry-proto",
45
45
  "opentelemetry-exporter-otlp",
46
+ "openinference-semantic-conventions",
46
47
  "openinference-instrumentation-langchain",
47
48
  "openinference-instrumentation-llama-index",
48
49
  "openinference-instrumentation-openai",
@@ -64,7 +65,7 @@ dev = [
64
65
  "strawberry-graphql[debug-server]==0.208.2",
65
66
  "pre-commit",
66
67
  "arize[AutoEmbeddings, LLM_Evaluation]",
67
- "llama-index<0.10.0",
68
+ "llama-index>=0.10.3",
68
69
  "langchain>=0.0.334",
69
70
  "litellm>=1.0.3",
70
71
  "google-cloud-aiplatform>=1.3",
@@ -74,7 +75,8 @@ experimental = [
74
75
  "tenacity",
75
76
  ]
76
77
  llama-index = [
77
- "llama-index==0.9.45", # always pin to a version that keeps our notebooks working
78
+ "llama-index==0.10.3", # always pin to a version that keeps our notebooks working
79
+ "openinference-instrumentation-llama-index>=1.0.0",
78
80
  ]
79
81
 
80
82
  [project.urls]
@@ -109,7 +111,7 @@ dependencies = [
109
111
  "arize",
110
112
  "langchain>=0.0.334",
111
113
  "litellm>=1.0.3",
112
- "llama-index<0.10.0",
114
+ "llama-index>=0.10.3",
113
115
  "openai>=1.0.0",
114
116
  "tenacity",
115
117
  "nltk==3.8.1",
@@ -20,12 +20,13 @@ from typing import (
20
20
 
21
21
  import opentelemetry.proto.trace.v1.trace_pb2 as otlp
22
22
  from ddsketch import DDSketch
23
+ from openinference.semconv.trace import SpanAttributes
23
24
  from sortedcontainers import SortedKeyList
24
25
  from typing_extensions import TypeAlias
25
26
  from wrapt import ObjectProxy
26
27
 
28
+ import phoenix.trace.schemas
27
29
  from phoenix.datetime_utils import right_open_time_range
28
- from phoenix.trace import semantic_conventions
29
30
  from phoenix.trace.otel import decode
30
31
  from phoenix.trace.schemas import (
31
32
  ATTRIBUTE_PREFIX,
@@ -33,12 +34,10 @@ from phoenix.trace.schemas import (
33
34
  CONTEXT_PREFIX,
34
35
  ComputedAttributes,
35
36
  Span,
36
- SpanAttributes,
37
37
  SpanID,
38
38
  SpanStatusCode,
39
39
  TraceID,
40
40
  )
41
- from phoenix.trace.semantic_conventions import RETRIEVAL_DOCUMENTS
42
41
 
43
42
  END_OF_QUEUE = None # sentinel value for queue termination
44
43
 
@@ -50,9 +49,9 @@ SPAN_ID = CONTEXT_PREFIX + "span_id"
50
49
  PARENT_ID = "parent_id"
51
50
  START_TIME = "start_time"
52
51
  END_TIME = "end_time"
53
- LLM_TOKEN_COUNT_TOTAL = ATTRIBUTE_PREFIX + semantic_conventions.LLM_TOKEN_COUNT_TOTAL
54
- LLM_TOKEN_COUNT_PROMPT = ATTRIBUTE_PREFIX + semantic_conventions.LLM_TOKEN_COUNT_PROMPT
55
- LLM_TOKEN_COUNT_COMPLETION = ATTRIBUTE_PREFIX + semantic_conventions.LLM_TOKEN_COUNT_COMPLETION
52
+ LLM_TOKEN_COUNT_TOTAL = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_TOTAL
53
+ LLM_TOKEN_COUNT_PROMPT = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_PROMPT
54
+ LLM_TOKEN_COUNT_COMPLETION = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
56
55
 
57
56
 
58
57
  class ReadableSpan(ObjectProxy): # type: ignore
@@ -73,7 +72,9 @@ class ReadableSpan(ObjectProxy): # type: ignore
73
72
  @property
74
73
  def span(self) -> Span:
75
74
  span = decode(self._self_otlp_span)
76
- span.attributes.update(cast(SpanAttributes, self._self_computed_values))
75
+ span.attributes.update(
76
+ cast(phoenix.trace.schemas.SpanAttributes, self._self_computed_values)
77
+ )
77
78
  # TODO: compute latency rank percent (which can change depending on how
78
79
  # many spans already ingested).
79
80
  return span
@@ -333,9 +334,13 @@ class Traces:
333
334
  self._token_count_total -= existing_span[LLM_TOKEN_COUNT_TOTAL] or 0
334
335
  self._token_count_total += new_span[LLM_TOKEN_COUNT_TOTAL] or 0
335
336
  # Update number of documents
336
- num_documents_update = len(new_span.attributes.get(RETRIEVAL_DOCUMENTS) or ())
337
+ num_documents_update = len(
338
+ new_span.attributes.get(SpanAttributes.RETRIEVAL_DOCUMENTS) or ()
339
+ )
337
340
  if existing_span:
338
- num_documents_update -= len(existing_span.attributes.get(RETRIEVAL_DOCUMENTS) or ())
341
+ num_documents_update -= len(
342
+ existing_span.attributes.get(SpanAttributes.RETRIEVAL_DOCUMENTS) or ()
343
+ )
339
344
  if num_documents_update:
340
345
  self._num_documents[span_id] += num_documents_update
341
346
  # Process previously orphaned spans, if any.
@@ -19,6 +19,7 @@ from typing import (
19
19
  )
20
20
 
21
21
  import pandas as pd
22
+ from openinference.semconv.trace import DocumentAttributes, SpanAttributes
22
23
  from pandas import DataFrame
23
24
  from typing_extensions import TypeAlias
24
25
 
@@ -41,9 +42,12 @@ from phoenix.experimental.evals.utils import (
41
42
  parse_openai_function_call,
42
43
  snap_to_rail,
43
44
  )
44
- from phoenix.trace.semantic_conventions import DOCUMENT_CONTENT, INPUT_VALUE, RETRIEVAL_DOCUMENTS
45
45
  from phoenix.utilities.logging import printif
46
46
 
47
+ DOCUMENT_CONTENT = DocumentAttributes.DOCUMENT_CONTENT
48
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
49
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
50
+
47
51
  logger = logging.getLogger(__name__)
48
52
 
49
53
 
@@ -1,4 +1,5 @@
1
1
  import logging
2
+ import warnings
2
3
  from dataclasses import dataclass, field
3
4
  from typing import TYPE_CHECKING, Any, Dict, List, Optional
4
5
 
@@ -12,7 +13,7 @@ logger = logging.getLogger(__name__)
12
13
 
13
14
  @dataclass
14
15
  class LiteLLMModel(BaseEvalModel):
15
- model_name: str = "gpt-3.5-turbo"
16
+ model: str = "gpt-3.5-turbo"
16
17
  """The model name to use."""
17
18
  temperature: float = 0.0
18
19
  """What sampling temperature to use."""
@@ -34,22 +35,42 @@ class LiteLLMModel(BaseEvalModel):
34
35
  max_content_size: Optional[int] = None
35
36
  """If you're using a fine-tuned model, set this to the maximum content size"""
36
37
 
38
+ # Deprecated fields
39
+ model_name: Optional[str] = None
40
+ """
41
+ .. deprecated:: 3.0.0
42
+ use `model` instead. This will be removed in a future release.
43
+ """
44
+
37
45
  def __post_init__(self) -> None:
46
+ self._migrate_model_name()
38
47
  self._init_environment()
39
48
  self._init_model_encoding()
40
49
 
50
+ def _migrate_model_name(self) -> None:
51
+ if self.model_name is not None:
52
+ warning_message = "The `model_name` field is deprecated. Use `model` instead. \
53
+ This will be removed in a future release."
54
+ warnings.warn(
55
+ warning_message,
56
+ DeprecationWarning,
57
+ )
58
+ print(warning_message)
59
+ self.model = self.model_name
60
+ self.model_name = None
61
+
41
62
  def _init_environment(self) -> None:
42
63
  try:
43
64
  import litellm
44
65
  from litellm import validate_environment
45
66
 
46
67
  self._litellm = litellm
47
- env_info = validate_environment(self._litellm.utils.get_llm_provider(self.model_name))
68
+ env_info = validate_environment(self._litellm.utils.get_llm_provider(self.model))
48
69
 
49
70
  if not env_info["keys_in_environment"]:
50
71
  raise RuntimeError(
51
72
  f"Missing environment variable(s): '{str(env_info['missing_keys'])}', for "
52
- f"model: {self.model_name}. \nFor additional information about the right "
73
+ f"model: {self.model}. \nFor additional information about the right "
53
74
  "environment variables for specific model providers:\n"
54
75
  "https://docs.litellm.ai/docs/completion/input#provider-specific-params."
55
76
  )
@@ -67,14 +88,14 @@ class LiteLLMModel(BaseEvalModel):
67
88
 
68
89
  @property
69
90
  def max_context_size(self) -> int:
70
- context_size = self.max_content_size or self._litellm.get_max_tokens(self.model_name).get(
91
+ context_size = self.max_content_size or self._litellm.get_max_tokens(self.model).get(
71
92
  "max_tokens", None
72
93
  )
73
94
 
74
95
  if context_size is None:
75
96
  raise ValueError(
76
- "Can't determine maximum context size. An unknown model name was "
77
- + f"used: {self.model_name}."
97
+ "Can't determine maximum context size. An unknown model was "
98
+ + f"used: {self.model}."
78
99
  )
79
100
 
80
101
  return context_size
@@ -84,11 +105,11 @@ class LiteLLMModel(BaseEvalModel):
84
105
  raise NotImplementedError
85
106
 
86
107
  def get_tokens_from_text(self, text: str) -> List[int]:
87
- result: List[int] = self._encoding(model=self.model_name, text=text)
108
+ result: List[int] = self._encoding(model=self.model, text=text)
88
109
  return result
89
110
 
90
111
  def get_text_from_tokens(self, tokens: List[int]) -> str:
91
- return str(self._decoding(model=self.model_name, tokens=tokens))
112
+ return str(self._decoding(model=self.model, tokens=tokens))
92
113
 
93
114
  async def _async_generate(self, prompt: str, **kwargs: Dict[str, Any]) -> str:
94
115
  return self._generate(prompt, **kwargs)
@@ -96,7 +117,7 @@ class LiteLLMModel(BaseEvalModel):
96
117
  def _generate(self, prompt: str, **kwargs: Dict[str, Any]) -> str:
97
118
  messages = self._get_messages_from_prompt(prompt)
98
119
  response = self._litellm.completion(
99
- model=self.model_name,
120
+ model=self.model,
100
121
  messages=messages,
101
122
  temperature=self.temperature,
102
123
  max_tokens=self.max_tokens,
@@ -1,5 +1,6 @@
1
1
  import logging
2
2
  import os
3
+ import warnings
3
4
  from dataclasses import dataclass, field, fields
4
5
  from typing import (
5
6
  TYPE_CHECKING,
@@ -64,8 +65,10 @@ class OpenAIModel(BaseEvalModel):
64
65
  An optional base URL to use for the OpenAI API. If not provided, will default
65
66
  to what's configured in OpenAI
66
67
  """
67
- model_name: str = "gpt-4"
68
- """Model name to use. In of azure, this is the deployment name such as gpt-35-instant"""
68
+ model: str = "gpt-4"
69
+ """
70
+ Model name to use. In of azure, this is the deployment name such as gpt-35-instant
71
+ """
69
72
  temperature: float = 0.0
70
73
  """What sampling temperature to use."""
71
74
  max_tokens: int = 256
@@ -106,7 +109,15 @@ class OpenAIModel(BaseEvalModel):
106
109
  azure_ad_token: Optional[str] = field(default=None)
107
110
  azure_ad_token_provider: Optional[Callable[[], str]] = field(default=None)
108
111
 
112
+ # Deprecated fields
113
+ model_name: Optional[str] = field(default=None)
114
+ """
115
+ .. deprecated:: 3.0.0
116
+ use `model` instead. This will be removed
117
+ """
118
+
109
119
  def __post_init__(self) -> None:
120
+ self._migrate_model_name()
110
121
  self._init_environment()
111
122
  self._init_open_ai()
112
123
  self._init_tiktoken()
@@ -115,6 +126,17 @@ class OpenAIModel(BaseEvalModel):
115
126
  def reload_client(self) -> None:
116
127
  self._init_open_ai()
117
128
 
129
+ def _migrate_model_name(self) -> None:
130
+ if self.model_name:
131
+ warning_message = "The `model_name` field is deprecated. Use `model` instead. \
132
+ This will be removed in a future release."
133
+ print(
134
+ warning_message,
135
+ )
136
+ warnings.warn(warning_message, DeprecationWarning)
137
+ self.model = self.model_name
138
+ self.model_name = None
139
+
118
140
  def _init_environment(self) -> None:
119
141
  try:
120
142
  import openai
@@ -141,9 +163,7 @@ class OpenAIModel(BaseEvalModel):
141
163
  # For Azure, you need to provide the endpoint and the endpoint
142
164
  self._is_azure = bool(self.azure_endpoint)
143
165
 
144
- self._model_uses_legacy_completion_api = self.model_name.startswith(
145
- LEGACY_COMPLETION_API_MODELS
146
- )
166
+ self._model_uses_legacy_completion_api = self.model.startswith(LEGACY_COMPLETION_API_MODELS)
147
167
  if self.api_key is None:
148
168
  api_key = os.getenv(OPENAI_API_KEY_ENVVAR_NAME)
149
169
  if api_key is None:
@@ -203,7 +223,7 @@ class OpenAIModel(BaseEvalModel):
203
223
 
204
224
  def _init_tiktoken(self) -> None:
205
225
  try:
206
- encoding = self._tiktoken.encoding_for_model(self.model_name)
226
+ encoding = self._tiktoken.encoding_for_model(self.model)
207
227
  except KeyError:
208
228
  encoding = self._tiktoken.get_encoding("cl100k_base")
209
229
  self._tiktoken_encoding = encoding
@@ -333,20 +353,20 @@ class OpenAIModel(BaseEvalModel):
333
353
 
334
354
  @property
335
355
  def max_context_size(self) -> int:
336
- model_name = self.model_name
356
+ model = self.model
337
357
  # handling finetuned models
338
- if "ft-" in model_name:
339
- model_name = self.model_name.split(":")[0]
340
- if model_name == "gpt-4":
358
+ if "ft-" in model:
359
+ model = self.model.split(":")[0]
360
+ if model == "gpt-4":
341
361
  # Map gpt-4 to the current default
342
- model_name = "gpt-4-0613"
362
+ model = "gpt-4-0613"
343
363
 
344
- context_size = MODEL_TOKEN_LIMIT_MAPPING.get(model_name, None)
364
+ context_size = MODEL_TOKEN_LIMIT_MAPPING.get(model, None)
345
365
 
346
366
  if context_size is None:
347
367
  raise ValueError(
348
368
  "Can't determine maximum context size. An unknown model name was "
349
- f"used: {model_name}. Please provide a valid OpenAI model name. "
369
+ f"used: {model}. Please provide a valid OpenAI model name. "
350
370
  "Known models are: " + ", ".join(MODEL_TOKEN_LIMIT_MAPPING.keys())
351
371
  )
352
372
 
@@ -355,7 +375,7 @@ class OpenAIModel(BaseEvalModel):
355
375
  @property
356
376
  def public_invocation_params(self) -> Dict[str, Any]:
357
377
  return {
358
- **({"model": self.model_name}),
378
+ **({"model": self.model}),
359
379
  **self._default_params,
360
380
  **self.model_kwargs,
361
381
  }
@@ -388,8 +408,8 @@ class OpenAIModel(BaseEvalModel):
388
408
 
389
409
  Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
390
410
  """ # noqa
391
- model_name = self.model_name
392
- if model_name == "gpt-3.5-turbo-0301":
411
+ model = self.model
412
+ if model == "gpt-3.5-turbo-0301":
393
413
  tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
394
414
  tokens_per_name = -1 # if there's a name, the role is omitted
395
415
  else:
@@ -1,3 +1,5 @@
1
+ import logging
2
+ import warnings
1
3
  from dataclasses import dataclass
2
4
  from typing import TYPE_CHECKING, Any, Dict, List, Optional
3
5
 
@@ -6,6 +8,7 @@ from phoenix.experimental.evals.models.base import BaseEvalModel
6
8
  if TYPE_CHECKING:
7
9
  from google.auth.credentials import Credentials # type:ignore
8
10
 
11
+ logger = logging.getLogger(__name__)
9
12
 
10
13
  MINIMUM_VERTEX_AI_VERSION = "1.33.0"
11
14
 
@@ -18,9 +21,9 @@ class VertexAIModel(BaseEvalModel):
18
21
  "location (str): The default location to use when making API calls. If not "
19
22
  "set defaults to us-central-1."
20
23
  credentials: Optional["Credentials"] = None
21
- model_name: str = "text-bison"
22
- tuned_model_name: Optional[str] = None
23
- "The name of a tuned model. If provided, model_name is ignored."
24
+ model: str = "text-bison"
25
+ tuned_model: Optional[str] = None
26
+ "The name of a tuned model. If provided, model is ignored."
24
27
  max_retries: int = 6
25
28
  """Maximum number of retries to make when generating."""
26
29
  retry_min_seconds: int = 10
@@ -40,11 +43,50 @@ class VertexAIModel(BaseEvalModel):
40
43
  "How the model selects tokens for output, the next token is selected from "
41
44
  "among the top-k most probable tokens. Top-k is ignored for Codey models."
42
45
 
46
+ # Deprecated fields
47
+ model_name: Optional[str] = None
48
+ """
49
+ .. deprecated:: 3.0.0
50
+ use `model` instead. This will be removed in a future release.
51
+ """
52
+ tuned_model_name: Optional[str] = None
53
+ """
54
+ .. deprecated:: 3.0.0
55
+ use `tuned_model` instead. This will be removed in a future release.
56
+ """
57
+
43
58
  def __post_init__(self) -> None:
59
+ self._migrate_model_name()
44
60
  self._init_environment()
45
61
  self._init_vertex_ai()
46
62
  self._instantiate_model()
47
63
 
64
+ def _migrate_model_name(self) -> None:
65
+ if self.model_name is not None:
66
+ warning_message = (
67
+ "The `model_name` field is deprecated. Use `model` instead. "
68
+ + "This will be removed in a future release."
69
+ )
70
+ warnings.warn(
71
+ warning_message,
72
+ DeprecationWarning,
73
+ )
74
+ print(warning_message)
75
+ self.model = self.model_name
76
+ self.model_name = None
77
+ if self.tuned_model_name is not None:
78
+ warning_message = (
79
+ "`tuned_model_name` field is deprecated. Use `tuned_model` instead. "
80
+ + "This will be removed in a future release."
81
+ )
82
+ warnings.warn(
83
+ warning_message,
84
+ DeprecationWarning,
85
+ )
86
+ print(warning_message)
87
+ self.tuned_model = self.tuned_model_name
88
+ self.tuned_model_name = None
89
+
48
90
  def _init_environment(self) -> None:
49
91
  try:
50
92
  import google.api_core.exceptions as google_exceptions # type:ignore
@@ -72,10 +114,10 @@ class VertexAIModel(BaseEvalModel):
72
114
 
73
115
  model = TextGenerationModel
74
116
 
75
- if self.tuned_model_name:
76
- self._model = model.get_tuned_model(self.tuned_model_name)
117
+ if self.tuned_model:
118
+ self._model = model.get_tuned_model(self.tuned_model)
77
119
  else:
78
- self._model = model.from_pretrained(self.model_name)
120
+ self._model = model.from_pretrained(self.model)
79
121
 
80
122
  def verbose_generation_info(self) -> str:
81
123
  return f"VertexAI invocation parameters: {self.invocation_params}"
@@ -93,7 +135,7 @@ class VertexAIModel(BaseEvalModel):
93
135
 
94
136
  @property
95
137
  def is_codey_model(self) -> bool:
96
- return is_codey_model(self.tuned_model_name or self.model_name)
138
+ return is_codey_model(self.tuned_model or self.model)
97
139
 
98
140
  @property
99
141
  def _init_params(self) -> Dict[str, Any]:
@@ -32,7 +32,7 @@ def download_benchmark_dataset(task: str, dataset_name: str) -> pd.DataFrame:
32
32
  pandas.DataFrame: A pandas dataframe containing the data.
33
33
  """
34
34
  jsonl_file_name = f"{dataset_name}.jsonl"
35
- url = f"http://storage.googleapis.com/arize-assets/phoenix/evals/{task}/{jsonl_file_name}.zip"
35
+ url = f"http://storage.googleapis.com/arize-phoenix-assets/evals/{task}/{jsonl_file_name}.zip"
36
36
  try:
37
37
  with urlopen(url) as response:
38
38
  zip_byte_stream = BytesIO(response.read())
@@ -4,6 +4,7 @@ from typing import Any, Iterable, Iterator, Optional, Protocol
4
4
 
5
5
  import pandas as pd
6
6
  import strawberry
7
+ from openinference.semconv.trace import SpanAttributes
7
8
  from strawberry import UNSET
8
9
  from typing_extensions import assert_never
9
10
 
@@ -13,7 +14,6 @@ from phoenix.core.traces import (
13
14
  START_TIME,
14
15
  )
15
16
  from phoenix.server.api.types.SortDir import SortDir
16
- from phoenix.trace import semantic_conventions
17
17
  from phoenix.trace.schemas import ComputedAttributes, Span, SpanID
18
18
 
19
19
 
@@ -22,9 +22,9 @@ class SpanColumn(Enum):
22
22
  startTime = START_TIME
23
23
  endTime = END_TIME
24
24
  latencyMs = ComputedAttributes.LATENCY_MS.value
25
- tokenCountTotal = semantic_conventions.LLM_TOKEN_COUNT_TOTAL
26
- tokenCountPrompt = semantic_conventions.LLM_TOKEN_COUNT_PROMPT
27
- tokenCountCompletion = semantic_conventions.LLM_TOKEN_COUNT_COMPLETION
25
+ tokenCountTotal = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
26
+ tokenCountPrompt = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
27
+ tokenCountCompletion = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
28
28
  cumulativeTokenCountTotal = ComputedAttributes.CUMULATIVE_LLM_TOKEN_COUNT_TOTAL.value
29
29
  cumulativeTokenCountPrompt = ComputedAttributes.CUMULATIVE_LLM_TOKEN_COUNT_PROMPT.value
30
30
  cumulativeTokenCountCompletion = ComputedAttributes.CUMULATIVE_LLM_TOKEN_COUNT_COMPLETION.value
@@ -5,6 +5,7 @@ from enum import Enum
5
5
  from typing import Any, DefaultDict, Dict, List, Mapping, Optional, Sized, cast
6
6
 
7
7
  import strawberry
8
+ from openinference.semconv.trace import EmbeddingAttributes, SpanAttributes
8
9
  from strawberry import ID, UNSET
9
10
  from strawberry.types import Info
10
11
 
@@ -15,19 +16,17 @@ from phoenix.server.api.types.DocumentRetrievalMetrics import DocumentRetrievalM
15
16
  from phoenix.server.api.types.Evaluation import DocumentEvaluation, SpanEvaluation
16
17
  from phoenix.server.api.types.MimeType import MimeType
17
18
  from phoenix.trace.schemas import ComputedAttributes, SpanID
18
- from phoenix.trace.semantic_conventions import (
19
- EMBEDDING_EMBEDDINGS,
20
- EMBEDDING_VECTOR,
21
- EXCEPTION_MESSAGE,
22
- INPUT_MIME_TYPE,
23
- INPUT_VALUE,
24
- LLM_TOKEN_COUNT_COMPLETION,
25
- LLM_TOKEN_COUNT_PROMPT,
26
- LLM_TOKEN_COUNT_TOTAL,
27
- OUTPUT_MIME_TYPE,
28
- OUTPUT_VALUE,
29
- RETRIEVAL_DOCUMENTS,
30
- )
19
+
20
+ EMBEDDING_EMBEDDINGS = SpanAttributes.EMBEDDING_EMBEDDINGS
21
+ EMBEDDING_VECTOR = EmbeddingAttributes.EMBEDDING_VECTOR
22
+ INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE
23
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
24
+ LLM_TOKEN_COUNT_COMPLETION = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
25
+ LLM_TOKEN_COUNT_PROMPT = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
26
+ LLM_TOKEN_COUNT_TOTAL = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
27
+ OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE
28
+ OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
29
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
31
30
 
32
31
 
33
32
  @strawberry.enum
@@ -87,7 +86,7 @@ class SpanEvent:
87
86
  ) -> "SpanEvent":
88
87
  return SpanEvent(
89
88
  name=event.name,
90
- message=cast(str, event.attributes.get(EXCEPTION_MESSAGE) or ""),
89
+ message=cast(str, event.attributes.get(trace_schema.EXCEPTION_MESSAGE) or ""),
91
90
  timestamp=event.timestamp,
92
91
  )
93
92
 
@@ -477,6 +477,7 @@ def launch_app(
477
477
  f"port {port} is not occupied by another process) or file an issue "
478
478
  f"with us at https://github.com/Arize-ai/phoenix"
479
479
  )
480
+ _session = None
480
481
  return None
481
482
 
482
483
  print(f"🌍 To view the Phoenix app in your browser, visit {_session.url}")
@@ -489,7 +490,9 @@ def active_session() -> Optional[Session]:
489
490
  """
490
491
  Returns the active session if one exists, otherwise returns None
491
492
  """
492
- return _session
493
+ if _session and _session.active:
494
+ return _session
495
+ return None
493
496
 
494
497
 
495
498
  def close_app() -> None: