arize-phoenix 3.0.1__tar.gz → 3.0.3__tar.gz

This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.

Potentially problematic release: this version of arize-phoenix might be problematic.

Files changed (180)
  1. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/PKG-INFO +4 -2
  2. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/pyproject.toml +5 -3
  3. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/core/traces.py +14 -9
  4. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/functions/classify.py +20 -24
  5. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/utils/__init__.py +1 -1
  6. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/SpanSort.py +4 -4
  7. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Span.py +13 -14
  8. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/dsl/filter.py +7 -4
  9. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/dsl/helpers.py +7 -7
  10. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/dsl/query.py +3 -1
  11. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/errors.py +4 -0
  12. arize_phoenix-3.0.3/src/phoenix/trace/llama_index/__init__.py +3 -0
  13. arize_phoenix-3.0.3/src/phoenix/trace/llama_index/callback.py +77 -0
  14. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/otel.py +52 -14
  15. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/schemas.py +4 -6
  16. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/span_json_decoder.py +6 -5
  17. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/span_json_encoder.py +1 -6
  18. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/trace_dataset.py +15 -12
  19. arize_phoenix-3.0.3/src/phoenix/version.py +1 -0
  20. arize_phoenix-3.0.1/src/phoenix/trace/llama_index/__init__.py +0 -4
  21. arize_phoenix-3.0.1/src/phoenix/trace/llama_index/callback.py +0 -42
  22. arize_phoenix-3.0.1/src/phoenix/trace/llama_index/debug_callback.py +0 -50
  23. arize_phoenix-3.0.1/src/phoenix/trace/semantic_conventions.py +0 -172
  24. arize_phoenix-3.0.1/src/phoenix/version.py +0 -1
  25. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/.gitignore +0 -0
  26. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/IP_NOTICE +0 -0
  27. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/LICENSE +0 -0
  28. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/README.md +0 -0
  29. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/__init__.py +0 -0
  30. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/config.py +0 -0
  31. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/core/__init__.py +0 -0
  32. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/core/embedding_dimension.py +0 -0
  33. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/core/evals.py +0 -0
  34. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/core/model.py +0 -0
  35. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/core/model_schema.py +0 -0
  36. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/core/model_schema_adapter.py +0 -0
  37. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/datasets/__init__.py +0 -0
  38. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/datasets/dataset.py +0 -0
  39. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/datasets/errors.py +0 -0
  40. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/datasets/fixtures.py +0 -0
  41. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/datasets/schema.py +0 -0
  42. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/datasets/validation.py +0 -0
  43. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/datetime_utils.py +0 -0
  44. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/exceptions.py +0 -0
  45. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/__init__.py +0 -0
  46. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/__init__.py +0 -0
  47. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/evaluators.py +0 -0
  48. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/functions/__init__.py +0 -0
  49. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/functions/executor.py +0 -0
  50. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/functions/generate.py +0 -0
  51. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/functions/processing.py +0 -0
  52. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/models/__init__.py +0 -0
  53. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/models/anthropic.py +0 -0
  54. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/models/base.py +0 -0
  55. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/models/bedrock.py +0 -0
  56. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/models/litellm.py +0 -0
  57. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/models/openai.py +0 -0
  58. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/models/rate_limiters.py +0 -0
  59. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/models/vertex.py +0 -0
  60. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/models/vertexai.py +0 -0
  61. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/retrievals.py +0 -0
  62. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/templates/__init__.py +0 -0
  63. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/templates/default_templates.py +0 -0
  64. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/templates/template.py +0 -0
  65. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/experimental/evals/utils/threads.py +0 -0
  66. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/metrics/README.md +0 -0
  67. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/metrics/__init__.py +0 -0
  68. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/metrics/binning.py +0 -0
  69. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/metrics/metrics.py +0 -0
  70. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/metrics/mixins.py +0 -0
  71. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/metrics/retrieval_metrics.py +0 -0
  72. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/metrics/timeseries.py +0 -0
  73. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/metrics/wrappers.py +0 -0
  74. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/pointcloud/__init__.py +0 -0
  75. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/pointcloud/clustering.py +0 -0
  76. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/pointcloud/pointcloud.py +0 -0
  77. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/pointcloud/projectors.py +0 -0
  78. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/pointcloud/umap_parameters.py +0 -0
  79. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/py.typed +0 -0
  80. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/__init__.py +0 -0
  81. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/__init__.py +0 -0
  82. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/context.py +0 -0
  83. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/helpers.py +0 -0
  84. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/ClusterInput.py +0 -0
  85. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/Coordinates.py +0 -0
  86. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/DataQualityMetricInput.py +0 -0
  87. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/DimensionFilter.py +0 -0
  88. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/DimensionInput.py +0 -0
  89. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/Granularity.py +0 -0
  90. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/PerformanceMetricInput.py +0 -0
  91. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/TimeRange.py +0 -0
  92. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/input_types/__init__.py +0 -0
  93. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/interceptor.py +0 -0
  94. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/routers/__init__.py +0 -0
  95. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/routers/evaluation_handler.py +0 -0
  96. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/routers/span_handler.py +0 -0
  97. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/routers/trace_handler.py +0 -0
  98. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/routers/utils.py +0 -0
  99. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/schema.py +0 -0
  100. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Cluster.py +0 -0
  101. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DataQualityMetric.py +0 -0
  102. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Dataset.py +0 -0
  103. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DatasetInfo.py +0 -0
  104. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DatasetRole.py +0 -0
  105. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DatasetValues.py +0 -0
  106. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Dimension.py +0 -0
  107. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DimensionDataType.py +0 -0
  108. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DimensionShape.py +0 -0
  109. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DimensionType.py +0 -0
  110. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DimensionWithValue.py +0 -0
  111. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DocumentEvaluationSummary.py +0 -0
  112. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/DocumentRetrievalMetrics.py +0 -0
  113. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/EmbeddingDimension.py +0 -0
  114. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/EmbeddingMetadata.py +0 -0
  115. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Evaluation.py +0 -0
  116. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/EvaluationSummary.py +0 -0
  117. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Event.py +0 -0
  118. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/EventMetadata.py +0 -0
  119. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/ExportEventsMutation.py +0 -0
  120. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/ExportedFile.py +0 -0
  121. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Functionality.py +0 -0
  122. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/MimeType.py +0 -0
  123. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Model.py +0 -0
  124. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/NumericRange.py +0 -0
  125. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/PerformanceMetric.py +0 -0
  126. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/PromptResponse.py +0 -0
  127. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Retrieval.py +0 -0
  128. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/ScalarDriftMetricEnum.py +0 -0
  129. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/Segments.py +0 -0
  130. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/SortDir.py +0 -0
  131. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/TimeSeries.py +0 -0
  132. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/UMAPPoints.py +0 -0
  133. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/ValidationResult.py +0 -0
  134. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/VectorDriftMetricEnum.py +0 -0
  135. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/__init__.py +0 -0
  136. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/node.py +0 -0
  137. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/api/types/pagination.py +0 -0
  138. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/app.py +0 -0
  139. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/main.py +0 -0
  140. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/apple-touch-icon-114x114.png +0 -0
  141. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/apple-touch-icon-120x120.png +0 -0
  142. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/apple-touch-icon-144x144.png +0 -0
  143. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/apple-touch-icon-152x152.png +0 -0
  144. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/apple-touch-icon-180x180.png +0 -0
  145. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/apple-touch-icon-72x72.png +0 -0
  146. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/apple-touch-icon-76x76.png +0 -0
  147. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/apple-touch-icon.png +0 -0
  148. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/favicon.ico +0 -0
  149. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/index.css +0 -0
  150. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/index.js +0 -0
  151. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/static/modernizr.js +0 -0
  152. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/templates/__init__.py +0 -0
  153. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/templates/index.html +0 -0
  154. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/server/thread_server.py +0 -0
  155. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/services.py +0 -0
  156. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/session/__init__.py +0 -0
  157. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/session/client.py +0 -0
  158. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/session/data_extractor.py +0 -0
  159. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/session/evaluation.py +0 -0
  160. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/session/session.py +0 -0
  161. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/__init__.py +0 -0
  162. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/dsl/__init__.py +0 -0
  163. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/dsl/missing.py +0 -0
  164. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/evaluation_conventions.py +0 -0
  165. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/exporter.py +0 -0
  166. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/fixtures.py +0 -0
  167. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/langchain/__init__.py +0 -0
  168. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/langchain/instrumentor.py +0 -0
  169. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/langchain/tracer.py +0 -0
  170. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/openai/__init__.py +0 -0
  171. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/openai/instrumentor.py +0 -0
  172. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/span_evaluations.py +0 -0
  173. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/tracer.py +0 -0
  174. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/utils.py +0 -0
  175. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/v1/__init__.py +0 -0
  176. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/v1/evaluation_pb2.py +0 -0
  177. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/trace/v1/evaluation_pb2.pyi +0 -0
  178. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/utilities/__init__.py +0 -0
  179. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/utilities/error_handling.py +0 -0
  180. {arize_phoenix-3.0.1 → arize_phoenix-3.0.3}/src/phoenix/utilities/logging.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: arize-phoenix
- Version: 3.0.1
+ Version: 3.0.3
  Summary: ML Observability in your notebook
  Project-URL: Documentation, https://docs.arize.com/phoenix/
  Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -23,6 +23,7 @@ Requires-Dist: numpy
  Requires-Dist: openinference-instrumentation-langchain
  Requires-Dist: openinference-instrumentation-llama-index
  Requires-Dist: openinference-instrumentation-openai
+ Requires-Dist: openinference-semantic-conventions
  Requires-Dist: opentelemetry-exporter-otlp
  Requires-Dist: opentelemetry-proto
  Requires-Dist: opentelemetry-sdk
@@ -50,7 +51,7 @@ Requires-Dist: hatch; extra == 'dev'
  Requires-Dist: jupyter; extra == 'dev'
  Requires-Dist: langchain>=0.0.334; extra == 'dev'
  Requires-Dist: litellm>=1.0.3; extra == 'dev'
- Requires-Dist: llama-index>=0.9.14; extra == 'dev'
+ Requires-Dist: llama-index<0.10.0; extra == 'dev'
  Requires-Dist: nbqa; extra == 'dev'
  Requires-Dist: pandas-stubs<=2.0.2.230605; extra == 'dev'
  Requires-Dist: pre-commit; extra == 'dev'
@@ -64,6 +65,7 @@ Provides-Extra: experimental
  Requires-Dist: tenacity; extra == 'experimental'
  Provides-Extra: llama-index
  Requires-Dist: llama-index==0.9.45; extra == 'llama-index'
+ Requires-Dist: openinference-instrumentation-llama-index==0.1.3; extra == 'llama-index'
  Description-Content-Type: text/markdown

  <p align="center">
pyproject.toml

@@ -43,6 +43,7 @@ dependencies = [
  "opentelemetry-sdk",
  "opentelemetry-proto",
  "opentelemetry-exporter-otlp",
+ "openinference-semantic-conventions",
  "openinference-instrumentation-langchain",
  "openinference-instrumentation-llama-index",
  "openinference-instrumentation-openai",
@@ -64,7 +65,7 @@ dev = [
  "strawberry-graphql[debug-server]==0.208.2",
  "pre-commit",
  "arize[AutoEmbeddings, LLM_Evaluation]",
- "llama-index>=0.9.14",
+ "llama-index<0.10.0",
  "langchain>=0.0.334",
  "litellm>=1.0.3",
  "google-cloud-aiplatform>=1.3",
@@ -75,6 +76,7 @@ experimental = [
  ]
  llama-index = [
  "llama-index==0.9.45", # always pin to a version that keeps our notebooks working
+ "openinference-instrumentation-llama-index==0.1.3",
  ]

  [project.urls]
@@ -109,7 +111,7 @@ dependencies = [
  "arize",
  "langchain>=0.0.334",
  "litellm>=1.0.3",
- "llama-index>=0.9.14",
+ "llama-index<0.10.0",
  "openai>=1.0.0",
  "tenacity",
  "nltk==3.8.1",
@@ -129,7 +131,7 @@ dependencies = [
  "mypy==1.5.1",
  "pydantic==v1.10.14", # for mypy
- "llama-index>=0.9.14",
+ "llama-index<0.10.0",
  "pandas-stubs<=2.0.2.230605", # version 2.0.3.230814 is causing a dependency conflict.
  "types-psutil",
  "types-tqdm",
src/phoenix/core/traces.py

@@ -20,12 +20,13 @@ from typing import (

  import opentelemetry.proto.trace.v1.trace_pb2 as otlp
  from ddsketch import DDSketch
+ from openinference.semconv.trace import SpanAttributes
  from sortedcontainers import SortedKeyList
  from typing_extensions import TypeAlias
  from wrapt import ObjectProxy

+ import phoenix.trace.schemas
  from phoenix.datetime_utils import right_open_time_range
- from phoenix.trace import semantic_conventions
  from phoenix.trace.otel import decode
  from phoenix.trace.schemas import (
      ATTRIBUTE_PREFIX,
@@ -33,12 +34,10 @@ from phoenix.trace.schemas import (
      CONTEXT_PREFIX,
      ComputedAttributes,
      Span,
-     SpanAttributes,
      SpanID,
      SpanStatusCode,
      TraceID,
  )
- from phoenix.trace.semantic_conventions import RETRIEVAL_DOCUMENTS

  END_OF_QUEUE = None  # sentinel value for queue termination

@@ -50,9 +49,9 @@ SPAN_ID = CONTEXT_PREFIX + "span_id"
  PARENT_ID = "parent_id"
  START_TIME = "start_time"
  END_TIME = "end_time"
- LLM_TOKEN_COUNT_TOTAL = ATTRIBUTE_PREFIX + semantic_conventions.LLM_TOKEN_COUNT_TOTAL
- LLM_TOKEN_COUNT_PROMPT = ATTRIBUTE_PREFIX + semantic_conventions.LLM_TOKEN_COUNT_PROMPT
- LLM_TOKEN_COUNT_COMPLETION = ATTRIBUTE_PREFIX + semantic_conventions.LLM_TOKEN_COUNT_COMPLETION
+ LLM_TOKEN_COUNT_TOTAL = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_TOTAL
+ LLM_TOKEN_COUNT_PROMPT = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_PROMPT
+ LLM_TOKEN_COUNT_COMPLETION = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_COMPLETION


  class ReadableSpan(ObjectProxy):  # type: ignore
@@ -73,7 +72,9 @@ class ReadableSpan(ObjectProxy):  # type: ignore
      @property
      def span(self) -> Span:
          span = decode(self._self_otlp_span)
-         span.attributes.update(cast(SpanAttributes, self._self_computed_values))
+         span.attributes.update(
+             cast(phoenix.trace.schemas.SpanAttributes, self._self_computed_values)
+         )
          # TODO: compute latency rank percent (which can change depending on how
          # many spans already ingested).
          return span
@@ -333,9 +334,13 @@ class Traces:
          self._token_count_total -= existing_span[LLM_TOKEN_COUNT_TOTAL] or 0
          self._token_count_total += new_span[LLM_TOKEN_COUNT_TOTAL] or 0
          # Update number of documents
-         num_documents_update = len(new_span.attributes.get(RETRIEVAL_DOCUMENTS) or ())
+         num_documents_update = len(
+             new_span.attributes.get(SpanAttributes.RETRIEVAL_DOCUMENTS) or ()
+         )
          if existing_span:
-             num_documents_update -= len(existing_span.attributes.get(RETRIEVAL_DOCUMENTS) or ())
+             num_documents_update -= len(
+                 existing_span.attributes.get(SpanAttributes.RETRIEVAL_DOCUMENTS) or ()
+             )
          if num_documents_update:
              self._num_documents[span_id] += num_documents_update
          # Process previously orphaned spans, if any.
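
Note: this file swaps the removed phoenix.trace.semantic_conventions module for the SpanAttributes class from the new openinference-semantic-conventions dependency. The swap works because SpanAttributes members are plain strings, just like the old module-level constants. A minimal sketch, assuming that package is installed (the ATTRIBUTE_PREFIX value shown is an assumption mirroring phoenix.trace.schemas):

from openinference.semconv.trace import SpanAttributes

ATTRIBUTE_PREFIX = "attributes."  # assumed value; the real code imports it from phoenix.trace.schemas

# String concatenation works exactly as it did with the old constants.
key = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_TOTAL
print(key)  # e.g. "attributes.llm.token_count.total"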
src/phoenix/experimental/evals/functions/classify.py

@@ -3,6 +3,7 @@ from __future__ import annotations
  import logging
  import warnings
  from collections import defaultdict
+ from itertools import product
  from typing import (
      Any,
      DefaultDict,
@@ -18,6 +19,7 @@ from typing import (
  )

  import pandas as pd
+ from openinference.semconv.trace import DocumentAttributes, SpanAttributes
  from pandas import DataFrame
  from typing_extensions import TypeAlias

@@ -40,9 +42,12 @@ from phoenix.experimental.evals.utils import (
      parse_openai_function_call,
      snap_to_rail,
  )
- from phoenix.trace.semantic_conventions import DOCUMENT_CONTENT, INPUT_VALUE, RETRIEVAL_DOCUMENTS
  from phoenix.utilities.logging import printif

+ DOCUMENT_CONTENT = DocumentAttributes.DOCUMENT_CONTENT
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
+
  logger = logging.getLogger(__name__)


@@ -54,8 +59,7 @@ Label: TypeAlias = str
  Score: TypeAlias = Optional[float]
  Explanation: TypeAlias = Optional[str]
  Record: TypeAlias = Mapping[str, Any]
- EvaluatorIndex: TypeAlias = int
- RowIndex: TypeAlias = Any
+ Index: TypeAlias = int

  # snapped_response, explanation, response
  ParsedLLMResponse: TypeAlias = Tuple[Optional[str], Optional[str], str]
@@ -343,8 +347,6 @@ def _get_contents_from_openinference_documents(documents: Iterable[Any]) -> List


  class RunEvalsPayload(NamedTuple):
-     evaluator_index: EvaluatorIndex
-     row_index: RowIndex
      evaluator: LLMEvaluator
      record: Record

@@ -404,23 +406,21 @@ def run_evals(

      async def _arun_eval(
          payload: RunEvalsPayload,
-     ) -> Tuple[EvaluatorIndex, RowIndex, Label, Score, Explanation]:
-         label, score, explanation = await payload.evaluator.aevaluate(
+     ) -> Tuple[Label, Score, Explanation]:
+         return await payload.evaluator.aevaluate(
              payload.record,
              provide_explanation=provide_explanation,
              use_function_calling_if_available=use_function_calling_if_available,
          )
-         return payload.evaluator_index, payload.row_index, label, score, explanation

      def _run_eval(
          payload: RunEvalsPayload,
-     ) -> Tuple[EvaluatorIndex, RowIndex, Label, Score, Explanation]:
-         label, score, explanation = payload.evaluator.evaluate(
+     ) -> Tuple[Label, Score, Explanation]:
+         return payload.evaluator.evaluate(
              payload.record,
              provide_explanation=provide_explanation,
              use_function_calling_if_available=use_function_calling_if_available,
          )
-         return payload.evaluator_index, payload.row_index, label, score, explanation

      executor = get_executor_on_sync_context(
          _run_eval,
@@ -428,24 +428,20 @@ def run_evals(
          concurrency=concurrency,
          tqdm_bar_format=get_tqdm_progress_bar_formatter("run_evals"),
          exit_on_error=True,
-         fallback_return_value=(None, None),
+         fallback_return_value=(None, None, None),
      )
+
+     total_records = len(dataframe)
      payloads = [
-         RunEvalsPayload(
-             evaluator_index=evaluator_index,
-             row_index=row_index,
-             evaluator=evaluator,
-             record=row.to_dict(),
-         )
-         # use the position of the row rather than the dataframe index, which is used
-         # to ensure the output dataframe has the same row order as the input dataframe
-         for row_index, (_, row) in enumerate(dataframe.iterrows())
-         for evaluator_index, evaluator in enumerate(evaluators)
+         RunEvalsPayload(evaluator=evaluator, record=row)
+         for evaluator, (_, row) in product(evaluators, dataframe.iterrows())
      ]
-     eval_results: List[DefaultDict[RowIndex, Dict[ColumnName, Union[Label, Explanation]]]] = [
+     eval_results: List[DefaultDict[Index, Dict[ColumnName, Union[Label, Explanation]]]] = [
          defaultdict(dict) for _ in range(len(evaluators))
      ]
-     for evaluator_index, row_index, label, score, explanation in executor.run(payloads):
+     for index, (label, score, explanation) in enumerate(executor.run(payloads)):
+         evaluator_index = index // total_records
+         row_index = index % total_records
          eval_results[evaluator_index][row_index]["label"] = label
          eval_results[evaluator_index][row_index]["score"] = score
          if provide_explanation:
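
Note: the run_evals refactor drops the per-payload (evaluator_index, row_index) bookkeeping in favor of flat enumeration. Because product(evaluators, dataframe.iterrows()) varies rows fastest, both indices can be recovered arithmetically from the flat position, provided the executor yields results in payload order. A toy check of that arithmetic with hypothetical evaluator and row values:

from itertools import product

evaluators = ["hallucination", "toxicity"]  # hypothetical
rows = ["row0", "row1", "row2"]  # hypothetical
total_records = len(rows)

for index, (evaluator, row) in enumerate(product(evaluators, rows)):
    # Rows vary fastest, so divmod recovers both positions:
    # evaluator_index = index // total_records, row_index = index % total_records.
    evaluator_index, row_index = divmod(index, total_records)
    assert evaluators[evaluator_index] == evaluator
    assert rows[row_index] == row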
src/phoenix/experimental/evals/utils/__init__.py

@@ -32,7 +32,7 @@ def download_benchmark_dataset(task: str, dataset_name: str) -> pd.DataFrame:
          pandas.DataFrame: A pandas dataframe containing the data.
      """
      jsonl_file_name = f"{dataset_name}.jsonl"
-     url = f"http://storage.googleapis.com/arize-assets/phoenix/evals/{task}/{jsonl_file_name}.zip"
+     url = f"http://storage.googleapis.com/arize-phoenix-assets/evals/{task}/{jsonl_file_name}.zip"
      try:
          with urlopen(url) as response:
              zip_byte_stream = BytesIO(response.read())
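
Note: the benchmark archives move from the arize-assets bucket to arize-phoenix-assets, dropping the phoenix/ path segment. With hypothetical task and dataset names, the new f-string builds a URL like:

task, dataset_name = "some-task", "some-dataset"  # hypothetical names
jsonl_file_name = f"{dataset_name}.jsonl"
url = f"http://storage.googleapis.com/arize-phoenix-assets/evals/{task}/{jsonl_file_name}.zip"
# -> http://storage.googleapis.com/arize-phoenix-assets/evals/some-task/some-dataset.jsonl.zip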
src/phoenix/server/api/input_types/SpanSort.py

@@ -4,6 +4,7 @@ from typing import Any, Iterable, Iterator, Optional, Protocol

  import pandas as pd
  import strawberry
+ from openinference.semconv.trace import SpanAttributes
  from strawberry import UNSET
  from typing_extensions import assert_never

@@ -13,7 +14,6 @@ from phoenix.core.traces import (
      START_TIME,
  )
  from phoenix.server.api.types.SortDir import SortDir
- from phoenix.trace import semantic_conventions
  from phoenix.trace.schemas import ComputedAttributes, Span, SpanID


@@ -22,9 +22,9 @@ class SpanColumn(Enum):
      startTime = START_TIME
      endTime = END_TIME
      latencyMs = ComputedAttributes.LATENCY_MS.value
-     tokenCountTotal = semantic_conventions.LLM_TOKEN_COUNT_TOTAL
-     tokenCountPrompt = semantic_conventions.LLM_TOKEN_COUNT_PROMPT
-     tokenCountCompletion = semantic_conventions.LLM_TOKEN_COUNT_COMPLETION
+     tokenCountTotal = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
+     tokenCountPrompt = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
+     tokenCountCompletion = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
      cumulativeTokenCountTotal = ComputedAttributes.CUMULATIVE_LLM_TOKEN_COUNT_TOTAL.value
      cumulativeTokenCountPrompt = ComputedAttributes.CUMULATIVE_LLM_TOKEN_COUNT_PROMPT.value
      cumulativeTokenCountCompletion = ComputedAttributes.CUMULATIVE_LLM_TOKEN_COUNT_COMPLETION.value
src/phoenix/server/api/types/Span.py

@@ -5,6 +5,7 @@ from enum import Enum
  from typing import Any, DefaultDict, Dict, List, Mapping, Optional, Sized, cast

  import strawberry
+ from openinference.semconv.trace import EmbeddingAttributes, SpanAttributes
  from strawberry import ID, UNSET
  from strawberry.types import Info

@@ -15,19 +16,17 @@ from phoenix.server.api.types.DocumentRetrievalMetrics import DocumentRetrievalM
  from phoenix.server.api.types.Evaluation import DocumentEvaluation, SpanEvaluation
  from phoenix.server.api.types.MimeType import MimeType
  from phoenix.trace.schemas import ComputedAttributes, SpanID
- from phoenix.trace.semantic_conventions import (
-     EMBEDDING_EMBEDDINGS,
-     EMBEDDING_VECTOR,
-     EXCEPTION_MESSAGE,
-     INPUT_MIME_TYPE,
-     INPUT_VALUE,
-     LLM_TOKEN_COUNT_COMPLETION,
-     LLM_TOKEN_COUNT_PROMPT,
-     LLM_TOKEN_COUNT_TOTAL,
-     OUTPUT_MIME_TYPE,
-     OUTPUT_VALUE,
-     RETRIEVAL_DOCUMENTS,
- )
+
+ EMBEDDING_EMBEDDINGS = SpanAttributes.EMBEDDING_EMBEDDINGS
+ EMBEDDING_VECTOR = EmbeddingAttributes.EMBEDDING_VECTOR
+ INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
+ LLM_TOKEN_COUNT_COMPLETION = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
+ LLM_TOKEN_COUNT_PROMPT = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
+ LLM_TOKEN_COUNT_TOTAL = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
+ OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE
+ OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS


  @strawberry.enum
@@ -87,7 +86,7 @@ class SpanEvent:
      ) -> "SpanEvent":
          return SpanEvent(
              name=event.name,
-             message=cast(str, event.attributes.get(EXCEPTION_MESSAGE) or ""),
+             message=cast(str, event.attributes.get(trace_schema.EXCEPTION_MESSAGE) or ""),
              timestamp=event.timestamp,
          )

src/phoenix/trace/dsl/filter.py

@@ -1,4 +1,5 @@
  import ast
+ import inspect
  import sys
  from dataclasses import dataclass, field
  from difflib import SequenceMatcher
@@ -15,10 +16,10 @@ from typing import (
      cast,
  )

+ from openinference.semconv import trace
  from typing_extensions import TypeGuard

  import phoenix.trace.v1 as pb
- from phoenix.trace import semantic_conventions
  from phoenix.trace.dsl.missing import MISSING
  from phoenix.trace.schemas import COMPUTED_PREFIX, ComputedAttributes, Span, SpanID

@@ -137,9 +138,11 @@ def _allowed_replacements() -> Iterator[Tuple[str, ast.expr]]:
          yield "span.context." + source_segment, ast_replacement

      for field_name in (
-         getattr(semantic_conventions, variable_name)
-         for variable_name in dir(semantic_conventions)
-         if variable_name.isupper()
+         getattr(klass, attr)
+         for name in dir(trace)
+         if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
+         for attr in dir(klass)
+         if attr.isupper()
      ):
          source_segment = field_name
          ast_replacement = _ast_replacement(f"span.attributes.get('{field_name}')")
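
Note: the new generator expression (the same idiom reappears in otel.py below) discovers attribute names by reflection instead of reading a hard-coded module: it walks every class named *Attributes exported by openinference.semconv.trace and collects their upper-case string constants. A standalone sketch of the idiom, assuming that package is installed:

import inspect

from openinference.semconv import trace

field_names = [
    getattr(klass, attr)  # the attribute's flat string name, e.g. "llm.model_name"
    for name in dir(trace)
    if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
    for attr in dir(klass)
    if attr.isupper()
]
print(len(field_names))  # all flat attribute names across the *Attributes classes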
src/phoenix/trace/dsl/helpers.py

@@ -1,15 +1,15 @@
  from typing import List, Optional, Protocol, Union, cast

  import pandas as pd
+ from openinference.semconv.trace import DocumentAttributes, SpanAttributes

  from phoenix.trace.dsl import SpanQuery
- from phoenix.trace.semantic_conventions import (
-     DOCUMENT_CONTENT,
-     DOCUMENT_SCORE,
-     INPUT_VALUE,
-     OUTPUT_VALUE,
-     RETRIEVAL_DOCUMENTS,
- )
+
+ DOCUMENT_CONTENT = DocumentAttributes.DOCUMENT_CONTENT
+ DOCUMENT_SCORE = DocumentAttributes.DOCUMENT_SCORE
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
+ OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS

  INPUT = {"input": INPUT_VALUE}
  OUTPUT = {"output": OUTPUT_VALUE}
src/phoenix/trace/dsl/query.py

@@ -19,13 +19,15 @@ from typing import (
  )

  import pandas as pd
+ from openinference.semconv.trace import SpanAttributes

  from phoenix.trace.dsl import SpanFilter
  from phoenix.trace.dsl.filter import SupportsGetSpanEvaluation
  from phoenix.trace.schemas import ATTRIBUTE_PREFIX, CONTEXT_PREFIX, Span
- from phoenix.trace.semantic_conventions import RETRIEVAL_DOCUMENTS
  from phoenix.trace.span_json_encoder import span_to_json

+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
+
  _SPAN_ID = "context.span_id"
  _PRESCRIBED_POSITION_PREFIXES = {
      RETRIEVAL_DOCUMENTS: "document_",
src/phoenix/trace/errors.py

@@ -3,3 +3,7 @@ from phoenix.exceptions import PhoenixException

  class InvalidParquetMetadataError(PhoenixException):
      pass
+
+
+ class IncompatibleLibraryVersionError(PhoenixException):
+     pass
src/phoenix/trace/llama_index/__init__.py (new file)

@@ -0,0 +1,3 @@
+ from .callback import OpenInferenceTraceCallbackHandler
+
+ __all__ = ["OpenInferenceTraceCallbackHandler"]
src/phoenix/trace/llama_index/callback.py (new file)

@@ -0,0 +1,77 @@
+ import logging
+ from importlib.metadata import PackageNotFoundError, version
+ from importlib.util import find_spec
+ from typing import Any
+
+ from opentelemetry import trace as trace_api
+ from opentelemetry.sdk import trace as trace_sdk
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+
+ from phoenix.trace.errors import IncompatibleLibraryVersionError
+ from phoenix.trace.exporter import _OpenInferenceExporter
+ from phoenix.trace.tracer import _show_deprecation_warnings
+
+ logger = logging.getLogger(__name__)
+
+ LLAMA_INDEX_MODERN_VERSION = (0, 10, 0)
+ INSTRUMENTATION_MODERN_VERSION = (1, 0, 0)
+
+
+ def _check_instrumentation_compatibility() -> bool:
+     if find_spec("llama_index") is None:
+         raise PackageNotFoundError("Missing `llama-index`. Install with `pip install llama-index`.")
+     # split the version string into a tuple of integers
+     llama_index_version_str = version("llama-index")
+     llama_index_version = tuple(map(int, llama_index_version_str.split(".")[:3]))
+     instrumentation_version_str = version("openinference-instrumentation-llama-index")
+     instrumentation_version = tuple(map(int, instrumentation_version_str.split(".")[:3]))
+     # check if the llama_index version is compatible with the instrumentation version
+     if (
+         llama_index_version < LLAMA_INDEX_MODERN_VERSION
+         and instrumentation_version >= INSTRUMENTATION_MODERN_VERSION
+     ):
+         raise IncompatibleLibraryVersionError(
+             f"llama-index v{llama_index_version_str} is not compatible with "
+             f"openinference-instrumentation-llama-index v{instrumentation_version_str}. "
+             "Please either migrate llama-index to at least 0.10.0 or downgrade "
+             "openinference-instrumentation-llama-index via "
+             "`pip install 'openinference-instrumentation-llama-index<1.0.0'`."
+         )
+     elif (
+         llama_index_version >= LLAMA_INDEX_MODERN_VERSION
+         and instrumentation_version < INSTRUMENTATION_MODERN_VERSION
+     ):
+         raise IncompatibleLibraryVersionError(
+             f"llama-index v{llama_index_version_str} is not compatible with "
+             f"openinference-instrumentation-llama-index v{instrumentation_version_str}. "
+             "Please upgrade openinference-instrumentation-llama-index to at least 1.0.0 via "
+             "`pip install 'openinference-instrumentation-llama-index>=1.0.0'`."
+         )
+     # if the versions are compatible, return True
+     return True
+
+
+ if _check_instrumentation_compatibility():
+     from openinference.instrumentation.llama_index._callback import (
+         OpenInferenceTraceCallbackHandler as _OpenInferenceTraceCallbackHandler,
+     )
+     from openinference.instrumentation.llama_index.version import (
+         __version__,
+     )
+
+
+ class OpenInferenceTraceCallbackHandler(_OpenInferenceTraceCallbackHandler):
+     """Callback handler for storing LLM application trace data in OpenInference format.
+     OpenInference is an open standard for capturing and storing AI model
+     inferences. It enables production LLMapp servers to seamlessly integrate
+     with LLM observability solutions such as Arize and Phoenix.
+
+     For more information on the specification, see
+     https://github.com/Arize-ai/openinference
+     """
+
+     def __init__(self, *args: Any, **kwargs: Any) -> None:
+         _show_deprecation_warnings(self, *args, **kwargs)
+         tracer_provider = trace_sdk.TracerProvider()
+         tracer_provider.add_span_processor(SimpleSpanProcessor(_OpenInferenceExporter()))
+         super().__init__(trace_api.get_tracer(__name__, __version__, tracer_provider))
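
Note: the compatibility check compares versions as integer tuples. The inline tuple(map(int, ...)) parsing assumes the first three dotted components are purely numeric; a pre-release string such as "0.10.0rc1" would raise ValueError. A quick illustration with hypothetical version strings:

LLAMA_INDEX_MODERN_VERSION = (0, 10, 0)


def parse_version(version_str: str) -> tuple:
    # hypothetical helper mirroring the inline parsing above
    return tuple(map(int, version_str.split(".")[:3]))


assert parse_version("0.9.45") < LLAMA_INDEX_MODERN_VERSION   # legacy llama-index
assert parse_version("0.10.3") >= LLAMA_INDEX_MODERN_VERSION  # modern llama-index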
src/phoenix/trace/otel.py

@@ -1,3 +1,4 @@
+ import inspect
  import json
  from binascii import hexlify, unhexlify
  from datetime import datetime, timezone
@@ -21,12 +22,23 @@ from typing import (

  import numpy as np
  import opentelemetry.proto.trace.v1.trace_pb2 as otlp
+ from openinference.semconv import trace
+ from openinference.semconv.trace import (
+     DocumentAttributes,
+     EmbeddingAttributes,
+     MessageAttributes,
+     SpanAttributes,
+     ToolCallAttributes,
+ )
  from opentelemetry.proto.common.v1.common_pb2 import AnyValue, ArrayValue, KeyValue
  from opentelemetry.util.types import Attributes, AttributeValue
  from typing_extensions import TypeAlias, assert_never

- import phoenix.trace.semantic_conventions as sem_conv
  from phoenix.trace.schemas import (
+     EXCEPTION_ESCAPED,
+     EXCEPTION_MESSAGE,
+     EXCEPTION_STACKTRACE,
+     EXCEPTION_TYPE,
      MimeType,
      Span,
      SpanContext,
@@ -37,18 +49,38 @@ from phoenix.trace.schemas import (
      SpanStatusCode,
      TraceID,
  )
- from phoenix.trace.semantic_conventions import (
-     DOCUMENT_METADATA,
-     EXCEPTION_ESCAPED,
-     EXCEPTION_MESSAGE,
-     EXCEPTION_STACKTRACE,
-     EXCEPTION_TYPE,
-     INPUT_MIME_TYPE,
-     LLM_PROMPT_TEMPLATE_VARIABLES,
-     OPENINFERENCE_SPAN_KIND,
-     OUTPUT_MIME_TYPE,
-     TOOL_PARAMETERS,
- )
+
+ DOCUMENT_CONTENT = DocumentAttributes.DOCUMENT_CONTENT
+ DOCUMENT_ID = DocumentAttributes.DOCUMENT_ID
+ DOCUMENT_METADATA = DocumentAttributes.DOCUMENT_METADATA
+ EMBEDDING_EMBEDDINGS = SpanAttributes.EMBEDDING_EMBEDDINGS
+ EMBEDDING_MODEL_NAME = SpanAttributes.EMBEDDING_MODEL_NAME
+ EMBEDDING_TEXT = EmbeddingAttributes.EMBEDDING_TEXT
+ EMBEDDING_VECTOR = EmbeddingAttributes.EMBEDDING_VECTOR
+ INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
+ LLM_INPUT_MESSAGES = SpanAttributes.LLM_INPUT_MESSAGES
+ LLM_INVOCATION_PARAMETERS = SpanAttributes.LLM_INVOCATION_PARAMETERS
+ LLM_MODEL_NAME = SpanAttributes.LLM_MODEL_NAME
+ LLM_OUTPUT_MESSAGES = SpanAttributes.LLM_OUTPUT_MESSAGES
+ LLM_PROMPTS = SpanAttributes.LLM_PROMPTS
+ LLM_TOKEN_COUNT_COMPLETION = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
+ LLM_TOKEN_COUNT_PROMPT = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
+ LLM_TOKEN_COUNT_TOTAL = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
+ MESSAGE_CONTENT = MessageAttributes.MESSAGE_CONTENT
+ MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = MessageAttributes.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON
+ MESSAGE_FUNCTION_CALL_NAME = MessageAttributes.MESSAGE_FUNCTION_CALL_NAME
+ MESSAGE_ROLE = MessageAttributes.MESSAGE_ROLE
+ MESSAGE_TOOL_CALLS = MessageAttributes.MESSAGE_TOOL_CALLS
+ OPENINFERENCE_SPAN_KIND = SpanAttributes.OPENINFERENCE_SPAN_KIND
+ OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE
+ OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
+ TOOL_CALL_FUNCTION_ARGUMENTS_JSON = ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON
+ TOOL_CALL_FUNCTION_NAME = ToolCallAttributes.TOOL_CALL_FUNCTION_NAME
+ TOOL_PARAMETERS = SpanAttributes.TOOL_PARAMETERS
+ LLM_PROMPT_TEMPLATE = SpanAttributes.LLM_PROMPT_TEMPLATE
+ LLM_PROMPT_TEMPLATE_VARIABLES = SpanAttributes.LLM_PROMPT_TEMPLATE_VARIABLES


  def decode(otlp_span: otlp.Span) -> Span:
@@ -186,7 +218,13 @@ def _decode_status(otlp_status: otlp.Status) -> Tuple[SpanStatusCode, StatusMess


  _SEMANTIC_CONVENTIONS: List[str] = sorted(
-     (getattr(sem_conv, name) for name in dir(sem_conv) if name.isupper()),
+     (
+         getattr(klass, attr)
+         for name in dir(trace)
+         if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
+         for attr in dir(klass)
+         if attr.isupper()
+     ),
      reverse=True,
  )  # sorted so the longer strings go first

src/phoenix/trace/schemas.py

@@ -4,12 +4,10 @@ from enum import Enum
  from typing import Any, Dict, List, Optional, Union
  from uuid import UUID

- from phoenix.trace.semantic_conventions import (
-     EXCEPTION_ESCAPED,
-     EXCEPTION_MESSAGE,
-     EXCEPTION_STACKTRACE,
-     EXCEPTION_TYPE,
- )
+ EXCEPTION_TYPE = "exception.type"
+ EXCEPTION_MESSAGE = "exception.message"
+ EXCEPTION_ESCAPED = "exception.escaped"
+ EXCEPTION_STACKTRACE = "exception.stacktrace"


  class SpanStatusCode(Enum):
src/phoenix/trace/span_json_decoder.py

@@ -2,7 +2,10 @@ import json
  from datetime import datetime
  from typing import Any, Dict, Optional

+ from openinference.semconv.trace import SpanAttributes
+
  from phoenix.trace.schemas import (
+     EXCEPTION_MESSAGE,
      MimeType,
      Span,
      SpanContext,
@@ -14,11 +17,9 @@ from phoenix.trace.schemas import (
      SpanStatusCode,
      TraceID,
  )
- from phoenix.trace.semantic_conventions import (
-     EXCEPTION_MESSAGE,
-     INPUT_MIME_TYPE,
-     OUTPUT_MIME_TYPE,
- )
+
+ INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE
+ OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE


  def json_to_attributes(obj: Optional[Dict[str, Any]]) -> Dict[str, Any]:
src/phoenix/trace/span_json_encoder.py

@@ -5,12 +5,7 @@ from enum import Enum
  from typing import Any, List
  from uuid import UUID

- from .schemas import (
-     Span,
-     SpanContext,
-     SpanConversationAttributes,
-     SpanEvent,
- )
+ from phoenix.trace.schemas import Span, SpanContext, SpanConversationAttributes, SpanEvent


  class SpanJSONEncoder(json.JSONEncoder):