arize-phoenix 0.0.39__tar.gz → 0.0.41__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arize-phoenix might be problematic. Click here for more details.

Files changed (143) hide show
  1. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/PKG-INFO +13 -12
  2. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/README.md +12 -11
  3. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/__init__.py +1 -1
  4. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/core/traces.py +1 -1
  5. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/functions/binary.py +8 -0
  6. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/retrievals.py +3 -1
  7. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/index.js +357 -357
  8. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/fixtures.py +9 -2
  9. arize_phoenix-0.0.41/src/phoenix/trace/langchain/__init__.py +4 -0
  10. arize_phoenix-0.0.41/src/phoenix/trace/langchain/instrumentor.py +37 -0
  11. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/llama_index/callback.py +26 -11
  12. arize_phoenix-0.0.39/src/phoenix/trace/langchain/__init__.py +0 -3
  13. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/.gitignore +0 -0
  14. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/IP_NOTICE +0 -0
  15. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/LICENSE +0 -0
  16. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/pyproject.toml +0 -0
  17. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/config.py +0 -0
  18. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/core/__init__.py +0 -0
  19. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/core/embedding_dimension.py +0 -0
  20. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/core/model.py +0 -0
  21. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/core/model_schema.py +0 -0
  22. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/core/model_schema_adapter.py +0 -0
  23. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/datasets/__init__.py +0 -0
  24. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/datasets/dataset.py +0 -0
  25. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/datasets/errors.py +0 -0
  26. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/datasets/fixtures.py +0 -0
  27. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/datasets/schema.py +0 -0
  28. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/datasets/validation.py +0 -0
  29. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/datetime_utils.py +0 -0
  30. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/__init__.py +0 -0
  31. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/__init__.py +0 -0
  32. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/functions/__init__.py +0 -0
  33. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/functions/common.py +0 -0
  34. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/functions/generate.py +0 -0
  35. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/models/__init__.py +0 -0
  36. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/models/base.py +0 -0
  37. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/models/openai.py +0 -0
  38. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/models/vertexai.py +0 -0
  39. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/templates/__init__.py +0 -0
  40. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/templates/default_templates.py +0 -0
  41. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/templates/template.py +0 -0
  42. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/utils/__init__.py +0 -0
  43. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/utils/downloads.py +0 -0
  44. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/utils/threads.py +0 -0
  45. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/utils/types.py +0 -0
  46. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/experimental/evals/utils.py +0 -0
  47. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/metrics/README.md +0 -0
  48. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/metrics/__init__.py +0 -0
  49. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/metrics/binning.py +0 -0
  50. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/metrics/metrics.py +0 -0
  51. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/metrics/mixins.py +0 -0
  52. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/metrics/timeseries.py +0 -0
  53. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/metrics/wrappers.py +0 -0
  54. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/pointcloud/__init__.py +0 -0
  55. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/pointcloud/clustering.py +0 -0
  56. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/pointcloud/pointcloud.py +0 -0
  57. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/pointcloud/projectors.py +0 -0
  58. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/py.typed +0 -0
  59. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/__init__.py +0 -0
  60. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/__init__.py +0 -0
  61. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/context.py +0 -0
  62. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/helpers.py +0 -0
  63. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/ClusterInput.py +0 -0
  64. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/Coordinates.py +0 -0
  65. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/DataQualityMetricInput.py +0 -0
  66. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/DimensionFilter.py +0 -0
  67. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/DimensionInput.py +0 -0
  68. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/Granularity.py +0 -0
  69. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/PerformanceMetricInput.py +0 -0
  70. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/SpanSort.py +0 -0
  71. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/TimeRange.py +0 -0
  72. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/input_types/__init__.py +0 -0
  73. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/interceptor.py +0 -0
  74. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/schema.py +0 -0
  75. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/Cluster.py +0 -0
  76. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/DataQualityMetric.py +0 -0
  77. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/Dataset.py +0 -0
  78. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/DatasetInfo.py +0 -0
  79. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/DatasetRole.py +0 -0
  80. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/DatasetValues.py +0 -0
  81. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/Dimension.py +0 -0
  82. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/DimensionDataType.py +0 -0
  83. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/DimensionShape.py +0 -0
  84. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/DimensionType.py +0 -0
  85. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/DimensionWithValue.py +0 -0
  86. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/EmbeddingDimension.py +0 -0
  87. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/EmbeddingMetadata.py +0 -0
  88. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/Event.py +0 -0
  89. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/EventMetadata.py +0 -0
  90. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/ExportEventsMutation.py +0 -0
  91. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/ExportedFile.py +0 -0
  92. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/Functionality.py +0 -0
  93. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/MimeType.py +0 -0
  94. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/Model.py +0 -0
  95. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/NumericRange.py +0 -0
  96. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/PerformanceMetric.py +0 -0
  97. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/PromptResponse.py +0 -0
  98. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/Retrieval.py +0 -0
  99. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/ScalarDriftMetricEnum.py +0 -0
  100. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/Segments.py +0 -0
  101. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/SortDir.py +0 -0
  102. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/Span.py +0 -0
  103. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/TimeSeries.py +0 -0
  104. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/UMAPPoints.py +0 -0
  105. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/VectorDriftMetricEnum.py +0 -0
  106. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/__init__.py +0 -0
  107. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/node.py +0 -0
  108. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/api/types/pagination.py +0 -0
  109. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/app.py +0 -0
  110. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/main.py +0 -0
  111. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/span_handler.py +0 -0
  112. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/apple-touch-icon-114x114.png +0 -0
  113. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/apple-touch-icon-120x120.png +0 -0
  114. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/apple-touch-icon-144x144.png +0 -0
  115. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/apple-touch-icon-152x152.png +0 -0
  116. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/apple-touch-icon-180x180.png +0 -0
  117. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/apple-touch-icon-72x72.png +0 -0
  118. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/apple-touch-icon-76x76.png +0 -0
  119. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/apple-touch-icon.png +0 -0
  120. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/favicon.ico +0 -0
  121. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/index.css +0 -0
  122. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/static/modernizr.js +0 -0
  123. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/templates/__init__.py +0 -0
  124. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/templates/index.html +0 -0
  125. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/server/thread_server.py +0 -0
  126. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/services.py +0 -0
  127. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/session/__init__.py +0 -0
  128. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/session/session.py +0 -0
  129. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/__init__.py +0 -0
  130. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/exporter.py +0 -0
  131. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/filter.py +0 -0
  132. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/langchain/tracer.py +0 -0
  133. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/llama_index/__init__.py +0 -0
  134. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/schemas.py +0 -0
  135. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/semantic_conventions.py +0 -0
  136. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/span_json_decoder.py +0 -0
  137. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/span_json_encoder.py +0 -0
  138. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/trace_dataset.py +0 -0
  139. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/tracer.py +0 -0
  140. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/utils.py +0 -0
  141. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/v1/__init__.py +0 -0
  142. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/v1/trace_pb2.py +0 -0
  143. {arize_phoenix-0.0.39 → arize_phoenix-0.0.41}/src/phoenix/trace/v1/trace_pb2.pyi +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: arize-phoenix
3
- Version: 0.0.39
3
+ Version: 0.0.41
4
4
  Summary: ML Observability in your notebook
5
5
  Project-URL: Documentation, https://docs.arize.com/phoenix/
6
6
  Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -82,16 +82,16 @@ Description-Content-Type: text/markdown
82
82
 
83
83
  Phoenix provides MLOps and LLMOps insights at lightning speed with zero-config observability. Phoenix provides a notebook-first experience for monitoring your models and LLM Applications by providing:
84
84
 
85
- - **LLM App Tracing** - Trace through the execution of your LLM Application to understand the internals of your LLM Application and to troubleshoot problems related to things like retrieval and tool execution.
85
+ - **LLM Traces** - Trace through the execution of your LLM Application to understand the internals of your LLM Application and to troubleshoot problems related to things like retrieval and tool execution.
86
86
  - **LLM Evals** - Leverage the power of large language models to evaluate your generative model or application's relevance, toxicity, and more.
87
87
  - **Embedding Analysis** - Explore embedding point-clouds and identify clusters of high drift and performance degradation.
88
- - **RAG Introspection** - Visualize your generative application's search and retrieval process to solve improve your retrieval augmented generation.
88
+ - **RAG Analysis** - Visualize your generative application's search and retrieval process to improve your retrieval-augmented generation.
89
89
  - **Structured Data Analysis** - Statistically analyze your structured data by performing A/B analysis, temporal drift analysis, and more.
90
90
 
91
91
  **Table of Contents**
92
92
 
93
93
  - [Installation](#installation)
94
- - [LLM App Tracing](#llm-app-tracing)
94
+ - [LLM Traces](#llm-traces)
95
95
  - [Tracing with LlamaIndex](#tracing-with-llamaindex)
96
96
  - [Tracing with LangChain](#tracing-with-langchain)
97
97
  - [LLM Evals](#llm-evals)
@@ -99,7 +99,7 @@ Phoenix provides MLOps and LLMOps insights at lightning speed with zero-config o
99
99
  - [UMAP-based Exploratory Data Analysis](#umap-based-exploratory-data-analysis)
100
100
  - [Cluster-driven Drift and Performance Analysis](#cluster-driven-drift-and-performance-analysis)
101
101
  - [Exportable Clusters](#exportable-clusters)
102
- - [RAG Introspection](#rag-introspection)
102
+ - [Retrieval-Augmented Generation Analysis](#retrieval-augmented-generation-analysis)
103
103
  - [Structured Data Analysis](#structured-data-analysis)
104
104
  - [Community](#community)
105
105
  - [Thanks](#thanks)
@@ -119,7 +119,7 @@ Some functionality such as LLM evals are under the `experimental` subpackage.
119
119
  pip install arize-phoenix[experimental]
120
120
  ```
121
121
 
122
- ## LLM App Tracing
122
+ ## LLM Traces
123
123
 
124
124
  ![LLM Application Tracing](https://github.com/Arize-ai/phoenix-assets/blob/main/gifs/langchain_rag_stuff_documents_chain_10mb.gif?raw=true)
125
125
 
@@ -129,7 +129,7 @@ With the advent of powerful LLMs, it is now possible to build LLM Applications t
129
129
 
130
130
  [![Open in Colab](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=grey&color=blue&logoColor=orange&label=%20)](https://colab.research.google.com/github/Arize-ai/phoenix/blob/main/tutorials/tracing/llama_index_tracing_tutorial.ipynb) [![Open in GitHub](https://img.shields.io/static/v1?message=Open%20in%20GitHub&logo=github&labelColor=grey&color=blue&logoColor=white&label=%20)](https://github.com/Arize-ai/phoenix/blob/main/tutorials/tracing/llama_index_tracing_tutorial.ipynb)
131
131
 
132
- ![LLM App Tracing UI](https://storage.googleapis.com/arize-assets/phoenix/assets/images/trace_details_view.png)
132
+ ![LLM Traces UI](https://storage.googleapis.com/arize-assets/phoenix/assets/images/trace_details_view.png)
133
133
 
134
134
  To extract traces from your LlamaIndex application, you will have to add Phoenix's `OpenInferenceTraceCallback` to your LlamaIndex application. A callback (in this case an OpenInference `Tracer`) is a class that automatically accumulates `spans` that trace your application as it executes. The OpenInference `Tracer` is a tracer that is specifically designed to work with Phoenix and by default exports the traces to a locally running phoenix server.
135
135
 
@@ -199,12 +199,13 @@ import pandas as pd
199
199
  # Launch phoenix
200
200
  session = px.launch_app()
201
201
 
202
- # Once you have started a Phoenix server, you can start your LangChain application with the OpenInference Tracer as a callback. To do this, you will have to add the tracer to the initialization of your LangChain application:
202
+ # Once you have started a Phoenix server, you can start your LangChain application with the OpenInferenceTracer as a callback. To do this, you will have to instrument your LangChain application with the tracer:
203
203
 
204
- from phoenix.trace.langchain import OpenInferenceTracer
204
+ from phoenix.trace.langchain import OpenInferenceTracer, LangChainInstrumentor
205
205
 
206
206
  # If no exporter is specified, the tracer will export to the locally running Phoenix server
207
207
  tracer = OpenInferenceTracer()
208
+ LangChainInstrumentor(tracer).instrument()
208
209
 
209
210
  # Initialize your LangChain application
210
211
  from langchain.chains import RetrievalQA
@@ -367,13 +368,13 @@ Break-apart your data into clusters of high drift or bad performance using HDBSC
367
368
 
368
369
  Export your clusters to `parquet` files or dataframes for further analysis and fine-tuning.
369
370
 
370
- ## RAG Introspection
371
+ ## Retrieval-Augmented Generation Analysis
371
372
 
372
373
  [![Open in Colab](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=grey&color=blue&logoColor=orange&label=%20)](https://colab.research.google.com/github/Arize-ai/phoenix/blob/main/tutorials/llama_index_search_and_retrieval_tutorial.ipynb) [![Open in GitHub](https://img.shields.io/static/v1?message=Open%20in%20GitHub&logo=github&labelColor=grey&color=blue&logoColor=white&label=%20)](https://github.com/Arize-ai/phoenix/blob/main/tutorials/llama_index_search_and_retrieval_tutorial.ipynb)
373
374
 
374
- ![RAG Introspection](https://github.com/Arize-ai/phoenix-assets/blob/main/gifs/corpus_search_and_retrieval.gif?raw=true)
375
+ ![RAG Analysis](https://github.com/Arize-ai/phoenix-assets/blob/main/gifs/corpus_search_and_retrieval.gif?raw=true)
375
376
 
376
- Search and retrieval is a critical component of many LLM Applications as it allows you to extend the LLM's capabilities to encompass knowledge about private data. This process is known as RAG (retrieval augmented generation) and often times a vector store is leveraged to store chunks of documents encoded as embeddings so that they can be retrieved at inference time.
377
+ Search and retrieval is a critical component of many LLM Applications as it allows you to extend the LLM's capabilities to encompass knowledge about private data. This process is known as RAG (retrieval-augmented generation) and often times a vector store is leveraged to store chunks of documents encoded as embeddings so that they can be retrieved at inference time.
377
378
 
378
379
  To help you better understand your RAG application, Phoenix allows you to upload a corpus of your knowledge base along with your LLM application's inferences to help you troubleshoot hard to find bugs with retrieval.
379
380
 
@@ -28,16 +28,16 @@
28
28
 
29
29
  Phoenix provides MLOps and LLMOps insights at lightning speed with zero-config observability. Phoenix provides a notebook-first experience for monitoring your models and LLM Applications by providing:
30
30
 
31
- - **LLM App Tracing** - Trace through the execution of your LLM Application to understand the internals of your LLM Application and to troubleshoot problems related to things like retrieval and tool execution.
31
+ - **LLM Traces** - Trace through the execution of your LLM Application to understand the internals of your LLM Application and to troubleshoot problems related to things like retrieval and tool execution.
32
32
  - **LLM Evals** - Leverage the power of large language models to evaluate your generative model or application's relevance, toxicity, and more.
33
33
  - **Embedding Analysis** - Explore embedding point-clouds and identify clusters of high drift and performance degradation.
34
- - **RAG Introspection** - Visualize your generative application's search and retrieval process to solve improve your retrieval augmented generation.
34
+ - **RAG Analysis** - Visualize your generative application's search and retrieval process to improve your retrieval-augmented generation.
35
35
  - **Structured Data Analysis** - Statistically analyze your structured data by performing A/B analysis, temporal drift analysis, and more.
36
36
 
37
37
  **Table of Contents**
38
38
 
39
39
  - [Installation](#installation)
40
- - [LLM App Tracing](#llm-app-tracing)
40
+ - [LLM Traces](#llm-traces)
41
41
  - [Tracing with LlamaIndex](#tracing-with-llamaindex)
42
42
  - [Tracing with LangChain](#tracing-with-langchain)
43
43
  - [LLM Evals](#llm-evals)
@@ -45,7 +45,7 @@ Phoenix provides MLOps and LLMOps insights at lightning speed with zero-config o
45
45
  - [UMAP-based Exploratory Data Analysis](#umap-based-exploratory-data-analysis)
46
46
  - [Cluster-driven Drift and Performance Analysis](#cluster-driven-drift-and-performance-analysis)
47
47
  - [Exportable Clusters](#exportable-clusters)
48
- - [RAG Introspection](#rag-introspection)
48
+ - [Retrieval-Augmented Generation Analysis](#retrieval-augmented-generation-analysis)
49
49
  - [Structured Data Analysis](#structured-data-analysis)
50
50
  - [Community](#community)
51
51
  - [Thanks](#thanks)
@@ -65,7 +65,7 @@ Some functionality such as LLM evals are under the `experimental` subpackage.
65
65
  pip install arize-phoenix[experimental]
66
66
  ```
67
67
 
68
- ## LLM App Tracing
68
+ ## LLM Traces
69
69
 
70
70
  ![LLM Application Tracing](https://github.com/Arize-ai/phoenix-assets/blob/main/gifs/langchain_rag_stuff_documents_chain_10mb.gif?raw=true)
71
71
 
@@ -75,7 +75,7 @@ With the advent of powerful LLMs, it is now possible to build LLM Applications t
75
75
 
76
76
  [![Open in Colab](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=grey&color=blue&logoColor=orange&label=%20)](https://colab.research.google.com/github/Arize-ai/phoenix/blob/main/tutorials/tracing/llama_index_tracing_tutorial.ipynb) [![Open in GitHub](https://img.shields.io/static/v1?message=Open%20in%20GitHub&logo=github&labelColor=grey&color=blue&logoColor=white&label=%20)](https://github.com/Arize-ai/phoenix/blob/main/tutorials/tracing/llama_index_tracing_tutorial.ipynb)
77
77
 
78
- ![LLM App Tracing UI](https://storage.googleapis.com/arize-assets/phoenix/assets/images/trace_details_view.png)
78
+ ![LLM Traces UI](https://storage.googleapis.com/arize-assets/phoenix/assets/images/trace_details_view.png)
79
79
 
80
80
  To extract traces from your LlamaIndex application, you will have to add Phoenix's `OpenInferenceTraceCallback` to your LlamaIndex application. A callback (in this case an OpenInference `Tracer`) is a class that automatically accumulates `spans` that trace your application as it executes. The OpenInference `Tracer` is a tracer that is specifically designed to work with Phoenix and by default exports the traces to a locally running phoenix server.
81
81
 
@@ -145,12 +145,13 @@ import pandas as pd
145
145
  # Launch phoenix
146
146
  session = px.launch_app()
147
147
 
148
- # Once you have started a Phoenix server, you can start your LangChain application with the OpenInference Tracer as a callback. To do this, you will have to add the tracer to the initialization of your LangChain application:
148
+ # Once you have started a Phoenix server, you can start your LangChain application with the OpenInferenceTracer as a callback. To do this, you will have to instrument your LangChain application with the tracer:
149
149
 
150
- from phoenix.trace.langchain import OpenInferenceTracer
150
+ from phoenix.trace.langchain import OpenInferenceTracer, LangChainInstrumentor
151
151
 
152
152
  # If no exporter is specified, the tracer will export to the locally running Phoenix server
153
153
  tracer = OpenInferenceTracer()
154
+ LangChainInstrumentor(tracer).instrument()
154
155
 
155
156
  # Initialize your LangChain application
156
157
  from langchain.chains import RetrievalQA
@@ -313,13 +314,13 @@ Break-apart your data into clusters of high drift or bad performance using HDBSC
313
314
 
314
315
  Export your clusters to `parquet` files or dataframes for further analysis and fine-tuning.
315
316
 
316
- ## RAG Introspection
317
+ ## Retrieval-Augmented Generation Analysis
317
318
 
318
319
  [![Open in Colab](https://img.shields.io/static/v1?message=Open%20in%20Colab&logo=googlecolab&labelColor=grey&color=blue&logoColor=orange&label=%20)](https://colab.research.google.com/github/Arize-ai/phoenix/blob/main/tutorials/llama_index_search_and_retrieval_tutorial.ipynb) [![Open in GitHub](https://img.shields.io/static/v1?message=Open%20in%20GitHub&logo=github&labelColor=grey&color=blue&logoColor=white&label=%20)](https://github.com/Arize-ai/phoenix/blob/main/tutorials/llama_index_search_and_retrieval_tutorial.ipynb)
319
320
 
320
- ![RAG Introspection](https://github.com/Arize-ai/phoenix-assets/blob/main/gifs/corpus_search_and_retrieval.gif?raw=true)
321
+ ![RAG Analysis](https://github.com/Arize-ai/phoenix-assets/blob/main/gifs/corpus_search_and_retrieval.gif?raw=true)
321
322
 
322
- Search and retrieval is a critical component of many LLM Applications as it allows you to extend the LLM's capabilities to encompass knowledge about private data. This process is known as RAG (retrieval augmented generation) and often times a vector store is leveraged to store chunks of documents encoded as embeddings so that they can be retrieved at inference time.
323
+ Search and retrieval is a critical component of many LLM Applications as it allows you to extend the LLM's capabilities to encompass knowledge about private data. This process is known as RAG (retrieval-augmented generation) and often times a vector store is leveraged to store chunks of documents encoded as embeddings so that they can be retrieved at inference time.
323
324
 
324
325
  To help you better understand your RAG application, Phoenix allows you to upload a corpus of your knowledge base along with your LLM application's inferences to help you troubleshoot hard to find bugs with retrieval.
325
326
 
@@ -5,7 +5,7 @@ from .session.session import Session, active_session, close_app, launch_app
5
5
  from .trace.fixtures import load_example_traces
6
6
  from .trace.trace_dataset import TraceDataset
7
7
 
8
- __version__ = "0.0.39"
8
+ __version__ = "0.0.41"
9
9
 
10
10
  # module level doc-string
11
11
  __doc__ = """
@@ -188,7 +188,7 @@ class Traces:
188
188
  def token_count_total(self) -> int:
189
189
  count = 0
190
190
  for span in self._spans.values():
191
- count += span[LLM_TOKEN_COUNT_COMPLETION] or 0
191
+ count += span[LLM_TOKEN_COUNT_TOTAL] or 0
192
192
  return count
193
193
 
194
194
  @property
@@ -183,8 +183,16 @@ def _extract_rail(string: str, positive_rail: str, negative_rail: str) -> Option
183
183
 
184
184
  string = "regular..irregular" - contains both rails
185
185
  Output: None
186
+
187
+ string = "Irregular"
188
+ Output: "irregular"
186
189
  """
187
190
 
191
+ # Convert the inputs to lowercase for case-insensitive matching
192
+ string = string.lower()
193
+ positive_rail = positive_rail.lower()
194
+ negative_rail = negative_rail.lower()
195
+
188
196
  positive_pos, negative_pos = string.find(positive_rail), string.find(negative_rail)
189
197
 
190
198
  # If both positive and negative rails are in the string
@@ -4,7 +4,6 @@ Helper functions for evaluating the retrieval step of retrieval-augmented genera
4
4
 
5
5
  from typing import List, Optional
6
6
 
7
- from openai import ChatCompletion
8
7
  from tenacity import (
9
8
  retry,
10
9
  stop_after_attempt,
@@ -75,6 +74,9 @@ def classify_relevance(query: str, document: str, model_name: str) -> Optional[b
75
74
  (True meaning relevant, False meaning irrelevant), or None if the LLM produces an
76
75
  unparseable output.
77
76
  """
77
+
78
+ from openai import ChatCompletion
79
+
78
80
  prompt = _QUERY_CONTEXT_PROMPT_TEMPLATE.format(
79
81
  query=query,
80
82
  reference=document,