arize-phoenix 3.0.1__py3-none-any.whl → 3.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release has been flagged as potentially problematic; see the package's registry page for details.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: arize-phoenix
- Version: 3.0.1
+ Version: 3.0.3
  Summary: ML Observability in your notebook
  Project-URL: Documentation, https://docs.arize.com/phoenix/
  Project-URL: Issues, https://github.com/Arize-ai/phoenix/issues
@@ -23,6 +23,7 @@ Requires-Dist: numpy
  Requires-Dist: openinference-instrumentation-langchain
  Requires-Dist: openinference-instrumentation-llama-index
  Requires-Dist: openinference-instrumentation-openai
+ Requires-Dist: openinference-semantic-conventions
  Requires-Dist: opentelemetry-exporter-otlp
  Requires-Dist: opentelemetry-proto
  Requires-Dist: opentelemetry-sdk
@@ -50,7 +51,7 @@ Requires-Dist: hatch; extra == 'dev'
  Requires-Dist: jupyter; extra == 'dev'
  Requires-Dist: langchain>=0.0.334; extra == 'dev'
  Requires-Dist: litellm>=1.0.3; extra == 'dev'
- Requires-Dist: llama-index>=0.9.14; extra == 'dev'
+ Requires-Dist: llama-index<0.10.0; extra == 'dev'
  Requires-Dist: nbqa; extra == 'dev'
  Requires-Dist: pandas-stubs<=2.0.2.230605; extra == 'dev'
  Requires-Dist: pre-commit; extra == 'dev'
@@ -64,6 +65,7 @@ Provides-Extra: experimental
  Requires-Dist: tenacity; extra == 'experimental'
  Provides-Extra: llama-index
  Requires-Dist: llama-index==0.9.45; extra == 'llama-index'
+ Requires-Dist: openinference-instrumentation-llama-index==0.1.3; extra == 'llama-index'
  Description-Content-Type: text/markdown

  <p align="center">
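
The `llama-index` extra now pins both llama-index and its OpenInference instrumentation (llama-index==0.9.45, openinference-instrumentation-llama-index==0.1.3). A minimal sketch for checking that an environment matches those pins at runtime; the version strings come from the metadata above, everything else is illustrative:

```python
# Sketch: verify the versions pinned by the `llama-index` extra in 3.0.3.
# Assumes the extra was installed, e.g. arize-phoenix[llama-index]==3.0.3.
from importlib.metadata import version

assert version("llama-index") == "0.9.45"
assert version("openinference-instrumentation-llama-index") == "0.1.3"
```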
@@ -4,14 +4,14 @@ phoenix/datetime_utils.py,sha256=D955QLrkgrrSdUM6NyqbCeAu2SMsjhR5rHVQEsVUdng,277
  phoenix/exceptions.py,sha256=X5k9ipUDfwSCwZB-H5zFJLas86Gf9tAx0W4l5TZxp5k,108
  phoenix/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  phoenix/services.py,sha256=f6AeyKTuOpy9RCcTCjVH3gx5nYZhbTMFOuv1WSUOB5o,4992
- phoenix/version.py,sha256=E3P6AbnCwaWk6ndR1zNqlOTVebX9z5rv9voltc71dos,22
+ phoenix/version.py,sha256=3PslnGRHLeT8kAWbhtBM110cQkzH_QzfQO5_B6lHOuU,22
  phoenix/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  phoenix/core/embedding_dimension.py,sha256=zKGbcvwOXgLf-yrJBpQyKtd-LEOPRKHnUToyAU8Owis,87
  phoenix/core/evals.py,sha256=gJyqQzpud5YjtoY8h4pgXvHDsdubGfqmEewLuZHPPmQ,10224
  phoenix/core/model.py,sha256=vQ6RxpUPlncezJvur5u6xBN0Lkrk2gW0cTyb-qqaSqA,4713
  phoenix/core/model_schema.py,sha256=rR9VdhL_oXxbprDTPQJBXs5hw5sMPQmzx__m6Kwsxug,50394
  phoenix/core/model_schema_adapter.py,sha256=3GkyzqUST4fYi-Bgs8qAam5hwMCdQRZTDLjZ9Bnzdm4,8268
- phoenix/core/traces.py,sha256=0-CApYIFoppwKGss70_-xX_s8FPI2ABfTyT7pyeI_eE,14007
+ phoenix/core/traces.py,sha256=I9xsQvEd4s7fZa1lquzBfnY7ZQoLcWwnT51wuuglnyM,14065
  phoenix/datasets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  phoenix/datasets/dataset.py,sha256=scKVZ7zc6Dpc_ntt-pWhzY-KWqOJEwKePuyNnKSVTGE,30515
  phoenix/datasets/errors.py,sha256=cGp9vxnw4SewFoWBV3ZGMkhE0Kh73lPIv3Ppz_H_RoA,8261
@@ -23,7 +23,7 @@ phoenix/experimental/evals/__init__.py,sha256=q96YKLMt2GJD9zL8sjugvWx1INfw40Wa7E
  phoenix/experimental/evals/evaluators.py,sha256=r7fXrS-l4gn58SUhLAZSfY3P8lxysouSVJwHddrZJ_Q,15956
  phoenix/experimental/evals/retrievals.py,sha256=o3fqrsYbYZjyGj_jWkN_9VQVyXjLkDKDw5Ws7l8bwdI,3828
  phoenix/experimental/evals/functions/__init__.py,sha256=NNd0-_cmIopdV7vm3rspjfgM726qoQJ4DPq_vqbnaxQ,180
- phoenix/experimental/evals/functions/classify.py,sha256=6yCajPT9i98b4_2qYn9ZxGhdI3CLhfUSrEyUUcqQqmQ,19517
+ phoenix/experimental/evals/functions/classify.py,sha256=rIQLf0GRXPNT2d5Oni0lrpFrubBorAscjN0ievjWx9A,19092
  phoenix/experimental/evals/functions/executor.py,sha256=bM7PI2rcPukQQzZ2rWqN_-Kfo_a935YJj0bh1Red8Ps,13406
  phoenix/experimental/evals/functions/generate.py,sha256=8LnnPAjBM9yxitdkaGZ67OabuDTOWBF3fvinJ_uCFRg,5584
  phoenix/experimental/evals/functions/processing.py,sha256=F4xtLsulLV4a8CkuLldRddsCim75dSTIShEJUYN6I6w,1823
@@ -39,7 +39,7 @@ phoenix/experimental/evals/models/vertexai.py,sha256=_txsOP2RHyR3AnugeJRFUNvYm3x
  phoenix/experimental/evals/templates/__init__.py,sha256=GSJSoWJ4jwyoUANniidmWMUtXQhNQYbTJbfFqCvuYuo,1470
  phoenix/experimental/evals/templates/default_templates.py,sha256=dVKmoLwqgAyGcRuezz9WKnXSHhw7-qk1R8j6wSmqh0s,20722
  phoenix/experimental/evals/templates/template.py,sha256=ImFSaTPo9oalPNwq7cNdOCndrvuwLuIyIFKsgDVcoJE,6715
- phoenix/experimental/evals/utils/__init__.py,sha256=608EX7sG0f5oDG__II16J8xnFJiNpY9dI9AC8vXwR00,5601
+ phoenix/experimental/evals/utils/__init__.py,sha256=FE1tbb022t3pwn0CrprR7QXcIsgpsdsotV7-iB3khsI,5601
  phoenix/experimental/evals/utils/threads.py,sha256=ksI-egarPnlxit0qKKjtjZ2L82qGLxqxZ6s92O0eBA4,1005
  phoenix/metrics/README.md,sha256=5gekqTU-5gGdMwvcfNp2Wlu8p1ul9kGY_jq0XXQusoI,1964
  phoenix/metrics/__init__.py,sha256=sLp7td1GIt_0Z8dPUyP4L0-_4x9c871yAaGX30oMsvg,2433
@@ -70,7 +70,7 @@ phoenix/server/api/input_types/DimensionFilter.py,sha256=vcXgglSnZcB5pGh-6oEtRmG
  phoenix/server/api/input_types/DimensionInput.py,sha256=Vfx5FmiMKey4-EHDQsQRPzSAMRJMN5oVMLDUl4NKAa8,164
  phoenix/server/api/input_types/Granularity.py,sha256=6SVfZ5yTZYq1PI6vdpjfkBUc4YilLSkF-k6okuSNbbQ,2301
  phoenix/server/api/input_types/PerformanceMetricInput.py,sha256=fElsLTSEYYgGFGMYTEGcYid39tXUKFdV_JkdHavMcbA,591
- phoenix/server/api/input_types/SpanSort.py,sha256=9OxxFseY-9pwZ9rUJ71P-h_vpwYg5UAiy76x_pzHvPo,3537
+ phoenix/server/api/input_types/SpanSort.py,sha256=72oWhYLLIZLy7K5_tFhMg7RcmDgatqMB-ZoRJhHGIR0,3527
  phoenix/server/api/input_types/TimeRange.py,sha256=yzx-gxj8mDeGLft1FzU_x1MVEgIG5Pt6-f8PUVDgipQ,522
  phoenix/server/api/input_types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  phoenix/server/api/routers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -109,7 +109,7 @@ phoenix/server/api/types/Retrieval.py,sha256=OhMK2ncjoyp5h1yjKhjlKpoTbQrMHuxmgSF
  phoenix/server/api/types/ScalarDriftMetricEnum.py,sha256=IUAcRPpgL41WdoIgK6cNk2Te38SspXGyEs-S1fY23_A,232
  phoenix/server/api/types/Segments.py,sha256=zogJI9MdmctBL7J-fDSR_8tUJLvuISlVYgCLnTaigKE,2937
  phoenix/server/api/types/SortDir.py,sha256=OUpXhlCzCxPoXSDkJJygEs9Rw9pMymfaZUG5zPTrw4Y,152
- phoenix/server/api/types/Span.py,sha256=QWWW2MmWD0bwiR-jAUtJJZ-GvZJhTFYq7qhus7tsz3A,11304
+ phoenix/server/api/types/Span.py,sha256=OvY4ffZa3KMnT3J9s0oLB0ghurYSJCWlK0YfoKkg-6s,11633
  phoenix/server/api/types/TimeSeries.py,sha256=QbLfxHnwYsMsirpq4tx9us6ha7YtAVzK4m8mAL3fMt0,5200
  phoenix/server/api/types/UMAPPoints.py,sha256=8l9RJXi308qty4MdHb2pBbiU6ZuLbrRRxXNbPhXoxKI,1639
  phoenix/server/api/types/ValidationResult.py,sha256=pHwdYk4J7SJ5xhlWWHg_6qWkfk4rjOx-bSkGHvkDE3Q,142
@@ -137,30 +137,28 @@ phoenix/session/data_extractor.py,sha256=V4ntT2FcmbV_-zobcbPR51HKWaDyMnXHj4Pmu7M
  phoenix/session/evaluation.py,sha256=DaAtA0XYJbXRJO_StGywa-9APlz2ORSmCXzxrtn3rvI,4997
  phoenix/session/session.py,sha256=mrTAQkUFMGjN65ApYqRjhGXExbfxu2ODJcJ14MSDOLE,21719
  phoenix/trace/__init__.py,sha256=4d_MqzUIFmlY9WWcFeTONJ4xL5mPGoWZaPM2TJ0ZDBQ,266
- phoenix/trace/errors.py,sha256=DbXSJnNErV7305tKv7pUWLD6jcVHJ6EBdSu4mZJ6IM4,112
+ phoenix/trace/errors.py,sha256=wB1z8qdPckngdfU-TORToekvg3344oNFAA83_hC2yFY,180
  phoenix/trace/evaluation_conventions.py,sha256=t8jydM3U0-T5YpiQKRJ3tWdWGlHtzKyttYdw-ddvPOk,1048
  phoenix/trace/exporter.py,sha256=O-9Arn-S_B9Me-jy4Qa84y6lvxKNoa8pczrsamPl3_Q,4871
  phoenix/trace/fixtures.py,sha256=LokNedhbGYxpzXznteO4m5QehvNYjzvoh231-CMJQeY,7113
- phoenix/trace/otel.py,sha256=lJoTvkCSFiPT4dFRLMZZgZfsOoTSATctW9lQk9J30ZI,14821
- phoenix/trace/schemas.py,sha256=QDBlM94faceTnloY_ZVo2BHWWnE5UGymefC0jmfj4ew,6011
- phoenix/trace/semantic_conventions.py,sha256=u6NG85ZhbreriZr8cqJaddldM_jUcew7JilszY7JUk8,4652
+ phoenix/trace/otel.py,sha256=naFWTwrqyIoeGJ5YMT9Fyhd44CEtQvpscVluDOXD6Fo,16744
+ phoenix/trace/schemas.py,sha256=zlAY8v-PLgEDqTgbmDxY3NJlCAmzj_3ZCJFebQOBz3M,6028
  phoenix/trace/span_evaluations.py,sha256=FvY9YRnKuYIzCa-H9P5SuDaI2DeqGnVCjNgclC2v3HA,12909
- phoenix/trace/span_json_decoder.py,sha256=nrIPkcgbCcNML-0OSjWC6fxIfBEMiP0n67yM_m-vegg,3068
- phoenix/trace/span_json_encoder.py,sha256=C5y7rkyOcV08oJC5t8TZqVxsKCZMJKad7bBQzAgLoDs,1763
- phoenix/trace/trace_dataset.py,sha256=KW0TzmhlKuX8PUPLV172iTK08myYE0QXUC75KiIqJ7k,13204
+ phoenix/trace/span_json_decoder.py,sha256=IAFakPRqSMYxTPKYFMiXYxm7U-FipdN8_xbvapDS0Qc,3131
+ phoenix/trace/span_json_encoder.py,sha256=hIDd1I6xm01kaNmeKjHOHyxUGI3uTg5J_Os1kXtAb6g,1755
+ phoenix/trace/trace_dataset.py,sha256=DF4JH3lq7ULsw6sGo6c4SULChoxNSRJA4knQXfSrTR8,13485
  phoenix/trace/tracer.py,sha256=JDKlyvjy6AsQmaA60ycJ1hKXoUQU61jqPx3nvYr8xUc,3647
  phoenix/trace/utils.py,sha256=7LurVGXn245cjj4MJsc7v6jq4DSJkpK6YGBfIaSywuw,1307
  phoenix/trace/dsl/__init__.py,sha256=WIQIjJg362XD3s50OsPJJ0xbDsGp41bSv7vDllLrPuA,144
- phoenix/trace/dsl/filter.py,sha256=VwlzL4KsIoyYjjCSKXlOSyVm49WyYRlkK5zedXqZlcc,13136
- phoenix/trace/dsl/helpers.py,sha256=qxRS65NFUFVpLDd8IZVrZUH891MamLeXyfIIN1BJJ_c,1552
+ phoenix/trace/dsl/filter.py,sha256=7m1v9RtZsAzg-E8iXKLvVSlCJLPtnUapzEIb-nrygsM,13208
+ phoenix/trace/dsl/helpers.py,sha256=U71HmCecJQ_zHq0g2mFKbxrRuiJDkopskWMpFoBNP-Y,1722
  phoenix/trace/dsl/missing.py,sha256=BWPOHr2_tBkPDgVeq8GVXXVbNbJiBelu4NtwHBg6mTE,1435
- phoenix/trace/dsl/query.py,sha256=W6_e1o_JbucGWWv6pSUQFyIHcTuj7QuCBSuKXodRfCE,14773
+ phoenix/trace/dsl/query.py,sha256=XoFwKEALzGqUERy7B5fgD-n0s87zN6jRVrZgW6-jqRo,14819
  phoenix/trace/langchain/__init__.py,sha256=aTKMFmEOgjx_6dnyplalgYi7PQnetablwrwpcUZqcGE,764
  phoenix/trace/langchain/instrumentor.py,sha256=rmqdaEh2rwyZnaddns4RC2WQnj7TbCnJjETgoiwWvzs,1163
  phoenix/trace/langchain/tracer.py,sha256=fSjtPc5VxaZK63x3ob3DAW7eM2kfOZWgbSQ9dmhhtsw,1424
- phoenix/trace/llama_index/__init__.py,sha256=wCcQgD9CG5TA8i-1XsSed4ZzwHTUmqZwegQAV_FqEng,178
- phoenix/trace/llama_index/callback.py,sha256=MHgUNJSG6ecxZ8SZ_9LdcKnAAWmNI-U5pySFyXqubjM,1694
- phoenix/trace/llama_index/debug_callback.py,sha256=SKToD9q_QADSGTJ5lhilqRVKaUnUSRXUvURCzN4by2U,1367
+ phoenix/trace/llama_index/__init__.py,sha256=4fpR5702Qh2t5TaXIx584EkA-BveCPftXPOKvI0Oi3I,105
+ phoenix/trace/llama_index/callback.py,sha256=YKCZZtFwAwasZt9qNSJO6dj97Jt2eN_PBakUYWrCleE,3574
  phoenix/trace/openai/__init__.py,sha256=J3G0uqCxGdksUpaQVHds_Egv2drvh8UEqoLjiQAOveg,79
  phoenix/trace/openai/instrumentor.py,sha256=Lre96YfPxs5iM3frZfwdAh94LUpyYw_RZpSF0XmPtKw,1061
  phoenix/trace/v1/__init__.py,sha256=-IbAD0ruESMjvQLvGAg9CTfjBUATFDx1OXseDPis6-0,88
@@ -169,8 +167,8 @@ phoenix/trace/v1/evaluation_pb2.pyi,sha256=cCbbx06gwQmaH14s3J1X25TtaARh-k1abbxQd
  phoenix/utilities/__init__.py,sha256=3TVirVnjIGyaCFuJCqeZO4tjlzQ_chZgYM0itIwsEpE,656
  phoenix/utilities/error_handling.py,sha256=7b5rpGFj9EWZ8yrZK1IHvxB89suWk3lggDayUQcvZds,1946
  phoenix/utilities/logging.py,sha256=lDXd6EGaamBNcQxL4vP1au9-i_SXe0OraUDiJOcszSw,222
- arize_phoenix-3.0.1.dist-info/METADATA,sha256=MIwob2psMophui1KBYcjRPTmKinVvD9YIY0aDqRE5V0,28556
- arize_phoenix-3.0.1.dist-info/WHEEL,sha256=TJPnKdtrSue7xZ_AVGkp9YXcvDrobsjBds1du3Nx6dc,87
- arize_phoenix-3.0.1.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
- arize_phoenix-3.0.1.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
- arize_phoenix-3.0.1.dist-info/RECORD,,
+ arize_phoenix-3.0.3.dist-info/METADATA,sha256=LZ4BcSMrV3NFDcOfufwwutdTnKMYt6cBBB7t_BJ02b8,28693
+ arize_phoenix-3.0.3.dist-info/WHEEL,sha256=TJPnKdtrSue7xZ_AVGkp9YXcvDrobsjBds1du3Nx6dc,87
+ arize_phoenix-3.0.3.dist-info/licenses/IP_NOTICE,sha256=JBqyyCYYxGDfzQ0TtsQgjts41IJoa-hiwDrBjCb9gHM,469
+ arize_phoenix-3.0.3.dist-info/licenses/LICENSE,sha256=HFkW9REuMOkvKRACuwLPT0hRydHb3zNg-fdFt94td18,3794
+ arize_phoenix-3.0.3.dist-info/RECORD,,
phoenix/core/traces.py CHANGED
@@ -20,12 +20,13 @@ from typing (

  import opentelemetry.proto.trace.v1.trace_pb2 as otlp
  from ddsketch import DDSketch
+ from openinference.semconv.trace import SpanAttributes
  from sortedcontainers import SortedKeyList
  from typing_extensions import TypeAlias
  from wrapt import ObjectProxy

+ import phoenix.trace.schemas
  from phoenix.datetime_utils import right_open_time_range
- from phoenix.trace import semantic_conventions
  from phoenix.trace.otel import decode
  from phoenix.trace.schemas import (
  ATTRIBUTE_PREFIX,
@@ -33,12 +34,10 @@ from phoenix.trace.schemas import (
  CONTEXT_PREFIX,
  ComputedAttributes,
  Span,
- SpanAttributes,
  SpanID,
  SpanStatusCode,
  TraceID,
  )
- from phoenix.trace.semantic_conventions import RETRIEVAL_DOCUMENTS

  END_OF_QUEUE = None  # sentinel value for queue termination

@@ -50,9 +49,9 @@ SPAN_ID = CONTEXT_PREFIX + "span_id"
  PARENT_ID = "parent_id"
  START_TIME = "start_time"
  END_TIME = "end_time"
- LLM_TOKEN_COUNT_TOTAL = ATTRIBUTE_PREFIX + semantic_conventions.LLM_TOKEN_COUNT_TOTAL
- LLM_TOKEN_COUNT_PROMPT = ATTRIBUTE_PREFIX + semantic_conventions.LLM_TOKEN_COUNT_PROMPT
- LLM_TOKEN_COUNT_COMPLETION = ATTRIBUTE_PREFIX + semantic_conventions.LLM_TOKEN_COUNT_COMPLETION
+ LLM_TOKEN_COUNT_TOTAL = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_TOTAL
+ LLM_TOKEN_COUNT_PROMPT = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_PROMPT
+ LLM_TOKEN_COUNT_COMPLETION = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_COMPLETION


  class ReadableSpan(ObjectProxy):  # type: ignore
@@ -73,7 +72,9 @@ class ReadableSpan(ObjectProxy):  # type: ignore
  @property
  def span(self) -> Span:
  span = decode(self._self_otlp_span)
- span.attributes.update(cast(SpanAttributes, self._self_computed_values))
+ span.attributes.update(
+ cast(phoenix.trace.schemas.SpanAttributes, self._self_computed_values)
+ )
  # TODO: compute latency rank percent (which can change depending on how
  # many spans already ingested).
  return span
@@ -333,9 +334,13 @@ class Traces:
  self._token_count_total -= existing_span[LLM_TOKEN_COUNT_TOTAL] or 0
  self._token_count_total += new_span[LLM_TOKEN_COUNT_TOTAL] or 0
  # Update number of documents
- num_documents_update = len(new_span.attributes.get(RETRIEVAL_DOCUMENTS) or ())
+ num_documents_update = len(
+ new_span.attributes.get(SpanAttributes.RETRIEVAL_DOCUMENTS) or ()
+ )
  if existing_span:
- num_documents_update -= len(existing_span.attributes.get(RETRIEVAL_DOCUMENTS) or ())
+ num_documents_update -= len(
+ existing_span.attributes.get(SpanAttributes.RETRIEVAL_DOCUMENTS) or ()
+ )
  if num_documents_update:
  self._num_documents[span_id] += num_documents_update
  # Process previously orphaned spans, if any.
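
The change above replaces constants from the deleted phoenix.trace.semantic_conventions module with the classes exported by openinference.semconv.trace, whose members are plain string constants, so the existing prefix concatenation keeps working. A minimal sketch under that assumption (the printed value depends on ATTRIBUTE_PREFIX at runtime):

```python
# Sketch: SpanAttributes members are string constants, so they can be joined
# with phoenix's own prefixes exactly like the old module-level names were.
from openinference.semconv.trace import SpanAttributes
from phoenix.trace.schemas import ATTRIBUTE_PREFIX

LLM_TOKEN_COUNT_TOTAL = ATTRIBUTE_PREFIX + SpanAttributes.LLM_TOKEN_COUNT_TOTAL
print(LLM_TOKEN_COUNT_TOTAL)  # "<ATTRIBUTE_PREFIX>llm.token_count.total"
```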
phoenix/experimental/evals/functions/classify.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  import logging
  import warnings
  from collections import defaultdict
+ from itertools import product
  from typing import (
  Any,
  DefaultDict,
@@ -18,6 +19,7 @@ from typing import (
  )

  import pandas as pd
+ from openinference.semconv.trace import DocumentAttributes, SpanAttributes
  from pandas import DataFrame
  from typing_extensions import TypeAlias

@@ -40,9 +42,12 @@ from phoenix.experimental.evals.utils import (
  parse_openai_function_call,
  snap_to_rail,
  )
- from phoenix.trace.semantic_conventions import DOCUMENT_CONTENT, INPUT_VALUE, RETRIEVAL_DOCUMENTS
  from phoenix.utilities.logging import printif

+ DOCUMENT_CONTENT = DocumentAttributes.DOCUMENT_CONTENT
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
+
  logger = logging.getLogger(__name__)


@@ -54,8 +59,7 @@ Label: TypeAlias = str
  Score: TypeAlias = Optional[float]
  Explanation: TypeAlias = Optional[str]
  Record: TypeAlias = Mapping[str, Any]
- EvaluatorIndex: TypeAlias = int
- RowIndex: TypeAlias = Any
+ Index: TypeAlias = int

  # snapped_response, explanation, response
  ParsedLLMResponse: TypeAlias = Tuple[Optional[str], Optional[str], str]
@@ -343,8 +347,6 @@ def _get_contents_from_openinference_documents(documents: Iterable[Any]) -> List


  class RunEvalsPayload(NamedTuple):
- evaluator_index: EvaluatorIndex
- row_index: RowIndex
  evaluator: LLMEvaluator
  record: Record

@@ -404,23 +406,21 @@ def run_evals(

  async def _arun_eval(
  payload: RunEvalsPayload,
- ) -> Tuple[EvaluatorIndex, RowIndex, Label, Score, Explanation]:
- label, score, explanation = await payload.evaluator.aevaluate(
+ ) -> Tuple[Label, Score, Explanation]:
+ return await payload.evaluator.aevaluate(
  payload.record,
  provide_explanation=provide_explanation,
  use_function_calling_if_available=use_function_calling_if_available,
  )
- return payload.evaluator_index, payload.row_index, label, score, explanation

  def _run_eval(
  payload: RunEvalsPayload,
- ) -> Tuple[EvaluatorIndex, RowIndex, Label, Score, Explanation]:
- label, score, explanation = payload.evaluator.evaluate(
+ ) -> Tuple[Label, Score, Explanation]:
+ return payload.evaluator.evaluate(
  payload.record,
  provide_explanation=provide_explanation,
  use_function_calling_if_available=use_function_calling_if_available,
  )
- return payload.evaluator_index, payload.row_index, label, score, explanation

  executor = get_executor_on_sync_context(
  _run_eval,
@@ -428,24 +428,20 @@ def run_evals(
  concurrency=concurrency,
  tqdm_bar_format=get_tqdm_progress_bar_formatter("run_evals"),
  exit_on_error=True,
- fallback_return_value=(None, None),
+ fallback_return_value=(None, None, None),
  )
+
+ total_records = len(dataframe)
  payloads = [
- RunEvalsPayload(
- evaluator_index=evaluator_index,
- row_index=row_index,
- evaluator=evaluator,
- record=row.to_dict(),
- )
- # use the position of the row rather than the dataframe index, which is used
- # to ensure the output dataframe has the same row order as the input dataframe
- for row_index, (_, row) in enumerate(dataframe.iterrows())
- for evaluator_index, evaluator in enumerate(evaluators)
+ RunEvalsPayload(evaluator=evaluator, record=row)
+ for evaluator, (_, row) in product(evaluators, dataframe.iterrows())
  ]
- eval_results: List[DefaultDict[RowIndex, Dict[ColumnName, Union[Label, Explanation]]]] = [
+ eval_results: List[DefaultDict[Index, Dict[ColumnName, Union[Label, Explanation]]]] = [
  defaultdict(dict) for _ in range(len(evaluators))
  ]
- for evaluator_index, row_index, label, score, explanation in executor.run(payloads):
+ for index, (label, score, explanation) in enumerate(executor.run(payloads)):
+ evaluator_index = index // total_records
+ row_index = index % total_records
  eval_results[evaluator_index][row_index]["label"] = label
  eval_results[evaluator_index][row_index]["score"] = score
  if provide_explanation:
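
With payloads built by product(evaluators, dataframe.iterrows()) and results returned in submission order, the evaluator and row positions can be recovered from the flat index alone, which is what the integer division and modulo above do. A standalone sketch of that bookkeeping (the names here are illustrative, not taken from classify.py):

```python
# Sketch: recovering (evaluator_index, row_index) from a flat, evaluator-major
# enumeration, mirroring the // and % arithmetic in the hunk above.
from itertools import product

evaluators = ["relevance", "toxicity"]        # illustrative evaluator names
records = [{"row": 0}, {"row": 1}, {"row": 2}]
total_records = len(records)

payloads = list(product(evaluators, records))  # evaluator-major order
for index, payload in enumerate(payloads):
    evaluator_index = index // total_records
    row_index = index % total_records
    assert payload == (evaluators[evaluator_index], records[row_index])
```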
phoenix/experimental/evals/utils/__init__.py CHANGED
@@ -32,7 +32,7 @@ def download_benchmark_dataset(task: str, dataset_name: str) -> pd.DataFrame:
  pandas.DataFrame: A pandas dataframe containing the data.
  """
  jsonl_file_name = f"{dataset_name}.jsonl"
- url = f"http://storage.googleapis.com/arize-assets/phoenix/evals/{task}/{jsonl_file_name}.zip"
+ url = f"http://storage.googleapis.com/arize-phoenix-assets/evals/{task}/{jsonl_file_name}.zip"
  try:
  with urlopen(url) as response:
  zip_byte_stream = BytesIO(response.read())
phoenix/server/api/input_types/SpanSort.py CHANGED
@@ -4,6 +4,7 @@ from typing import Any, Iterable, Iterator, Optional, Protocol

  import pandas as pd
  import strawberry
+ from openinference.semconv.trace import SpanAttributes
  from strawberry import UNSET
  from typing_extensions import assert_never

@@ -13,7 +14,6 @@ from phoenix.core.traces import (
  START_TIME,
  )
  from phoenix.server.api.types.SortDir import SortDir
- from phoenix.trace import semantic_conventions
  from phoenix.trace.schemas import ComputedAttributes, Span, SpanID


@@ -22,9 +22,9 @@ class SpanColumn(Enum):
  startTime = START_TIME
  endTime = END_TIME
  latencyMs = ComputedAttributes.LATENCY_MS.value
- tokenCountTotal = semantic_conventions.LLM_TOKEN_COUNT_TOTAL
- tokenCountPrompt = semantic_conventions.LLM_TOKEN_COUNT_PROMPT
- tokenCountCompletion = semantic_conventions.LLM_TOKEN_COUNT_COMPLETION
+ tokenCountTotal = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
+ tokenCountPrompt = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
+ tokenCountCompletion = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
  cumulativeTokenCountTotal = ComputedAttributes.CUMULATIVE_LLM_TOKEN_COUNT_TOTAL.value
  cumulativeTokenCountPrompt = ComputedAttributes.CUMULATIVE_LLM_TOKEN_COUNT_PROMPT.value
  cumulativeTokenCountCompletion = ComputedAttributes.CUMULATIVE_LLM_TOKEN_COUNT_COMPLETION.value
phoenix/server/api/types/Span.py CHANGED
@@ -5,6 +5,7 @@ from enum import Enum
  from typing import Any, DefaultDict, Dict, List, Mapping, Optional, Sized, cast

  import strawberry
+ from openinference.semconv.trace import EmbeddingAttributes, SpanAttributes
  from strawberry import ID, UNSET
  from strawberry.types import Info

@@ -15,19 +16,17 @@ from phoenix.server.api.types.DocumentRetrievalMetrics import DocumentRetrievalM
  from phoenix.server.api.types.Evaluation import DocumentEvaluation, SpanEvaluation
  from phoenix.server.api.types.MimeType import MimeType
  from phoenix.trace.schemas import ComputedAttributes, SpanID
- from phoenix.trace.semantic_conventions import (
- EMBEDDING_EMBEDDINGS,
- EMBEDDING_VECTOR,
- EXCEPTION_MESSAGE,
- INPUT_MIME_TYPE,
- INPUT_VALUE,
- LLM_TOKEN_COUNT_COMPLETION,
- LLM_TOKEN_COUNT_PROMPT,
- LLM_TOKEN_COUNT_TOTAL,
- OUTPUT_MIME_TYPE,
- OUTPUT_VALUE,
- RETRIEVAL_DOCUMENTS,
- )
+
+ EMBEDDING_EMBEDDINGS = SpanAttributes.EMBEDDING_EMBEDDINGS
+ EMBEDDING_VECTOR = EmbeddingAttributes.EMBEDDING_VECTOR
+ INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
+ LLM_TOKEN_COUNT_COMPLETION = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
+ LLM_TOKEN_COUNT_PROMPT = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
+ LLM_TOKEN_COUNT_TOTAL = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
+ OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE
+ OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS


  @strawberry.enum
@@ -87,7 +86,7 @@ class SpanEvent:
  ) -> "SpanEvent":
  return SpanEvent(
  name=event.name,
- message=cast(str, event.attributes.get(EXCEPTION_MESSAGE) or ""),
+ message=cast(str, event.attributes.get(trace_schema.EXCEPTION_MESSAGE) or ""),
  timestamp=event.timestamp,
  )

phoenix/trace/dsl/filter.py CHANGED
@@ -1,4 +1,5 @@
  import ast
+ import inspect
  import sys
  from dataclasses import dataclass, field
  from difflib import SequenceMatcher
@@ -15,10 +16,10 @@ from typing import (
  cast,
  )

+ from openinference.semconv import trace
  from typing_extensions import TypeGuard

  import phoenix.trace.v1 as pb
- from phoenix.trace import semantic_conventions
  from phoenix.trace.dsl.missing import MISSING
  from phoenix.trace.schemas import COMPUTED_PREFIX, ComputedAttributes, Span, SpanID

@@ -137,9 +138,11 @@ def _allowed_replacements() -> Iterator[Tuple[str, ast.expr]]:
  yield "span.context." + source_segment, ast_replacement

  for field_name in (
- getattr(semantic_conventions, variable_name)
- for variable_name in dir(semantic_conventions)
- if variable_name.isupper()
+ getattr(klass, attr)
+ for name in dir(trace)
+ if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
+ for attr in dir(klass)
+ if attr.isupper()
  ):
  source_segment = field_name
  ast_replacement = _ast_replacement(f"span.attributes.get('{field_name}')")
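
The rewritten generator above no longer scans the upper-case names of phoenix.trace.semantic_conventions; it walks the *Attributes classes exported by openinference.semconv.trace instead. A standalone sketch of what that enumeration yields (the concrete values depend on the installed openinference-semantic-conventions release):

```python
# Sketch: enumerate attribute name strings from openinference.semconv.trace,
# the same pattern now used in filter.py (and in otel.py below).
import inspect

from openinference.semconv import trace

attribute_names = sorted(
    getattr(klass, attr)
    for name in dir(trace)
    if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
    for attr in dir(klass)
    if attr.isupper()
)
print(attribute_names[:5])  # dotted names such as "document.content", "input.value", ...
```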
phoenix/trace/dsl/helpers.py CHANGED
@@ -1,15 +1,15 @@
  from typing import List, Optional, Protocol, Union, cast

  import pandas as pd
+ from openinference.semconv.trace import DocumentAttributes, SpanAttributes

  from phoenix.trace.dsl import SpanQuery
- from phoenix.trace.semantic_conventions import (
- DOCUMENT_CONTENT,
- DOCUMENT_SCORE,
- INPUT_VALUE,
- OUTPUT_VALUE,
- RETRIEVAL_DOCUMENTS,
- )
+
+ DOCUMENT_CONTENT = DocumentAttributes.DOCUMENT_CONTENT
+ DOCUMENT_SCORE = DocumentAttributes.DOCUMENT_SCORE
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
+ OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS

  INPUT = {"input": INPUT_VALUE}
  OUTPUT = {"output": OUTPUT_VALUE}
phoenix/trace/dsl/query.py CHANGED
@@ -19,13 +19,15 @@ from typing (
  )

  import pandas as pd
+ from openinference.semconv.trace import SpanAttributes

  from phoenix.trace.dsl import SpanFilter
  from phoenix.trace.dsl.filter import SupportsGetSpanEvaluation
  from phoenix.trace.schemas import ATTRIBUTE_PREFIX, CONTEXT_PREFIX, Span
- from phoenix.trace.semantic_conventions import RETRIEVAL_DOCUMENTS
  from phoenix.trace.span_json_encoder import span_to_json

+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
+
  _SPAN_ID = "context.span_id"
  _PRESCRIBED_POSITION_PREFIXES = {
  RETRIEVAL_DOCUMENTS: "document_",
phoenix/trace/errors.py CHANGED
@@ -3,3 +3,7 @@ from phoenix.exceptions import PhoenixException

  class InvalidParquetMetadataError(PhoenixException):
  pass
+
+
+ class IncompatibleLibraryVersionError(PhoenixException):
+ pass
phoenix/trace/llama_index/__init__.py CHANGED
@@ -1,4 +1,3 @@
  from .callback import OpenInferenceTraceCallbackHandler
- from .debug_callback import DebugCallbackHandler

- __all__ = ["OpenInferenceTraceCallbackHandler", "DebugCallbackHandler"]
+ __all__ = ["OpenInferenceTraceCallbackHandler"]
phoenix/trace/llama_index/callback.py CHANGED
@@ -1,25 +1,64 @@
  import logging
- from importlib.metadata import PackageNotFoundError
+ from importlib.metadata import PackageNotFoundError, version
  from importlib.util import find_spec
- from typing import (
- Any,
- )
-
- from openinference.instrumentation.llama_index._callback import (
- OpenInferenceTraceCallbackHandler as _OpenInferenceTraceCallbackHandler,
- )
- from openinference.instrumentation.llama_index.version import (
- __version__,
- )
+ from typing import Any
+
  from opentelemetry import trace as trace_api
  from opentelemetry.sdk import trace as trace_sdk
  from opentelemetry.sdk.trace.export import SimpleSpanProcessor

+ from phoenix.trace.errors import IncompatibleLibraryVersionError
  from phoenix.trace.exporter import _OpenInferenceExporter
  from phoenix.trace.tracer import _show_deprecation_warnings

  logger = logging.getLogger(__name__)

+ LLAMA_INDEX_MODERN_VERSION = (0, 10, 0)
+ INSTRUMENTATION_MODERN_VERSION = (1, 0, 0)
+
+
+ def _check_instrumentation_compatibility() -> bool:
+ if find_spec("llama_index") is None:
+ raise PackageNotFoundError("Missing `llama-index`. Install with `pip install llama-index`.")
+ # split the version string into a tuple of integers
+ llama_index_version_str = version("llama-index")
+ llama_index_version = tuple(map(int, llama_index_version_str.split(".")[:3]))
+ instrumentation_version_str = version("openinference-instrumentation-llama-index")
+ instrumentation_version = tuple(map(int, instrumentation_version_str.split(".")[:3]))
+ # check if the llama_index version is compatible with the instrumentation version
+ if (
+ llama_index_version < LLAMA_INDEX_MODERN_VERSION
+ and instrumentation_version >= INSTRUMENTATION_MODERN_VERSION
+ ):
+ raise IncompatibleLibraryVersionError(
+ f"llama-index v{llama_index_version_str} is not compatible with "
+ f"openinference-instrumentation-llama-index v{instrumentation_version_str}."
+ "Please either migrate llama-index to at least 0.10.0 or downgrade "
+ "openinference-instrumentation-llama-index via "
+ "`pip install 'openinference-instrumentation-llama-index<1.0.0'`."
+ )
+ elif (
+ llama_index_version >= LLAMA_INDEX_MODERN_VERSION
+ and instrumentation_version < INSTRUMENTATION_MODERN_VERSION
+ ):
+ raise IncompatibleLibraryVersionError(
+ f"llama-index v{llama_index_version_str} is not compatible with "
+ f"openinference-instrumentation-llama-index v{instrumentation_version_str}."
+ "Please upgrade openinference-instrumentation-llama-index to at least 1.0.0"
+ "`pip install 'openinference-instrumentation-llama-index>=1.0.0'`."
+ )
+ # if the versions are compatible, return True
+ return True
+
+
+ if _check_instrumentation_compatibility():
+ from openinference.instrumentation.llama_index._callback import (
+ OpenInferenceTraceCallbackHandler as _OpenInferenceTraceCallbackHandler,
+ )
+ from openinference.instrumentation.llama_index.version import (
+ __version__,
+ )
+

  class OpenInferenceTraceCallbackHandler(_OpenInferenceTraceCallbackHandler):
  """Callback handler for storing LLM application trace data in OpenInference format.
@@ -33,10 +72,6 @@ class OpenInferenceTraceCallbackHandler(_OpenInferenceTraceCallbackHandler):

  def __init__(self, *args: Any, **kwargs: Any) -> None:
  _show_deprecation_warnings(self, *args, **kwargs)
- if find_spec("llama_index") is None:
- raise PackageNotFoundError(
- "Missing `llama-index`. Install with `pip install llama-index`."
- )
  tracer_provider = trace_sdk.TracerProvider()
  tracer_provider.add_span_processor(SimpleSpanProcessor(_OpenInferenceExporter()))
  super().__init__(trace_api.get_tracer(__name__, __version__, tracer_provider))
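
The compatibility check above reduces to a comparison of three-component version tuples, and it runs when the module is imported, so an incompatible pairing surfaces as the new IncompatibleLibraryVersionError. A small sketch of both pieces (the installed versions are whatever your environment provides):

```python
# Sketch: the tuple comparison behind _check_instrumentation_compatibility,
# plus catching the new error when importing the handler.
from importlib.metadata import version


def version_tuple(dist: str) -> tuple:
    # "0.9.45" -> (0, 9, 45); only the first three components are compared
    return tuple(map(int, version(dist).split(".")[:3]))


print(version_tuple("llama-index") < (0, 10, 0))

from phoenix.trace.errors import IncompatibleLibraryVersionError

try:
    from phoenix.trace.llama_index import OpenInferenceTraceCallbackHandler  # noqa: F401
except IncompatibleLibraryVersionError as err:
    print(f"incompatible llama-index / instrumentation pairing: {err}")
```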
phoenix/trace/otel.py CHANGED
@@ -1,3 +1,4 @@
+ import inspect
  import json
  from binascii import hexlify, unhexlify
  from datetime import datetime, timezone
@@ -21,12 +22,23 @@ from typing import (

  import numpy as np
  import opentelemetry.proto.trace.v1.trace_pb2 as otlp
+ from openinference.semconv import trace
+ from openinference.semconv.trace import (
+ DocumentAttributes,
+ EmbeddingAttributes,
+ MessageAttributes,
+ SpanAttributes,
+ ToolCallAttributes,
+ )
  from opentelemetry.proto.common.v1.common_pb2 import AnyValue, ArrayValue, KeyValue
  from opentelemetry.util.types import Attributes, AttributeValue
  from typing_extensions import TypeAlias, assert_never

- import phoenix.trace.semantic_conventions as sem_conv
  from phoenix.trace.schemas import (
+ EXCEPTION_ESCAPED,
+ EXCEPTION_MESSAGE,
+ EXCEPTION_STACKTRACE,
+ EXCEPTION_TYPE,
  MimeType,
  Span,
  SpanContext,
@@ -37,18 +49,38 @@ from phoenix.trace.schemas import (
  SpanStatusCode,
  TraceID,
  )
- from phoenix.trace.semantic_conventions import (
- DOCUMENT_METADATA,
- EXCEPTION_ESCAPED,
- EXCEPTION_MESSAGE,
- EXCEPTION_STACKTRACE,
- EXCEPTION_TYPE,
- INPUT_MIME_TYPE,
- LLM_PROMPT_TEMPLATE_VARIABLES,
- OPENINFERENCE_SPAN_KIND,
- OUTPUT_MIME_TYPE,
- TOOL_PARAMETERS,
- )
+
+ DOCUMENT_CONTENT = DocumentAttributes.DOCUMENT_CONTENT
+ DOCUMENT_ID = DocumentAttributes.DOCUMENT_ID
+ DOCUMENT_METADATA = DocumentAttributes.DOCUMENT_METADATA
+ EMBEDDING_EMBEDDINGS = SpanAttributes.EMBEDDING_EMBEDDINGS
+ EMBEDDING_MODEL_NAME = SpanAttributes.EMBEDDING_MODEL_NAME
+ EMBEDDING_TEXT = EmbeddingAttributes.EMBEDDING_TEXT
+ EMBEDDING_VECTOR = EmbeddingAttributes.EMBEDDING_VECTOR
+ INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE
+ INPUT_VALUE = SpanAttributes.INPUT_VALUE
+ LLM_INPUT_MESSAGES = SpanAttributes.LLM_INPUT_MESSAGES
+ LLM_INVOCATION_PARAMETERS = SpanAttributes.LLM_INVOCATION_PARAMETERS
+ LLM_MODEL_NAME = SpanAttributes.LLM_MODEL_NAME
+ LLM_OUTPUT_MESSAGES = SpanAttributes.LLM_OUTPUT_MESSAGES
+ LLM_PROMPTS = SpanAttributes.LLM_PROMPTS
+ LLM_TOKEN_COUNT_COMPLETION = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
+ LLM_TOKEN_COUNT_PROMPT = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
+ LLM_TOKEN_COUNT_TOTAL = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
+ MESSAGE_CONTENT = MessageAttributes.MESSAGE_CONTENT
+ MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = MessageAttributes.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON
+ MESSAGE_FUNCTION_CALL_NAME = MessageAttributes.MESSAGE_FUNCTION_CALL_NAME
+ MESSAGE_ROLE = MessageAttributes.MESSAGE_ROLE
+ MESSAGE_TOOL_CALLS = MessageAttributes.MESSAGE_TOOL_CALLS
+ OPENINFERENCE_SPAN_KIND = SpanAttributes.OPENINFERENCE_SPAN_KIND
+ OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE
+ OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS
+ TOOL_CALL_FUNCTION_ARGUMENTS_JSON = ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON
+ TOOL_CALL_FUNCTION_NAME = ToolCallAttributes.TOOL_CALL_FUNCTION_NAME
+ TOOL_PARAMETERS = SpanAttributes.TOOL_PARAMETERS
+ LLM_PROMPT_TEMPLATE = SpanAttributes.LLM_PROMPT_TEMPLATE
+ LLM_PROMPT_TEMPLATE_VARIABLES = SpanAttributes.LLM_PROMPT_TEMPLATE_VARIABLES


  def decode(otlp_span: otlp.Span) -> Span:
@@ -186,7 +218,13 @@ def _decode_status(otlp_status: otlp.Status) -> Tuple[SpanStatusCode, StatusMessage


  _SEMANTIC_CONVENTIONS: List[str] = sorted(
- (getattr(sem_conv, name) for name in dir(sem_conv) if name.isupper()),
+ (
+ getattr(klass, attr)
+ for name in dir(trace)
+ if name.endswith("Attributes") and inspect.isclass(klass := getattr(trace, name))
+ for attr in dir(klass)
+ if attr.isupper()
+ ),
  reverse=True,
  )  # sorted so the longer strings go first

phoenix/trace/schemas.py CHANGED
@@ -4,12 +4,10 @@ from enum import Enum
  from typing import Any, Dict, List, Optional, Union
  from uuid import UUID

- from phoenix.trace.semantic_conventions import (
- EXCEPTION_ESCAPED,
- EXCEPTION_MESSAGE,
- EXCEPTION_STACKTRACE,
- EXCEPTION_TYPE,
- )
+ EXCEPTION_TYPE = "exception.type"
+ EXCEPTION_MESSAGE = "exception.message"
+ EXCEPTION_ESCAPED = "exception.escaped"
+ EXCEPTION_STACKTRACE = "exception.stacktrace"


  class SpanStatusCode(Enum):
phoenix/trace/span_json_decoder.py CHANGED
@@ -2,7 +2,10 @@ import json
  from datetime import datetime
  from typing import Any, Dict, Optional

+ from openinference.semconv.trace import SpanAttributes
+
  from phoenix.trace.schemas import (
+ EXCEPTION_MESSAGE,
  MimeType,
  Span,
  SpanContext,
@@ -14,11 +17,9 @@ from phoenix.trace.schemas import (
  SpanStatusCode,
  TraceID,
  )
- from phoenix.trace.semantic_conventions import (
- EXCEPTION_MESSAGE,
- INPUT_MIME_TYPE,
- OUTPUT_MIME_TYPE,
- )
+
+ INPUT_MIME_TYPE = SpanAttributes.INPUT_MIME_TYPE
+ OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE


  def json_to_attributes(obj: Optional[Dict[str, Any]]) -> Dict[str, Any]:
phoenix/trace/span_json_encoder.py CHANGED
@@ -5,12 +5,7 @@ from enum import Enum
  from typing import Any, List
  from uuid import UUID

- from .schemas import (
- Span,
- SpanContext,
- SpanConversationAttributes,
- SpanEvent,
- )
+ from phoenix.trace.schemas import Span, SpanContext, SpanConversationAttributes, SpanEvent


  class SpanJSONEncoder(json.JSONEncoder):
phoenix/trace/trace_dataset.py CHANGED
@@ -6,23 +6,26 @@ from uuid import UUID, uuid4
  from warnings import warn

  import pandas as pd
+ from openinference.semconv.trace import (
+ DocumentAttributes,
+ RerankerAttributes,
+ SpanAttributes,
+ )
  from pandas import DataFrame, read_parquet
  from pyarrow import Schema, Table, parquet

+ from phoenix.config import DATASET_DIR, GENERATED_DATASET_NAME_PREFIX, TRACE_DATASET_DIR
  from phoenix.datetime_utils import normalize_timestamps
  from phoenix.trace.errors import InvalidParquetMetadataError
-
- from ..config import DATASET_DIR, GENERATED_DATASET_NAME_PREFIX, TRACE_DATASET_DIR
- from .schemas import ATTRIBUTE_PREFIX, CONTEXT_PREFIX, Span
- from .semantic_conventions import (
- DOCUMENT_METADATA,
- RERANKER_INPUT_DOCUMENTS,
- RERANKER_OUTPUT_DOCUMENTS,
- RETRIEVAL_DOCUMENTS,
- )
- from .span_evaluations import Evaluations, SpanEvaluations
- from .span_json_decoder import json_to_span
- from .span_json_encoder import span_to_json
+ from phoenix.trace.schemas import ATTRIBUTE_PREFIX, CONTEXT_PREFIX, Span
+ from phoenix.trace.span_evaluations import Evaluations, SpanEvaluations
+ from phoenix.trace.span_json_decoder import json_to_span
+ from phoenix.trace.span_json_encoder import span_to_json
+
+ DOCUMENT_METADATA = DocumentAttributes.DOCUMENT_METADATA
+ RERANKER_INPUT_DOCUMENTS = RerankerAttributes.RERANKER_INPUT_DOCUMENTS
+ RERANKER_OUTPUT_DOCUMENTS = RerankerAttributes.RERANKER_OUTPUT_DOCUMENTS
+ RETRIEVAL_DOCUMENTS = SpanAttributes.RETRIEVAL_DOCUMENTS

  # A set of columns that is required
  REQUIRED_COLUMNS = [
phoenix/version.py CHANGED
@@ -1 +1 @@
- __version__ = "3.0.1"
+ __version__ = "3.0.3"
phoenix/trace/llama_index/debug_callback.py DELETED
@@ -1,50 +0,0 @@
- import logging
- from typing import Any, Dict, List, Optional
-
- from llama_index.callbacks.base_handler import BaseCallbackHandler
- from llama_index.callbacks.schema import CBEventType
-
- logger = logging.getLogger(__name__)
- logger.addHandler(logging.NullHandler())
-
- CBEventID = str
-
-
- class DebugCallbackHandler(BaseCallbackHandler):
- def _print_event(self, payload: Dict[Any, Any]) -> None:
- for k, v in payload.items():
- print(f"**{k}: **\n{v}")
- print("*" * 50)
-
- def __init__(self) -> None:
- super().__init__(event_starts_to_ignore=[], event_ends_to_ignore=[])
-
- def on_event_start(
- self,
- event_type: CBEventType,
- payload: Optional[Dict[str, Any]] = None,
- event_id: CBEventID = "",
- parent_id: CBEventID = "",
- **kwargs: Any,
- ) -> CBEventID:
- return event_id
-
- def on_event_end(
- self,
- event_type: CBEventType,
- payload: Optional[Dict[str, Any]] = None,
- event_id: CBEventID = "",
- **kwargs: Any,
- ) -> None:
- if payload is not None:
- self._print_event(payload)
-
- def start_trace(self, trace_id: Optional[str] = None) -> None:
- return
-
- def end_trace(
- self,
- trace_id: Optional[str] = None,
- trace_map: Optional[Dict[str, List[str]]] = None,
- ) -> None:
- return
phoenix/trace/semantic_conventions.py DELETED
@@ -1,172 +0,0 @@
- """
- Semantic conventions for the attributes of a span
- https://github.com/Arize-ai/open-inference-spec/blob/main/trace/spec/semantic_conventions.md
- """
-
- EXCEPTION_TYPE = "exception.type"
- EXCEPTION_MESSAGE = "exception.message"
- EXCEPTION_ESCAPED = "exception.escaped"
- EXCEPTION_STACKTRACE = "exception.stacktrace"
-
-
- OUTPUT_VALUE = "output.value"
- OUTPUT_MIME_TYPE = "output.mime_type"
- """
- The type of output.value. If unspecified, the type is plain text by default.
- If type is JSON, the value is a string representing a JSON object.
- """
- INPUT_VALUE = "input.value"
- INPUT_MIME_TYPE = "input.mime_type"
- """
- The type of input.value. If unspecified, the type is plain text by default.
- If type is JSON, the value is a string representing a JSON object.
- """
-
- EMBEDDING_EMBEDDINGS = "embedding.embeddings"
- """
- A list of objects containing embedding data, including the vector and represented piece of text.
- """
- EMBEDDING_MODEL_NAME = "embedding.model_name"
- """
- The name of the embedding model.
- """
- EMBEDDING_TEXT = "embedding.text"
- """
- The text represented by the embedding.
- """
- EMBEDDING_VECTOR = "embedding.vector"
- """
- The embedding vector.
- """
-
- MESSAGE_ROLE = "message.role"
- """
- The role of the message, such as "user", "agent", "function".
- """
- MESSAGE_NAME = "message.name"
- """
- The name of the message, often used to identify the function
- that was used to generate the message.
- """
- MESSAGE_TOOL_CALLS = "message.tool_calls"
- """
- The tool calls generated by the model, such as function calls.
- """
- TOOL_CALL_FUNCTION_NAME = "tool_call.function.name"
- """
- The name of function that is being called during a tool call.
- """
- TOOL_CALL_FUNCTION_ARGUMENTS_JSON = "tool_call.function.arguments"
- """
- The JSON string representing the arguments passed to the function
- during a tool call.
- """
- MESSAGE_FUNCTION_CALL_NAME = "message.function_call_name"
- """
- The function name that is a part of the message list.
- This is populated for role 'function' or 'agent' as a mechanism to identify
- the function that was called during the execution of a tool
- """
- MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = "message.function_call_arguments_json"
- """
- The JSON string representing the arguments passed to the function
- during a function call
- """
- MESSAGE_CONTENT = "message.content"
- """
- The content of the message to the llm
- """
- LLM_FUNCTION_CALL = "llm.function_call"
- """
- For models and APIs that support function calling. Records attributes such as the function name and
- arguments to the called function.
- """
- LLM_INVOCATION_PARAMETERS = "llm.invocation_parameters"
- """
- Invocation parameters passed to the LLM or API, such as the model name, temperature, etc.
- """
- LLM_INPUT_MESSAGES = "llm.input_messages"
- """
- Messages provided to a chat API.
- """
- LLM_OUTPUT_MESSAGES = "llm.output_messages"
- """
- Messages received from a chat API.
- """
- LLM_MODEL_NAME = "llm.model_name"
- """
- The name of the model being used.
- """
- LLM_PROMPTS = "llm.prompts"
- """
- Prompts provided to a completions API.
- """
- LLM_PROMPT_TEMPLATE = "llm.prompt_template.template"
- """
- The prompt template as a Python f-string.
- """
- LLM_PROMPT_TEMPLATE_VARIABLES = "llm.prompt_template.variables"
- """
- A list of input variables to the prompt template.
- """
- LLM_PROMPT_TEMPLATE_VERSION = "llm.prompt_template.version"
- """
- The version of the prompt template being used.
- """
- LLM_TOKEN_COUNT_PROMPT = "llm.token_count.prompt"
- """
- Number of tokens in the prompt.
- """
- LLM_TOKEN_COUNT_COMPLETION = "llm.token_count.completion"
- """
- Number of tokens in the completion.
- """
- LLM_TOKEN_COUNT_TOTAL = "llm.token_count.total"
- """
- Total number of tokens, including both prompt and completion.
- """
-
- TOOL_NAME = "tool.name"
- """
- Name of the tool being used.
- """
- TOOL_DESCRIPTION = "tool.description"
- """
- Description of the tool's purpose, typically used to select the tool.
- """
- TOOL_PARAMETERS = "tool.parameters"
- """
- Parameters of the tool, e.g. see https://platform.openai.com/docs/guides/gpt/function-calling
- """
-
- RETRIEVAL_DOCUMENTS = "retrieval.documents"
- DOCUMENT_ID = "document.id"
- DOCUMENT_SCORE = "document.score"
- DOCUMENT_CONTENT = "document.content"
- DOCUMENT_METADATA = "document.metadata"
- """
- Document metadata as a string representing a JSON object
- """
-
- RERANKER_INPUT_DOCUMENTS = "reranker.input_documents"
- """
- List of documents as input to the reranker
- """
- RERANKER_OUTPUT_DOCUMENTS = "reranker.output_documents"
- """
- List of documents as output from the reranker
- """
- RERANKER_QUERY = "reranker.query"
- """
- Query string for the reranker
- """
- RERANKER_MODEL_NAME = "reranker.model_name"
- """
- Model name of the reranker
- """
- RERANKER_TOP_K = "reranker.top_k"
- """
- Top K parameter of the reranker
- """
-
- OPENINFERENCE_SPAN_KIND = "openinference.span.kind"
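
For reference, the constants from the deleted module now live on the openinference.semconv.trace classes; the aliases added in phoenix/trace/otel.py above spell out the correspondence. A short sketch of that mapping (a representative subset, not the full list):

```python
# Sketch: a few old phoenix.trace.semantic_conventions names and where the
# equivalent constants come from after this release (per the otel.py aliases).
from openinference.semconv.trace import (
    DocumentAttributes,
    EmbeddingAttributes,
    MessageAttributes,
    SpanAttributes,
    ToolCallAttributes,
)

OLD_NAME_TO_NEW_CONSTANT = {
    "DOCUMENT_CONTENT": DocumentAttributes.DOCUMENT_CONTENT,
    "EMBEDDING_VECTOR": EmbeddingAttributes.EMBEDDING_VECTOR,
    "MESSAGE_CONTENT": MessageAttributes.MESSAGE_CONTENT,
    "TOOL_CALL_FUNCTION_NAME": ToolCallAttributes.TOOL_CALL_FUNCTION_NAME,
    "LLM_TOKEN_COUNT_TOTAL": SpanAttributes.LLM_TOKEN_COUNT_TOTAL,
    "OPENINFERENCE_SPAN_KIND": SpanAttributes.OPENINFERENCE_SPAN_KIND,
}
```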