openinference-instrumentation-openllmetry 0.1.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openinference_instrumentation_openllmetry-0.1.1/.gitignore +5 -0
- openinference_instrumentation_openllmetry-0.1.1/LICENSE +201 -0
- openinference_instrumentation_openllmetry-0.1.1/PKG-INFO +120 -0
- openinference_instrumentation_openllmetry-0.1.1/README.md +89 -0
- openinference_instrumentation_openllmetry-0.1.1/pyproject.toml +94 -0
- openinference_instrumentation_openllmetry-0.1.1/src/openinference/instrumentation/openllmetry/__init__.py +3 -0
- openinference_instrumentation_openllmetry-0.1.1/src/openinference/instrumentation/openllmetry/_span_processor.py +312 -0
- openinference_instrumentation_openllmetry-0.1.1/src/openinference/instrumentation/openllmetry/version.py +1 -0
- openinference_instrumentation_openllmetry-0.1.1/tests/openinference/instrumentation/openllmetry/cassettes/test_instrumentor/TestOpenLLMetryInstrumentor.test_openllmetry_instrumentor.yaml +27 -0
- openinference_instrumentation_openllmetry-0.1.1/tests/openinference/instrumentation/openllmetry/test_instrumentor.py +120 -0
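The listing above is the full contents of the released sdist. To reproduce it locally, a minimal sketch using standard `pip` and `tar` commands (the download directory is arbitrary and chosen here only for illustration):

```shell
# Fetch the exact sdist for this release without installing it
pip download openinference-instrumentation-openllmetry==0.1.1 --no-deps --no-binary :all: -d ./dist

# List its contents; the paths should match the file listing above
tar -tzf dist/openinference_instrumentation_openllmetry-0.1.1.tar.gz
```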
openinference_instrumentation_openllmetry-0.1.1/LICENSE
@@ -0,0 +1,201 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright The OpenInference Authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
openinference_instrumentation_openllmetry-0.1.1/PKG-INFO
@@ -0,0 +1,120 @@

Metadata-Version: 2.4
Name: openinference-instrumentation-openllmetry
Version: 0.1.1
Summary: OpenInference OpenLLMetry Instrumentation
Project-URL: Homepage, https://github.com/Arize-ai/openinference/tree/main/python/instrumentation/openinference-instrumentation-openllmetry
Author-email: OpenInference Authors <oss@arize.com>
License-Expression: Apache-2.0
License-File: LICENSE
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Requires-Python: <3.14,>=3.9
Requires-Dist: openinference-instrumentation>=0.1.34
Requires-Dist: openinference-semantic-conventions>=0.1.21
Requires-Dist: opentelemetry-sdk>=1.20.0
Requires-Dist: opentelemetry-semantic-conventions-ai>=0.4.9
Provides-Extra: test
Requires-Dist: openai>=1.90.0; extra == 'test'
Requires-Dist: opentelemetry-exporter-otlp-proto-http; extra == 'test'
Requires-Dist: opentelemetry-instrumentation-openai>=0.40.9; extra == 'test'
Requires-Dist: opentelemetry-sdk>=1.20.0; extra == 'test'
Requires-Dist: pytest-recording; extra == 'test'
Description-Content-Type: text/markdown

# OpenInference OpenLLMetry (Traceloop)

Python auto-instrumentation library for OpenLLMetry. This library allows you to convert OpenLLMetry traces to OpenInference, which is OpenTelemetry compatible, and view those traces in [Arize Phoenix](https://github.com/Arize-ai/phoenix).

## Installation

```shell
pip install openinference-instrumentation-openllmetry
```

## Quickstart

This quickstart shows you how to view your OpenLLMetry traces in Phoenix.

Install the required packages.

```shell
pip install arize-phoenix opentelemetry-sdk opentelemetry-exporter-otlp opentelemetry-instrumentation-openai
```

Start Phoenix in the background as a collector. By default, it listens on `http://localhost:6006`. You can visit the app via a browser at the same address. (Phoenix does not send data over the internet. It only operates locally on your machine.)

```shell
phoenix serve
```

Here's a simple example that demonstrates how to convert OpenLLMetry traces into OpenInference and view those traces in Phoenix:

```python
import os
import grpc
import openai
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from phoenix.otel import register
from openinference.instrumentation.openllmetry import OpenInferenceSpanProcessor
from opentelemetry.instrumentation.openai import OpenAIInstrumentor

# Set your OpenAI API key
os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"

# Set up the tracer provider
tracer_provider = register(
    project_name="default"  # Phoenix project name
)

# Convert OpenLLMetry spans to OpenInference attributes as they finish
tracer_provider.add_span_processor(OpenInferenceSpanProcessor())

# Export the converted spans to Phoenix
tracer_provider.add_span_processor(
    BatchSpanProcessor(
        OTLPSpanExporter(
            endpoint="http://localhost:4317",  # if using Phoenix Cloud, change to your Phoenix Cloud endpoint (space -> Settings -> Endpoint/Hostname)
            headers={},
            compression=grpc.Compression.Gzip,  # use the enum rather than a string
        )
    )
)

OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)

# Define and invoke your OpenAI model
client = openai.OpenAI()

messages = [
    {"role": "user", "content": "What is the national food of Yemen?"}
]

response = client.chat.completions.create(
    model="gpt-4",
    messages=messages,
)

# Now view your converted OpenLLMetry traces in Phoenix!
```

## This example:

1. Uses the OpenLLMetry OpenAI instrumentor to instrument the application.
2. Creates an OpenAI client and runs a simple query.
3. Converts the resulting spans to OpenInference and exports them to Phoenix via span processors.

The traces will be visible in the Phoenix UI at `http://localhost:6006`.

## More Info

- [More info on OpenInference and Phoenix](https://docs.arize.com/phoenix)
- [How to customize spans to track sessions, metadata, etc.](https://github.com/Arize-ai/openinference/tree/main/python/openinference-instrumentation#customizing-spans)
- [How to account for private information and span payload customization](https://github.com/Arize-ai/openinference/tree/main/python/openinference-instrumentation#tracing-configuration)
openinference_instrumentation_openllmetry-0.1.1/README.md
@@ -0,0 +1,89 @@

# OpenInference OpenLLMetry (Traceloop)

Python auto-instrumentation library for OpenLLMetry. This library allows you to convert OpenLLMetry traces to OpenInference, which is OpenTelemetry compatible, and view those traces in [Arize Phoenix](https://github.com/Arize-ai/phoenix).

## Installation

```shell
pip install openinference-instrumentation-openllmetry
```

## Quickstart

This quickstart shows you how to view your OpenLLMetry traces in Phoenix.

Install the required packages.

```shell
pip install arize-phoenix opentelemetry-sdk opentelemetry-exporter-otlp opentelemetry-instrumentation-openai
```

Start Phoenix in the background as a collector. By default, it listens on `http://localhost:6006`. You can visit the app via a browser at the same address. (Phoenix does not send data over the internet. It only operates locally on your machine.)

```shell
phoenix serve
```

Here's a simple example that demonstrates how to convert OpenLLMetry traces into OpenInference and view those traces in Phoenix:

```python
import os
import grpc
import openai
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from phoenix.otel import register
from openinference.instrumentation.openllmetry import OpenInferenceSpanProcessor
from opentelemetry.instrumentation.openai import OpenAIInstrumentor

# Set your OpenAI API key
os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"

# Set up the tracer provider
tracer_provider = register(
    project_name="default"  # Phoenix project name
)

# Convert OpenLLMetry spans to OpenInference attributes as they finish
tracer_provider.add_span_processor(OpenInferenceSpanProcessor())

# Export the converted spans to Phoenix
tracer_provider.add_span_processor(
    BatchSpanProcessor(
        OTLPSpanExporter(
            endpoint="http://localhost:4317",  # if using Phoenix Cloud, change to your Phoenix Cloud endpoint (space -> Settings -> Endpoint/Hostname)
            headers={},
            compression=grpc.Compression.Gzip,  # use the enum rather than a string
        )
    )
)

OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)

# Define and invoke your OpenAI model
client = openai.OpenAI()

messages = [
    {"role": "user", "content": "What is the national food of Yemen?"}
]

response = client.chat.completions.create(
    model="gpt-4",
    messages=messages,
)

# Now view your converted OpenLLMetry traces in Phoenix!
```

## This example:

1. Uses the OpenLLMetry OpenAI instrumentor to instrument the application.
2. Creates an OpenAI client and runs a simple query.
3. Converts the resulting spans to OpenInference and exports them to Phoenix via span processors.

The traces will be visible in the Phoenix UI at `http://localhost:6006`.

## More Info

- [More info on OpenInference and Phoenix](https://docs.arize.com/phoenix)
- [How to customize spans to track sessions, metadata, etc.](https://github.com/Arize-ai/openinference/tree/main/python/openinference-instrumentation#customizing-spans)
- [How to account for private information and span payload customization](https://github.com/Arize-ai/openinference/tree/main/python/openinference-instrumentation#tracing-configuration)
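If you want to sanity-check the conversion without running Phoenix at all, the package's own test suite uses an in-memory exporter for exactly this purpose. A minimal sketch along the same lines (assumes `OPENAI_API_KEY` is set and the quickstart packages minus Phoenix are installed):

```python
import openai
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

from openinference.instrumentation.openllmetry import OpenInferenceSpanProcessor

exporter = InMemorySpanExporter()
tracer_provider = TracerProvider()
tracer_provider.add_span_processor(OpenInferenceSpanProcessor())   # convert OpenLLMetry -> OpenInference
tracer_provider.add_span_processor(SimpleSpanProcessor(exporter))  # keep finished spans in memory
OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)

client = openai.OpenAI()
client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "What is the national food of Yemen?"}],
)

# Each finished span should now carry OpenInference attributes such as
# "openinference.span.kind", "input.value", and "output.value".
for span in exporter.get_finished_spans():
    print(span.name, dict(span.attributes).get("openinference.span.kind"))
```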
openinference_instrumentation_openllmetry-0.1.1/pyproject.toml
@@ -0,0 +1,94 @@

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "openinference-instrumentation-openllmetry"
dynamic = ["version"]
description = "OpenInference OpenLLMetry Instrumentation"
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.9, <3.14"
authors = [
  { name = "OpenInference Authors", email = "oss@arize.com" },
]
classifiers = [
  "Development Status :: 5 - Production/Stable",
  "Intended Audience :: Developers",
  "License :: OSI Approved :: Apache Software License",
  "Programming Language :: Python",
  "Programming Language :: Python :: 3",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
  "Programming Language :: Python :: 3.13",
]
dependencies = [
  "opentelemetry-sdk>=1.20.0",
  "openinference-instrumentation>=0.1.34",
  "openinference-semantic-conventions>=0.1.21",
  "opentelemetry-semantic-conventions-ai>=0.4.9",
]

[project.optional-dependencies]
test = [
  "opentelemetry-instrumentation-openai>=0.40.9",
  "opentelemetry-sdk>=1.20.0",
  "opentelemetry-exporter-otlp-proto-http",
  "openai>=1.90.0",
  "pytest-recording",
]

[project.entry-points.openinference_instrumentor]
openllmetry = "openinference.instrumentation.openllmetry:OpenInferenceSpanProcessor"

[project.urls]
Homepage = "https://github.com/Arize-ai/openinference/tree/main/python/instrumentation/openinference-instrumentation-openllmetry"

[tool.hatch.version]
path = "src/openinference/instrumentation/openllmetry/version.py"

[tool.hatch.build.targets.sdist]
include = [
  "/src",
  "/tests",
]

[tool.hatch.build.targets.wheel]
packages = ["src/openinference"]

[tool.pytest.ini_options]
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"
testpaths = [
  "tests",
]

[tool.mypy]
strict = true
explicit_package_bases = true
exclude = [
  "examples",
  "dist",
  "sdist",
]

[[tool.mypy.overrides]]
ignore_missing_imports = true
module = [
  "wrapt",
]

[tool.ruff]
line-length = 100
target-version = "py38"

[tool.ruff.lint.per-file-ignores]
"*.ipynb" = ["E402", "E501"]

[tool.ruff.lint]
select = ["E", "F", "W", "I"]

[tool.ruff.lint.isort]
force-single-line = false
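The `openinference_instrumentor` entry point declared above means the processor can also be discovered programmatically rather than imported directly. A hedged sketch using the standard library's entry-point API (the group and entry names come from the pyproject above; the selection keywords require Python 3.10+):

```python
from importlib.metadata import entry_points  # Python 3.10+ selection API

# Find the "openllmetry" entry in the group declared in pyproject.toml
for ep in entry_points(group="openinference_instrumentor"):
    if ep.name == "openllmetry":
        processor_cls = ep.load()  # resolves to OpenInferenceSpanProcessor
        print(processor_cls.__name__)
```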
openinference_instrumentation_openllmetry-0.1.1/src/openinference/instrumentation/openllmetry/_span_processor.py
@@ -0,0 +1,312 @@

"""
OpenLLMetry → OpenInference Span Processor

This module provides a SpanProcessor that converts OpenLLMetry (TraceLoop) spans
into OpenInference semantic conventions for Phoenix observability.
"""

import json
from collections import defaultdict
from typing import Any, Dict, List, Optional

from opentelemetry.sdk.trace import SpanProcessor

# Import OpenLLMetry constants from the official package
from opentelemetry.semconv_ai import SpanAttributes, TraceloopSpanKindValues

import openinference.instrumentation as oi

# Import OpenInference constants
import openinference.semconv.trace as sc
from openinference.instrumentation import (
    get_input_attributes,
    get_llm_attributes,
    get_output_attributes,
)

__all__ = ["OpenInferenceSpanProcessor"]

# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
_SPAN_KIND_MAPPING: Dict[str, str] = {
    TraceloopSpanKindValues.WORKFLOW.value: sc.OpenInferenceSpanKindValues.CHAIN.value,
    TraceloopSpanKindValues.TASK.value: sc.OpenInferenceSpanKindValues.TOOL.value,
    TraceloopSpanKindValues.AGENT.value: sc.OpenInferenceSpanKindValues.AGENT.value,
    TraceloopSpanKindValues.TOOL.value: sc.OpenInferenceSpanKindValues.TOOL.value,
    "llm": sc.OpenInferenceSpanKindValues.LLM.value,
    TraceloopSpanKindValues.UNKNOWN.value: sc.OpenInferenceSpanKindValues.UNKNOWN.value,
}

_INVOCATION_PARAMETER_KEYS: List[str] = [
    SpanAttributes.LLM_REQUEST_MAX_TOKENS,
    SpanAttributes.LLM_REQUEST_TEMPERATURE,
    SpanAttributes.LLM_REQUEST_TOP_P,
    SpanAttributes.LLM_TOP_K,
    SpanAttributes.LLM_CHAT_STOP_SEQUENCES,
    SpanAttributes.LLM_REQUEST_REPETITION_PENALTY,
    SpanAttributes.LLM_FREQUENCY_PENALTY,
    SpanAttributes.LLM_PRESENCE_PENALTY,
]

_OPENINF_TOOL_LIST_KEY = "llm.tools"


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _as_json_str(value: Any) -> str:
    """
    Ensure the given value is serialized as a compact JSON string.
    """
    if value is None:
        return ""
    if isinstance(value, str):
        return value
    return json.dumps(value, separators=(",", ":"))


def _safe_int(value: Any) -> Optional[int]:
    try:
        return int(value) if value is not None else None
    except (TypeError, ValueError):
        return None


def _map_generic_span(attrs: Dict[str, Any]) -> Dict[str, Any]:
    """
    Convert TraceLoop 'workflow' / 'task' / 'agent' / 'tool' spans
    to OpenInference semantic conventions.
    """
    raw_kind = str(attrs.get(SpanAttributes.TRACELOOP_SPAN_KIND, "unknown")).lower()
    kind_val = _SPAN_KIND_MAPPING.get(raw_kind, sc.OpenInferenceSpanKindValues.UNKNOWN.value)

    mapped: Dict[str, Any] = {"openinference.span.kind": kind_val}

    input_raw = attrs.get(SpanAttributes.TRACELOOP_ENTITY_INPUT)
    if input_raw is not None:
        mapped.update(
            {
                "input.mime_type": "application/json",
                "input.value": _as_json_str(input_raw),
            }
        )

    output_raw = attrs.get(SpanAttributes.TRACELOOP_ENTITY_OUTPUT)
    if output_raw is not None:
        mapped.update(
            {
                "output.mime_type": "application/json",
                "output.value": _as_json_str(output_raw),
            }
        )

    return mapped


def _collect_oi_messages(
    attrs: Dict[str, Any], prefix: str
) -> tuple[List[oi.Message], List[Optional[str]]]:
    """
    Reconstruct a list of OpenInference Messages from span attributes
    using the given prefix ("gen_ai.prompt." or "gen_ai.completion.").
    """
    buckets: Dict[int, Dict[str, Any]] = defaultdict(dict)

    for key, val in attrs.items():
        if not key.startswith(prefix):
            continue
        parts = key.split(".")
        if len(parts) < 4 or not parts[2].isdigit():
            continue
        idx = int(parts[2])
        field = parts[3]

        # Handle tool_calls.* entries
        if field == "tool_calls" and len(parts) >= 6 and parts[4].isdigit():
            tool_idx = int(parts[4])
            sub_field = ".".join(parts[5:])
            tc_bucket = buckets[idx].setdefault("tool_calls", defaultdict(dict))
            tc_bucket[tool_idx][sub_field] = val
            continue

        buckets[idx][field] = val

    messages: List[oi.Message] = []
    finish_reasons: List[Optional[str]] = []

    for idx in sorted(buckets):
        raw = buckets[idx]
        role = raw.get("role", "user")
        msg = oi.Message(role=role)
        if "content" in raw:
            msg["content"] = raw["content"]
        # Note: finish_reason is not part of the Message TypedDict, so we handle it separately
        finish_reason = raw.get("finish_reason")

        # Build tool_calls if present
        if calls := raw.get("tool_calls"):
            oi_calls: List[oi.ToolCall] = []
            for tidx in sorted(calls):
                entry = calls[tidx]
                name = entry.get("function.name") or entry.get("name")
                args = entry.get("function.arguments") or entry.get("arguments")
                call_id = entry.get("id") or entry.get("tool_call.id")
                if not name:
                    continue
                oi_calls.append(
                    oi.ToolCall(id=call_id, function=oi.ToolCallFunction(name=name, arguments=args))
                )
            if oi_calls:
                msg["tool_calls"] = oi_calls

        messages.append(msg)
        finish_reasons.append(finish_reason)

    return messages, finish_reasons


def _handle_tool_list(raw: Any, dst: Dict[str, Any]) -> List[oi.Tool]:
    """
    Convert OpenLLMetry functions/tools list into OpenInference tools list
    and set the appropriate span attributes in dst.
    """
    try:
        tools_py = json.loads(raw) if isinstance(raw, str) else raw
    except Exception:
        return []
    if not isinstance(tools_py, list):
        tools_py = [tools_py]

    dst[_OPENINF_TOOL_LIST_KEY] = json.dumps(tools_py, separators=(",", ":"))
    oi_tools: List[oi.Tool] = []

    for idx, tool in enumerate(tools_py):
        if isinstance(tool, dict):
            base = f"{_OPENINF_TOOL_LIST_KEY}.{idx}"
            for k, v in tool.items():
                key = f"{base}.{k}"
                dst[key] = (
                    json.dumps(v, separators=(",", ":")) if isinstance(v, (list, dict)) else v
                )
            oi_tools.append(oi.Tool(json_schema=tool))

    return oi_tools


class OpenInferenceSpanProcessor(SpanProcessor):
    """
    SpanProcessor that converts OpenLLMetry spans to OpenInference attributes.
    """

    def on_end(self, span: Any) -> None:
        attrs: Dict[str, Any] = getattr(span, "_attributes", {})

        kind = attrs.get(SpanAttributes.TRACELOOP_SPAN_KIND)
        if kind and kind.lower() != "llm":
            generic = _map_generic_span(attrs)
            attrs.clear()
            attrs.update(generic)
            return

        # Skip if no LLM prompt data
        if not any(k.startswith("gen_ai.prompt.") for k in attrs):
            return

        # Reconstruct messages
        inputs, input_finish_reasons = _collect_oi_messages(attrs, "gen_ai.prompt.")
        outputs, output_finish_reasons = _collect_oi_messages(attrs, "gen_ai.completion.")

        # Token usage
        prompt_toks = _safe_int(attrs.get(SpanAttributes.LLM_USAGE_PROMPT_TOKENS)) or 0
        comp_toks = _safe_int(attrs.get(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS)) or 0
        total_toks = _safe_int(attrs.get(SpanAttributes.LLM_USAGE_TOTAL_TOKENS)) or (
            prompt_toks + comp_toks
        )
        cache_read = _safe_int(attrs.get(SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS)) or 0
        token_count = oi.TokenCount(
            prompt=prompt_toks,
            completion=comp_toks,
            total=total_toks,
            prompt_details={"cache_read": cache_read},
        )

        # Invocation parameters
        invocation_params: Dict[str, Any] = {}
        for key in _INVOCATION_PARAMETER_KEYS:
            if key in attrs:
                invocation_params[key.rsplit(".", 1)[-1]] = attrs[key]
        if SpanAttributes.LLM_REQUEST_MODEL in attrs:
            invocation_params.setdefault("model", attrs[SpanAttributes.LLM_REQUEST_MODEL])
        # Tools
        tool_key = (
            SpanAttributes.LLM_REQUEST_FUNCTIONS
            if SpanAttributes.LLM_REQUEST_FUNCTIONS in attrs
            else ("llm.request.tools" if "llm.request.tools" in attrs else None)
        )
        oi_tools: List[oi.Tool] = []
        if tool_key:
            oi_tools = _handle_tool_list(attrs[tool_key], attrs)

        # Build bodies for OpenInference
        request_body = {
            "messages": [{"role": m.get("role"), "content": m.get("content", "")} for m in inputs],
            "model": attrs.get(SpanAttributes.LLM_REQUEST_MODEL),
            "max_tokens": invocation_params.get("max_tokens"),
            "temperature": invocation_params.get("temperature"),
            "top_p": invocation_params.get("top_p"),
            "tools": json.loads(attrs[tool_key]) if tool_key else None,
        }
        assistant_text = outputs[0].get("content", "") if outputs else ""
        finish_reason = output_finish_reasons[0] if output_finish_reasons else "stop"
        response_body = {
            "id": attrs.get("gen_ai.response.id"),
            "choices": [
                {
                    "index": 0,
                    "finish_reason": finish_reason,
                    "message": {
                        "role": outputs[0].get("role", "assistant") if outputs else "assistant",
                        "content": assistant_text,
                        "annotations": [],
                    },
                }
            ],
            "model": attrs.get(SpanAttributes.LLM_RESPONSE_MODEL)
            or attrs.get(SpanAttributes.LLM_REQUEST_MODEL),
            "usage": {
                "prompt_tokens": prompt_toks,
                "completion_tokens": comp_toks,
                "total_tokens": total_toks,
            },
        }

        system_val = attrs.get(SpanAttributes.LLM_SYSTEM, "unknown")
        provider_val = attrs.get("gen_ai.provider", system_val)

        # Span kind
        span_val = (
            _SPAN_KIND_MAPPING.get(str(kind).lower(), sc.OpenInferenceSpanKindValues.LLM.value)
            if kind
            else sc.OpenInferenceSpanKindValues.LLM.value
        )

        # Assemble OpenInference attributes
        oi_attrs = {
            sc.SpanAttributes.OPENINFERENCE_SPAN_KIND: span_val,
            **get_llm_attributes(
                provider=provider_val,
                system=system_val,
                model_name=request_body["model"],
                input_messages=inputs,
                output_messages=outputs,
                token_count=token_count,
                invocation_parameters=invocation_params,
                tools=oi_tools,
            ),
            **get_input_attributes(request_body),
            **get_output_attributes(response_body),
        }

        attrs.update(oi_attrs)
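To make the mapping above concrete, here is a small illustrative sketch of the flattened attribute shape that `_collect_oi_messages` expects. The key layout follows the `gen_ai.prompt.<i>.<field>` / `gen_ai.completion.<i>.<field>` convention read by the code above; the values are invented examples, and importing the private helper is done only for illustration:

```python
# Illustrative only: _collect_oi_messages is a private helper of this module.
from openinference.instrumentation.openllmetry._span_processor import _collect_oi_messages

# Flattened OpenLLMetry-style attributes as they might appear on an LLM span
attrs = {
    "gen_ai.prompt.0.role": "user",
    "gen_ai.prompt.0.content": "What is the capital of Yemen?",
    "gen_ai.completion.0.role": "assistant",
    "gen_ai.completion.0.content": "The capital of Yemen is Sana'a.",
    "gen_ai.completion.0.finish_reason": "stop",
}

inputs, _ = _collect_oi_messages(attrs, "gen_ai.prompt.")
outputs, finish_reasons = _collect_oi_messages(attrs, "gen_ai.completion.")
# inputs         -> [{"role": "user", "content": "What is the capital of Yemen?"}]
# outputs        -> [{"role": "assistant", "content": "The capital of Yemen is Sana'a."}]
# finish_reasons -> ["stop"]
```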
openinference_instrumentation_openllmetry-0.1.1/src/openinference/instrumentation/openllmetry/version.py
@@ -0,0 +1 @@

__version__ = "0.1.1"
openinference_instrumentation_openllmetry-0.1.1/tests/openinference/instrumentation/openllmetry/cassettes/test_instrumentor/TestOpenLLMetryInstrumentor.test_openllmetry_instrumentor.yaml
@@ -0,0 +1,27 @@

interactions:
- request:
    body: '{"messages":[{"role":"user","content":"What is the capital of Yemen?"}],"model":"gpt-4.1"}'
    headers: {}
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n  \"id\": \"chatcmpl-BoZ5bsWOSiprchAunIwkrttGXLDYr\",\n  \"object\":
        \"chat.completion\",\n  \"created\": 1751390983,\n  \"model\": \"gpt-4.1-2025-04-14\",\n
        \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
        \"assistant\",\n        \"content\": \"The capital of Yemen is **Sana'a**.
        However, due to the ongoing conflict in Yemen, the seat of the internationally
        recognized government has been temporarily moved to **Aden**. Officially,
        Sana'a remains the constitutional capital.\",\n        \"refusal\": null,\n
        \ \"annotations\": []\n      },\n      \"logprobs\": null,\n      \"finish_reason\":
        \"stop\"\n    }\n  ],\n  \"usage\": {\n    \"prompt_tokens\": 14,\n    \"completion_tokens\":
        48,\n    \"total_tokens\": 62,\n    \"prompt_tokens_details\": {\n      \"cached_tokens\":
        0,\n      \"audio_tokens\": 0\n    },\n    \"completion_tokens_details\":
        {\n      \"reasoning_tokens\": 0,\n      \"audio_tokens\": 0,\n      \"accepted_prediction_tokens\":
        0,\n      \"rejected_prediction_tokens\": 0\n    }\n  },\n  \"service_tier\":
        \"default\",\n  \"system_fingerprint\": \"fp_51e1070cf2\"\n}\n"
    headers: {}
    status:
      code: 200
      message: OK
version: 1
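The test module that follows replays this cassette through `pytest-recording`, so it runs without contacting the OpenAI API. A hedged sketch of how to run it from an unpacked sdist or a checkout of the repository (`--record-mode` is pytest-recording's standard option; the exact invocation is an assumption, not taken from this package's docs):

```shell
# Install the package with its test extra, then replay the recorded cassette
pip install "openinference-instrumentation-openllmetry[test]" pytest
pytest tests --record-mode=none
```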
openinference_instrumentation_openllmetry-0.1.1/tests/openinference/instrumentation/openllmetry/test_instrumentor.py
@@ -0,0 +1,120 @@

from typing import Any, Mapping, cast

import openai
import pytest
from openai.types.chat import ChatCompletionUserMessageParam
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
from opentelemetry.sdk.trace import ReadableSpan, TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from opentelemetry.util.types import AttributeValue

from openinference.instrumentation.openllmetry import OpenInferenceSpanProcessor
from openinference.semconv.trace import SpanAttributes


@pytest.fixture
def openai_api_key(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv("OPENAI_API_KEY", "sk-0123456789")


def is_openinference_span(span: ReadableSpan) -> bool:
    """Check if a span is an OpenInference span."""
    if span.attributes is None:
        return False
    return SpanAttributes.OPENINFERENCE_SPAN_KIND in span.attributes


def remove_all_vcr_request_headers(request: Any) -> Any:
    """
    Removes all request headers.

    Example:
    ```
    @pytest.mark.vcr(
        before_record_request=remove_all_vcr_request_headers
    )
    def test_openai() -> None:
        # make request to OpenAI
    ```
    """
    request.headers.clear()
    return request


def remove_all_vcr_response_headers(response: dict[str, Any]) -> dict[str, Any]:
    """
    Removes all response headers.

    Example:
    ```
    @pytest.mark.vcr(
        before_record_response=remove_all_vcr_response_headers
    )
    def test_openai() -> None:
        # make request to OpenAI
    ```
    """
    response["headers"] = {}
    return response


class TestOpenLLMetryInstrumentor:
    @pytest.mark.vcr(
        decode_compressed_response=True,
        before_record_request=remove_all_vcr_request_headers,
        before_record_response=remove_all_vcr_response_headers,
    )
    def test_openllmetry_instrumentor(
        self,
        openai_api_key: str,
    ) -> None:
        in_memory_span_exporter = InMemorySpanExporter()
        in_memory_span_exporter.clear()

        # Set up the tracer provider
        tracer_provider = TracerProvider()

        tracer_provider.add_span_processor(OpenInferenceSpanProcessor())

        tracer_provider.add_span_processor(SimpleSpanProcessor(in_memory_span_exporter))

        OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)

        # Define and invoke your OpenAI model
        client = openai.OpenAI()

        messages: list[ChatCompletionUserMessageParam] = [
            {"role": "user", "content": "What is the capital of Yemen?"}
        ]

        response = client.chat.completions.create(
            model="gpt-4.1",
            messages=messages,
        )

        # Basic assertion on the result
        assert response.choices[0].message.content is not None
        assert (
            "Sanaa" in response.choices[0].message.content
            or "Sana'a" in response.choices[0].message.content
        )

        # Get spans
        spans = in_memory_span_exporter.get_finished_spans()
        assert len(spans) > 0
        for span in spans:
            # Check span attributes
            attributes = dict(cast(Mapping[str, AttributeValue], span.attributes))

            # Verify it's an OpenInference span
            assert is_openinference_span(span)

            # Check that we have input and output values
            assert SpanAttributes.INPUT_VALUE in attributes
            assert SpanAttributes.OUTPUT_VALUE in attributes

            # Verify the model name is captured (exact attribute may vary)
            model_name_attr = next(
                (k for k in attributes if "model" in k.lower() and "name" in k.lower()), None
            )
            assert model_name_attr is not None, "Model name attribute not found"
            assert "gpt-4" in str(attributes[model_name_attr])