@arizeai/phoenix-client 3.0.0 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/dist/esm/__generated__/api/v1.d.ts +3 -3
  2. package/dist/esm/__generated__/api/v1.d.ts.map +1 -1
  3. package/dist/esm/experiments/instrumention.d.ts +6 -1
  4. package/dist/esm/experiments/instrumention.d.ts.map +1 -1
  5. package/dist/esm/experiments/instrumention.js +15 -15
  6. package/dist/esm/experiments/instrumention.js.map +1 -1
  7. package/dist/esm/experiments/runExperiment.d.ts +12 -2
  8. package/dist/esm/experiments/runExperiment.d.ts.map +1 -1
  9. package/dist/esm/experiments/runExperiment.js +5 -2
  10. package/dist/esm/experiments/runExperiment.js.map +1 -1
  11. package/dist/esm/schemas/llm/converters.js +4 -4
  12. package/dist/esm/schemas/llm/converters.js.map +1 -1
  13. package/dist/esm/tsconfig.esm.tsbuildinfo +1 -1
  14. package/dist/src/__generated__/api/v1.d.ts +3 -3
  15. package/dist/src/__generated__/api/v1.d.ts.map +1 -1
  16. package/dist/src/experiments/instrumention.d.ts +6 -1
  17. package/dist/src/experiments/instrumention.d.ts.map +1 -1
  18. package/dist/src/experiments/instrumention.js +13 -13
  19. package/dist/src/experiments/instrumention.js.map +1 -1
  20. package/dist/src/experiments/runExperiment.d.ts +12 -2
  21. package/dist/src/experiments/runExperiment.d.ts.map +1 -1
  22. package/dist/src/experiments/runExperiment.js +5 -2
  23. package/dist/src/experiments/runExperiment.js.map +1 -1
  24. package/dist/src/schemas/llm/converters.js +4 -4
  25. package/dist/src/schemas/llm/converters.js.map +1 -1
  26. package/dist/tsconfig.tsbuildinfo +1 -1
  27. package/package.json +1 -1
  28. package/src/__generated__/api/v1.ts +3 -3
  29. package/src/experiments/instrumention.ts +27 -14
  30. package/src/experiments/runExperiment.ts +15 -0
  31. package/src/schemas/llm/converters.ts +4 -4
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@arizeai/phoenix-client",
3
- "version": "3.0.0",
3
+ "version": "3.1.0",
4
4
  "description": "A client for the Phoenix API",
5
5
  "main": "dist/src/index.js",
6
6
  "module": "dist/esm/index.js",
@@ -2351,11 +2351,11 @@ export interface components {
2351
2351
  * @description The end time of the evaluation in ISO format
2352
2352
  */
2353
2353
  end_time: string;
2354
- /** @description The result of the evaluation */
2355
- result: components["schemas"]["ExperimentEvaluationResult"];
2354
+ /** @description The result of the evaluation. Either result or error must be provided. */
2355
+ result?: components["schemas"]["ExperimentEvaluationResult"] | null;
2356
2356
  /**
2357
2357
  * Error
2358
- * @description Optional error message if the evaluation encountered an error
2358
+ * @description Error message if the evaluation encountered an error. Either result or error must be provided.
2359
2359
  */
2360
2360
  error?: string | null;
2361
2361
  /**
@@ -1,10 +1,16 @@
1
1
  import { diag, DiagConsoleLogger, DiagLogLevel } from "@opentelemetry/api";
2
2
  import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto";
3
3
  import { resourceFromAttributes } from "@opentelemetry/resources";
4
- import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
4
+ import {
5
+ NodeTracerProvider,
6
+ SpanProcessor,
7
+ } from "@opentelemetry/sdk-trace-node";
5
8
  import { SEMRESATTRS_PROJECT_NAME } from "@arizeai/openinference-semantic-conventions";
6
9
  import { HeadersOptions } from "openapi-fetch";
7
- import { OpenInferenceSimpleSpanProcessor } from "@arizeai/openinference-vercel";
10
+ import {
11
+ OpenInferenceBatchSpanProcessor,
12
+ OpenInferenceSimpleSpanProcessor,
13
+ } from "@arizeai/openinference-vercel";
8
14
 
9
15
  /**
10
16
  * Creates a provider that exports traces to Phoenix.
@@ -13,9 +19,15 @@ export function createProvider({
13
19
  projectName,
14
20
  baseUrl,
15
21
  headers,
22
+ useBatchSpanProcessor = true,
16
23
  }: {
17
24
  projectName: string;
18
25
  headers: HeadersOptions;
26
+ /**
27
+ * Whether to use batching for the span processor.
28
+ * @default true
29
+ */
30
+ useBatchSpanProcessor: boolean;
19
31
  /**
20
32
  * The base URL of the Phoenix. Doesn't include the /v1/traces path.
21
33
  */
@@ -23,22 +35,23 @@ export function createProvider({
23
35
  }) {
24
36
  diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.ERROR);
25
37
 
38
+ const exporter = new OTLPTraceExporter({
39
+ url: `${baseUrl}/v1/traces`,
40
+ headers: Array.isArray(headers) ? Object.fromEntries(headers) : headers,
41
+ });
42
+
43
+ let spanProcessor: SpanProcessor;
44
+ if (useBatchSpanProcessor) {
45
+ spanProcessor = new OpenInferenceBatchSpanProcessor({ exporter });
46
+ } else {
47
+ spanProcessor = new OpenInferenceSimpleSpanProcessor({ exporter });
48
+ }
49
+
26
50
  const provider = new NodeTracerProvider({
27
51
  resource: resourceFromAttributes({
28
52
  [SEMRESATTRS_PROJECT_NAME]: projectName,
29
53
  }),
30
- spanProcessors: [
31
- // We opt to use the OpenInferenceSimpleSpanProcessor instead of the SimpleSpanProcessor
32
- // Since so many AI applications use the AI SDK
33
- new OpenInferenceSimpleSpanProcessor({
34
- exporter: new OTLPTraceExporter({
35
- url: `${baseUrl}/v1/traces`,
36
- headers: Array.isArray(headers)
37
- ? Object.fromEntries(headers)
38
- : headers,
39
- }),
40
- }),
41
- ],
54
+ spanProcessors: [spanProcessor],
42
55
  });
43
56
 
44
57
  return provider;
@@ -94,6 +94,11 @@ export type RunExperimentParams = ClientFn & {
94
94
  * @default true
95
95
  */
96
96
  setGlobalTracerProvider?: boolean;
97
+ /**
98
+ * Whether to use batching for the span processor.
99
+ * @default true
100
+ */
101
+ useBatchSpanProcessor?: boolean;
97
102
  };
98
103
 
99
104
  /**
@@ -141,6 +146,7 @@ export async function runExperiment({
141
146
  concurrency = 5,
142
147
  dryRun = false,
143
148
  setGlobalTracerProvider = true,
149
+ useBatchSpanProcessor = true,
144
150
  }: RunExperimentParams): Promise<RanExperiment> {
145
151
  let provider: NodeTracerProvider | undefined;
146
152
  const isDryRun = typeof dryRun === "number" || dryRun === true;
@@ -201,6 +207,7 @@ export async function runExperiment({
201
207
  projectName,
202
208
  baseUrl,
203
209
  headers: client.config.headers ?? {},
210
+ useBatchSpanProcessor,
204
211
  });
205
212
  // Register the provider
206
213
  if (setGlobalTracerProvider) {
@@ -276,6 +283,7 @@ export async function runExperiment({
276
283
  concurrency,
277
284
  dryRun,
278
285
  setGlobalTracerProvider,
286
+ useBatchSpanProcessor,
279
287
  });
280
288
  ranExperiment.evaluationRuns = evaluationRuns;
281
289
 
@@ -421,6 +429,7 @@ export async function evaluateExperiment({
421
429
  concurrency = 5,
422
430
  dryRun = false,
423
431
  setGlobalTracerProvider = true,
432
+ useBatchSpanProcessor = true,
424
433
  }: {
425
434
  /**
426
435
  * The experiment to evaluate
@@ -445,6 +454,11 @@ export async function evaluateExperiment({
445
454
  * @default true
446
455
  */
447
456
  setGlobalTracerProvider?: boolean;
457
+ /**
458
+ * Whether to use batching for the span processor.
459
+ * @default true
460
+ */
461
+ useBatchSpanProcessor?: boolean;
448
462
  }): Promise<RanExperiment> {
449
463
  const isDryRun = typeof dryRun === "number" || dryRun === true;
450
464
  const client = _client ?? createClient();
@@ -459,6 +473,7 @@ export async function evaluateExperiment({
459
473
  projectName: "evaluators",
460
474
  baseUrl,
461
475
  headers: client.config.headers ?? {},
476
+ useBatchSpanProcessor,
462
477
  });
463
478
  if (setGlobalTracerProvider) {
464
479
  provider.register();
@@ -38,7 +38,7 @@ export const safelyConvertMessageToProvider = <
38
38
  );
39
39
  // convert the OpenAI format to the target provider format
40
40
  return fromOpenAIMessage({ message: openAIMessage, targetProvider });
41
- } catch (e) {
41
+ } catch {
42
42
  return null;
43
43
  }
44
44
  };
@@ -64,7 +64,7 @@ export const safelyConvertToolCallToProvider = <
64
64
  toolCall: openAIToolCall,
65
65
  targetProvider,
66
66
  });
67
- } catch (e) {
67
+ } catch {
68
68
  return null;
69
69
  }
70
70
  };
@@ -90,7 +90,7 @@ export const safelyConvertToolDefinitionToProvider = <
90
90
  toolDefinition: openAIToolDefinition,
91
91
  targetProvider,
92
92
  });
93
- } catch (e) {
93
+ } catch {
94
94
  return null;
95
95
  }
96
96
  };
@@ -116,7 +116,7 @@ export const safelyConvertToolChoiceToProvider = <
116
116
  toolChoice: openAIToolChoice,
117
117
  targetProvider,
118
118
  });
119
- } catch (e) {
119
+ } catch {
120
120
  return null;
121
121
  }
122
122
  };