@overmind-lab/trace-sdk 0.0.2 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -31,7 +31,7 @@ const overmindClient = new OvermindClient({
31
31
  // 2. Initialize tracing — must be called before any OpenAI calls
32
32
  overmindClient.initTracing({
33
33
  enableBatching: false,
34
- enabledProviders: { openai: true },
34
+ enabledProviders: { openai: OpenAI }, // this is important to patch the correct client
35
35
  instrumentations: [],
36
36
  });
37
37
 
@@ -64,7 +64,7 @@ Traces are sent automatically to `https://api.overmindlab.ai` and will appear in
64
64
 
65
65
  | Option | Type | Required | Description |
66
66
  |---|---|---|---|
67
- | `enabledProviders` | `{ openai?: boolean; anthropic?: boolean }` | Yes | Which LLM providers to instrument. |
67
+ | `enabledProviders` | `{ openai: typeof OpenAI }` | Yes | Pass the imported provider class to monkey-patch, e.g. `{ openai: OpenAI }` where `OpenAI` is imported from `"openai"`. |
68
68
  | `enableBatching` | `boolean` | Yes | `true` to batch spans before export (recommended for production), `false` to export immediately. |
69
69
  | `instrumentations` | `Instrumentation[]` | No | Additional OpenTelemetry instrumentations to register. |
70
70
  | `spanProcessors` | `SpanProcessor[]` | No | Additional span processors (e.g. custom exporters). |
@@ -84,7 +84,7 @@ Traces are sent automatically to `https://api.overmindlab.ai` and will appear in
84
84
 
85
85
  ## What Gets Traced
86
86
 
87
- When `enabledProviders: { openai: true }` is set, the SDK automatically captures:
87
+ When `enabledProviders: { openai: OpenAI }` is set, the SDK automatically captures:
88
88
 
89
89
  - Prompts and completions
90
90
  - Model name, temperature, top-p, max tokens
@@ -103,7 +103,7 @@ Enable batching in production to reduce network overhead:
103
103
  ```ts
104
104
  overmindClient.initTracing({
105
105
  enableBatching: true, // buffer spans and flush in batches
106
- enabledProviders: { openai: true },
106
+ enabledProviders: { openai: OpenAI },
107
107
  });
108
108
  ```
109
109
 
@@ -0,0 +1,124 @@
1
+ # @overmind-lab/trace-sdk
2
+
3
+ JavaScript/TypeScript SDK for [Overmind](https://overmindlab.ai) — automatic LLM observability powered by OpenTelemetry.
4
+
5
+ Instrument your OpenAI calls with a single `initTracing()` call. Traces are exported to the Overmind platform with zero changes to your existing AI code.
6
+
7
+ ---
8
+
9
+ ## Installation
10
+
11
+ ```bash
12
+ bun add @overmind-lab/trace-sdk openai
13
+ # or
14
+ npm install @overmind-lab/trace-sdk openai
15
+ ```
16
+
17
+ ---
18
+
19
+ ## Quick Start with OpenAI
20
+
21
+ ```ts
22
+ import { OpenAI } from "openai";
23
+ import { OvermindClient } from "@overmind-lab/trace-sdk";
24
+
25
+ // 1. Create the client
26
+ const overmindClient = new OvermindClient({
27
+ apiKey: process.env.OVERMIND_API_KEY!,
28
+ appName: "my fintech app",
29
+ });
30
+
31
+ // 2. Initialize tracing — must be called before any OpenAI calls
32
+ overmindClient.initTracing({
33
+ enableBatching: false,
34
+ enabledProviders: { openai: OpenAI }, // this is important to patch the correct client
35
+ instrumentations: [],
36
+ });
37
+
38
+ // 3. Use OpenAI as normal — all calls are automatically traced
39
+ const openai = new OpenAI({
40
+ apiKey: process.env.OPENAI_API_KEY,
41
+ });
42
+
43
+ const response = await openai.chat.completions.create({
44
+ model: "gpt-4o-mini",
45
+ messages: [{ role: "user", content: "Hello, how are you?" }],
46
+ });
47
+ ```
48
+
49
+ Traces are sent automatically to `https://api.overmindlab.ai` and will appear in your Overmind dashboard.
50
+
51
+ ---
52
+
53
+ ## Configuration
54
+
55
+ ### `OvermindClient(config)`
56
+
57
+ | Option | Type | Required | Description |
58
+ |---|---|---|---|
59
+ | `apiKey` | `string` | Yes | Your Overmind API key. Falls back to `OVERMIND_API_KEY` env var. |
60
+ | `appName` | `string` | No | Name of your service, shown in the dashboard. Defaults to `"overmind-js"`. |
61
+ | `baseUrl` | `string` | No | Override the Overmind ingest endpoint. Defaults to `OVERMIND_TRACES_URL` env var or `https://api.overmindlab.ai`. |
62
+
63
+ ### `initTracing(options)`
64
+
65
+ | Option | Type | Required | Description |
66
+ |---|---|---|---|
67
+ | `enabledProviders` | `{ openai: typeof OpenAI }` | Yes | Pass the imported provider class to monkey-patch, e.g. `{ openai: OpenAI }` where `OpenAI` is imported from `"openai"`. |
68
+ | `enableBatching` | `boolean` | Yes | `true` to batch spans before export (recommended for production), `false` to export immediately. |
69
+ | `instrumentations` | `Instrumentation[]` | No | Additional OpenTelemetry instrumentations to register. |
70
+ | `spanProcessors` | `SpanProcessor[]` | No | Additional span processors (e.g. custom exporters). |
71
+
72
+ ---
73
+
74
+ ## Environment Variables
75
+
76
+ | Variable | Description |
77
+ |---|---|
78
+ | `OVERMIND_API_KEY` | Your Overmind API key |
79
+ | `OVERMIND_TRACES_URL` | Override the traces ingest base URL |
80
+ | `DEPLOYMENT_ENVIRONMENT` | Tag traces with an environment (e.g. `production`, `staging`). Defaults to `development`. |
81
+ | `OPENAI_API_KEY` | Your OpenAI API key |
82
+
83
+ ---
84
+
85
+ ## What Gets Traced
86
+
87
+ When `enabledProviders: { openai: OpenAI }` is set, the SDK automatically captures:
88
+
89
+ - Prompts and completions
90
+ - Model name, temperature, top-p, max tokens
91
+ - Token usage
92
+ - Latency per request
93
+ - Errors and exceptions
94
+
95
+ All data is attached to OpenTelemetry spans and exported to Overmind.
96
+
97
+ ---
98
+
99
+ ## Production Recommendations
100
+
101
+ Enable batching in production to reduce network overhead:
102
+
103
+ ```ts
104
+ overmindClient.initTracing({
105
+ enableBatching: true, // buffer spans and flush in batches
106
+ enabledProviders: { openai: OpenAI },
107
+ });
108
+ ```
109
+
110
+ Use `enableBatching: false` during local development to see traces immediately.
111
+
112
+ ---
113
+
114
+ ## Resource Attributes
115
+
116
+ Every trace is tagged with the following attributes automatically:
117
+
118
+ | Attribute | Value |
119
+ |---|---|
120
+ | `service.name` | Value of `appName` |
121
+ | `service.version` | SDK version |
122
+ | `deployment.environment` | `DEPLOYMENT_ENVIRONMENT` env var or `"development"` |
123
+ | `overmind.sdk.name` | `overmind-js` |
124
+ | `overmind.sdk.version` | SDK version |
@@ -0,0 +1,74 @@
1
+ {
2
+ "$schema": "https://biomejs.dev/schemas/2.4.3/schema.json",
3
+ "assist": {
4
+ "actions": {
5
+ "source": {
6
+ "organizeImports": {
7
+ "level": "on",
8
+ "options": {
9
+ "groups": [
10
+ ["@opentelemetry/**"],
11
+ ":BLANK_LINE:",
12
+ [":PACKAGE:", ":PACKAGE_WITH_PROTOCOL:"],
13
+ ":BLANK_LINE:",
14
+ [":PATH:"],
15
+ ":BLANK_LINE:"
16
+ ]
17
+ }
18
+ },
19
+ "useSortedKeys": {
20
+ "level": "on",
21
+ "options": {}
22
+ },
23
+ "useSortedProperties": {
24
+ "level": "on",
25
+ "options": {}
26
+ }
27
+ }
28
+ },
29
+ "enabled": true
30
+ },
31
+ "files": {
32
+ "ignoreUnknown": true,
33
+ "includes": ["**"]
34
+ },
35
+ "formatter": {
36
+ "attributePosition": "auto",
37
+ "enabled": true,
38
+ "formatWithErrors": false,
39
+ "indentStyle": "space",
40
+ "indentWidth": 2,
41
+ "lineEnding": "lf",
42
+ "lineWidth": 100
43
+ },
44
+ "javascript": {
45
+ "formatter": {
46
+ "lineWidth": 100,
47
+ "quoteStyle": "double",
48
+ "trailingCommas": "es5"
49
+ }
50
+ },
51
+ "json": {
52
+ "formatter": {
53
+ "trailingCommas": "none"
54
+ }
55
+ },
56
+ "linter": {
57
+ "domains": {
58
+ "project": "recommended",
59
+ "test": "recommended"
60
+ },
61
+ "enabled": true,
62
+ "rules": {
63
+ "suspicious": {
64
+ "noArrayIndexKey": "off",
65
+ "noUnknownAtRules": "off"
66
+ }
67
+ }
68
+ },
69
+ "vcs": {
70
+ "clientKind": "git",
71
+ "enabled": true,
72
+ "useIgnoreFile": true
73
+ }
74
+ }
@@ -0,0 +1,39 @@
1
+ {
2
+ "dependencies": {
3
+ "@opentelemetry/auto-instrumentations-node": "^0.70.0",
4
+ "@opentelemetry/exporter-trace-otlp-proto": "^0.212.0",
5
+ "@opentelemetry/instrumentation-openai": "^0.10.0",
6
+ "@opentelemetry/sdk-node": "^0.212.0",
7
+ "@opentelemetry/sdk-trace-node": "^2.5.1",
8
+ "@traceloop/ai-semantic-conventions": "^0.22.5",
9
+ "js-tiktoken": "^1.0.21"
10
+ },
11
+ "optionalDependencies": {
12
+ "@biomejs/biome": "^2.4.3"
13
+ },
14
+ "devDependencies": {},
15
+ "peerDependencies": {
16
+ "openai": "*"
17
+ },
18
+ "name": "@overmind-lab/trace-sdk",
19
+ "main": "src/index.ts",
20
+ "exports": {
21
+ ".": {
22
+ "import": "./src/index.ts",
23
+ "require": "./src/index.ts"
24
+ }
25
+ },
26
+ "packageManager": "bun@1.3.5",
27
+ "private": false,
28
+ "publishConfig": {
29
+ "access": "public",
30
+ "tag": "latest"
31
+ },
32
+ "scripts": {
33
+ "build": "tsc",
34
+ "check": "biome check --write",
35
+ "dev": "tsx watch src/index.ts",
36
+ "format": "biome format --write"
37
+ },
38
+ "version": "0.0.4"
39
+ }
package/package.json CHANGED
@@ -5,18 +5,29 @@
5
5
  "@opentelemetry/instrumentation-openai": "^0.10.0",
6
6
  "@opentelemetry/sdk-node": "^0.212.0",
7
7
  "@opentelemetry/sdk-trace-node": "^2.5.1",
8
- "@traceloop/instrumentation-openai": "^0.22.5",
9
- "openai": "^6.22.0"
8
+ "@traceloop/ai-semantic-conventions": "^0.22.5",
9
+ "js-tiktoken": "^1.0.21"
10
10
  },
11
- "devDependencies": {
11
+ "optionalDependencies": {
12
12
  "@biomejs/biome": "^2.4.3"
13
13
  },
14
+ "devDependencies": {},
15
+ "peerDependencies": {
16
+ "openai": "*"
17
+ },
14
18
  "name": "@overmind-lab/trace-sdk",
19
+ "main": "src/index.ts",
20
+ "exports": {
21
+ ".": {
22
+ "import": "./src/index.ts",
23
+ "require": "./src/index.ts"
24
+ }
25
+ },
15
26
  "packageManager": "bun@1.3.5",
16
27
  "private": false,
17
28
  "publishConfig": {
18
29
  "access": "public",
19
- "tag": "alpha"
30
+ "tag": "latest"
20
31
  },
21
32
  "scripts": {
22
33
  "build": "tsc",
@@ -24,5 +35,5 @@
24
35
  "dev": "tsx watch src/index.ts",
25
36
  "format": "biome format --write"
26
37
  },
27
- "version": "0.0.2"
38
+ "version": "0.0.4"
28
39
  }
package/src/index.ts ADDED
@@ -0,0 +1,2 @@
1
+ export * from "./overmind-client";
2
+ export * from "./instrumentation-openai";
@@ -10,10 +10,10 @@ import {
10
10
  import { ConsoleSpanExporter } from "@opentelemetry/sdk-trace-node";
11
11
  import { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION } from "@opentelemetry/semantic-conventions";
12
12
 
13
- import OpenAI from "openai";
14
- import { OpenAIInstrumentation } from "./instrumentation-openai";
13
+ import type { OpenAI } from "openai";
15
14
 
16
15
  import { name, version } from "../package.json";
16
+ import { OpenAIInstrumentation } from "./instrumentation-openai";
17
17
 
18
18
  type OvermindClientConfig = {
19
19
  apiKey: string;
@@ -26,6 +26,7 @@ export class OvermindClient {
26
26
  private version: string = version;
27
27
  private baseUrl: string;
28
28
  private apiKey: string;
29
+ private sdk?: NodeSDK;
29
30
  public experimentSlug?: string;
30
31
 
31
32
  constructor(config: OvermindClientConfig) {
@@ -51,14 +52,14 @@ export class OvermindClient {
51
52
  instrumentations?: Instrumentation[];
52
53
  spanProcessors?: SpanProcessor[];
53
54
  enableBatching: boolean;
54
- enabledProviders: Partial<Record<"openai" | "anthropic", boolean>>;
55
+ enabledProviders: { openai: OpenAI };
55
56
  }) {
56
- const traceExporter = !this.baseUrl
57
+ const traceExporter = this.baseUrl
57
58
  ? new OTLPTraceExporter({
58
- url: `${this.baseUrl}/api/v1/traces/create`,
59
59
  headers: {
60
- "X-API-TOKEN": `Bearer ${this.apiKey}`,
60
+ "X-API-TOKEN": this.apiKey,
61
61
  },
62
+ url: `${this.baseUrl}/api/v1/traces/create`,
62
63
  })
63
64
  : new ConsoleSpanExporter();
64
65
 
@@ -80,16 +81,20 @@ export class OvermindClient {
80
81
  const openaiInstrumentation = new OpenAIInstrumentation({
81
82
  enabled: true,
82
83
  });
83
- openaiInstrumentation.manuallyInstrument(OpenAI);
84
+ openaiInstrumentation.manuallyInstrument(config.enabledProviders.openai);
84
85
  instrumentations.push(openaiInstrumentation);
85
86
  }
86
87
 
87
- const _sdk = new NodeSDK({
88
+ this.sdk = new NodeSDK({
89
+ instrumentations: [...instrumentations],
88
90
  resource,
89
91
  spanProcessors,
90
- instrumentations: [...instrumentations],
91
92
  });
92
93
 
93
- _sdk.start();
94
+ this.sdk.start();
95
+ }
96
+
97
+ async shutdown() {
98
+ await this.sdk?.shutdown();
94
99
  }
95
100
  }