moda-ai 0.1.1
- package/README.md +272 -0
- package/dist/index.cjs +1280 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.ts +384 -0
- package/dist/index.mjs +1256 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +74 -0
package/README.md
ADDED

# moda-ai

Official TypeScript/Node.js SDK for Moda LLM observability with automatic conversation threading.

## Features

- **Automatic Instrumentation**: Zero-config tracing for OpenAI and Anthropic clients
- **Conversation Threading**: Groups multi-turn conversations together
- **Streaming Support**: Full support for streaming responses
- **User Tracking**: Associate LLM calls with specific users
- **OpenTelemetry Native**: Built on OpenTelemetry for standards-compliant telemetry

## Installation

```bash
npm install moda-ai
```

## Quick Start

```typescript
import { Moda } from 'moda-ai';
import OpenAI from 'openai';

// Initialize once at application startup
Moda.init('moda_your_api_key');

// Set conversation ID for your session (recommended);
// `sessionId` here stands in for your app's own session identifier
Moda.conversationId = 'session_' + sessionId;

// All OpenAI calls are now automatically tracked
const openai = new OpenAI();

const response = await openai.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Hello!' }],
});

// Flush before exit
await Moda.flush();
```

## Conversation Tracking

### Setting Conversation ID (Recommended)

For production use, explicitly set a conversation ID to group related LLM calls:

```typescript
// Property-style API (recommended)
Moda.conversationId = 'support_ticket_123';
await openai.chat.completions.create({ ... });
await openai.chat.completions.create({ ... });
// Both calls share the same conversation_id
Moda.conversationId = null; // clear when done

// Method-style API (also supported)
Moda.setConversationId('support_ticket_123');
await openai.chat.completions.create({ ... });
Moda.clearConversationId();
```

### Setting User ID

Associate LLM calls with specific users:

```typescript
Moda.userId = 'user_12345';
await openai.chat.completions.create({ ... });
Moda.userId = null; // clear when done

// Or use method-style
Moda.setUserId('user_12345');
await openai.chat.completions.create({ ... });
Moda.clearUserId();
```

### Scoped Context

For callback-based scoping (useful in async contexts):

```typescript
import { withConversationId, withUserId, withContext } from 'moda-ai';

// Scoped conversation ID
await withConversationId('my_session_123', async () => {
  await openai.chat.completions.create({ ... });
  await openai.chat.completions.create({ ... });
  // Both calls use 'my_session_123'
});

// Scoped user ID
await withUserId('user_456', async () => {
  await openai.chat.completions.create({ ... });
});

// Both at once
await withContext('conv_123', 'user_456', async () => {
  // ...
});
```
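
This scoping composes naturally with per-request middleware. A minimal sketch for an Express app, assuming the conversation and user IDs arrive in request headers (the header names and wiring are illustrative, and the pattern presumes the SDK's context propagates across `await` boundaries, as the callback API suggests):

```typescript
import express from 'express';
import { withContext } from 'moda-ai';

const app = express();

app.use((req, _res, next) => {
  // Illustrative: your app may derive these from sessions or auth instead.
  const conversationId = `session_${req.header('x-session-id') ?? 'anonymous'}`;
  const userId = req.header('x-user-id') ?? 'anonymous';

  // Run the rest of the request pipeline inside the scoped context so every
  // LLM call made while handling this request is grouped and attributed.
  withContext(conversationId, userId, async () => next()).catch(next);
});
```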

## Automatic Fallback

If you don't set a conversation ID, the SDK automatically computes one from the first user message and system prompt. This works well for simple use cases, but explicit IDs are recommended for production:

```typescript
// Turn 1
const messages = [{ role: 'user', content: 'Hi, help with TypeScript' }];
const r1 = await openai.chat.completions.create({ model: 'gpt-4', messages });

// Turn 2 - automatically linked to Turn 1
messages.push({ role: 'assistant', content: r1.choices[0].message.content });
messages.push({ role: 'user', content: 'How do I read a file?' });
const r2 = await openai.chat.completions.create({ model: 'gpt-4', messages });

// Both turns have the SAME conversation_id in your Moda dashboard
```
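
Since the fallback is derived from the first user message and system prompt, two sessions that open identically would presumably derive the same ID, which is one more reason to prefer explicit IDs in production. If you need the derived ID up front (for logging or cross-referencing), the exported `computeConversationId` helper uses the same algorithm; a minimal sketch:

```typescript
import { computeConversationId } from 'moda-ai';

const systemPrompt = 'You are a helpful assistant.';
const messages = [{ role: 'user', content: 'Hi, help with TypeScript' }];

// Same algorithm the SDK applies when no explicit ID is set, so this
// should match the conversation_id assigned to a call made with these
// messages and this system prompt.
const derivedId = computeConversationId(messages, systemPrompt);
console.log('fallback conversation id:', derivedId);
```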

## Anthropic Support

Works the same way with Anthropic's Claude:

```typescript
import Anthropic from '@anthropic-ai/sdk';

const anthropic = new Anthropic();

Moda.conversationId = 'claude_session_123';

const response = await anthropic.messages.create({
  model: 'claude-3-haiku-20240307',
  max_tokens: 1024,
  system: 'You are a helpful assistant.',
  messages: [{ role: 'user', content: 'Hello!' }],
});
```

## Streaming Support

The SDK fully supports streaming responses:

```typescript
const stream = await openai.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Count to 5' }],
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content || '');
}
// Streaming responses are automatically tracked
```
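
Per the feature list, this applies to Anthropic streams as well; a sketch of a streamed Claude call, reusing the `anthropic` client from the previous section (the event handling below is standard `@anthropic-ai/sdk` usage, not Moda-specific API):

```typescript
const claudeStream = await anthropic.messages.create({
  model: 'claude-3-haiku-20240307',
  max_tokens: 1024,
  messages: [{ role: 'user', content: 'Count to 5' }],
  stream: true,
});

for await (const event of claudeStream) {
  // Text arrives as content_block_delta events in the Anthropic stream.
  if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
    process.stdout.write(event.delta.text);
  }
}
```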

## Configuration Options

```typescript
Moda.init('moda_api_key', {
  // Base URL for telemetry ingestion
  baseUrl: 'https://ingest.moda.so/v1/traces',

  // Environment name (shown in dashboard)
  environment: 'production',

  // Enable/disable the SDK
  enabled: true,

  // Enable debug logging
  debug: false,

  // Batch size for telemetry export
  batchSize: 100,

  // Flush interval in milliseconds
  flushInterval: 5000,
});
```
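
A common arrangement is to key these options off the runtime environment, for example disabling export entirely under test. A sketch, assuming the API key lives in a `MODA_API_KEY` environment variable (the variable name is an assumption, not one the SDK is documented to read on its own):

```typescript
// Illustrative: load the key from the environment and fail fast if missing.
const apiKey = process.env.MODA_API_KEY;
if (!apiKey) throw new Error('MODA_API_KEY is not set');

Moda.init(apiKey, {
  environment: process.env.NODE_ENV ?? 'development',
  // Skip telemetry in test runs; log verbosely in development.
  enabled: process.env.NODE_ENV !== 'test',
  debug: process.env.NODE_ENV === 'development',
});
```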

## API Reference

### Moda Object

```typescript
// Initialize the SDK
Moda.init(apiKey: string, options?: ModaInitOptions): void

// Force flush pending telemetry
Moda.flush(): Promise<void>

// Shutdown and release resources
Moda.shutdown(): Promise<void>

// Check initialization status
Moda.isInitialized(): boolean

// Property-style context (recommended)
Moda.conversationId: string | null  // get/set
Moda.userId: string | null          // get/set

// Method-style context (also supported)
Moda.setConversationId(id: string): void
Moda.clearConversationId(): void
Moda.setUserId(id: string): void
Moda.clearUserId(): void
```

### Context Functions

```typescript
import { withConversationId, withUserId, withContext } from 'moda-ai';

// Scoped conversation ID
await withConversationId('conv_123', async () => {
  // All LLM calls here use 'conv_123'
});

// Scoped user ID
await withUserId('user_456', async () => {
  // All LLM calls here are associated with 'user_456'
});

// Both at once
await withContext('conv_123', 'user_456', async () => {
  // ...
});
```

### Utility Functions

```typescript
import { computeConversationId, generateRandomConversationId } from 'moda-ai';

// Compute a conversation ID from messages (same algorithm the SDK uses)
const id = computeConversationId(messages, systemPrompt);

// Generate a random conversation ID
const randomId = generateRandomConversationId();
```
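
A typical use for the random helper is minting one ID when a chat session starts and reusing it for every turn; a short sketch (the session wiring is illustrative):

```typescript
import { Moda, generateRandomConversationId } from 'moda-ai';

// Illustrative: mint one conversation ID per session and reuse it so all
// turns in the session are grouped, regardless of message content.
function startChatSession(): string {
  const conversationId = generateRandomConversationId();
  Moda.conversationId = conversationId;
  return conversationId;
}
```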

## Graceful Shutdown

Always flush before your application exits:

```typescript
process.on('SIGTERM', async () => {
  await Moda.flush();
  await Moda.shutdown();
  process.exit(0);
});
```
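
The same cleanup applies to Ctrl-C during local development; a small helper covering both signals (the helper itself is illustrative, not part of the SDK):

```typescript
// Illustrative: register the same flush-and-shutdown cleanup for
// SIGTERM (orchestrators) and SIGINT (Ctrl-C).
async function drainAndExit(code: number): Promise<never> {
  await Moda.flush();
  await Moda.shutdown();
  process.exit(code);
}

process.on('SIGTERM', () => void drainAndExit(0));
process.on('SIGINT', () => void drainAndExit(130));
```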

## Requirements

- Node.js >= 18.0.0
- TypeScript >= 5.0 (for type definitions)

## Peer Dependencies

Install the LLM clients you want to use:

```bash
# For OpenAI
npm install openai

# For Anthropic
npm install @anthropic-ai/sdk
```

## License

MIT