teckel-ai 0.3.3 → 0.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +11 -380
- package/dist/{schemas.d.ts → index.d.mts} +186 -14
- package/dist/index.d.ts +386 -7
- package/dist/index.js +2 -22
- package/dist/index.mjs +2 -0
- package/package.json +23 -12
- package/dist/conversation.d.ts +0 -55
- package/dist/conversation.js +0 -310
- package/dist/schemas.js +0 -69
- package/dist/tracer.d.ts +0 -18
- package/dist/tracer.js +0 -45
- package/dist/types.d.ts +0 -88
- package/dist/types.js +0 -6
package/README.md
CHANGED
@@ -1,25 +1,6 @@
 # teckel-ai
 
-TypeScript/JavaScript SDK for [Teckel AI](https://teckel.ai)
-
-## What is Teckel AI?
-
-Teckel AI helps you understand and improve your AI chatbots by analyzing conversations. Track what questions users ask, how well your AI answers them, and identify which areas need improvement.
-
-**Your Problem:** Your RAG or AI chat system gives incomplete or incorrect answers because your knowledge base has gaps.
-
-**Our Service:** We analyze every AI response for accuracy and completeness, cluster similar queries to identify trending topics, and tell you exactly which documentation to add or update.
-
-## What This SDK Does
-
-This lightweight SDK (zero dependencies, 20-minute integration) sends your AI conversations to Teckel for analysis:
-
-- **Completeness Scoring** - How well each response answers the question (0-100%)
-- **Accuracy Analysis** - Whether AI claims are supported by your source documents
-- **Topic Intelligence** - Automatic clustering of queries to reveal documentation gaps
-- **Actionable Feedback** - Specific recommendations on what knowledge to add
-
-Works with any RAG system or AI framework: LangChain, LlamaIndex, Vercel AI SDK, or custom implementations.
+TypeScript/JavaScript SDK for [Teckel AI](https://teckel.ai)- Get insight into your AI systems, track topics of your choosing, and identify and fix knowledge gaps.
 
 ## Installation
 
@@ -27,9 +8,7 @@ Works with any RAG system or AI framework: LangChain, LlamaIndex, Vercel AI SDK,
 npm install teckel-ai
 ```
 
-**Requirements:**
-- Node.js 18+ (or Bun, Deno, serverless runtimes)
-- TypeScript 4.5+ (optional but recommended)
+**Requirements:** Node.js 18+ (or Bun, Deno, serverless runtimes)
 
 ## Quick Start
 
@@ -43,388 +22,40 @@ const tracer = new TeckelTracer({
 
 // In your API handler
 async function handleChat(userQuestion: string, sessionId: string) {
-
-  const conversation = tracer.start({
-    sessionRef: sessionId,
-    userId: 'user@example.com'
-  });
+  const conversation = tracer.start({ sessionRef: sessionId });
 
   // Your existing RAG logic
   const chunks = await vectorDB.search(userQuestion);
   const answer = await llm.generate(userQuestion, chunks);
 
-  // Map chunks to Teckel format
-  const documents = chunks.map((chunk, index) => ({
-    documentRef: chunk.id,
-    documentName: chunk.title,
-    documentText: chunk.content,
-    documentLastUpdated: chunk.lastModified,
-    sourceUri: chunk.url,
-    similarity: chunk.score,
-    rank: index
-  }));
-
   // Send trace (non-blocking)
   conversation.trace({
     query: userQuestion,
     response: answer,
-    documents:
-
-
+    documents: chunks.map((chunk, i) => ({
+      documentRef: chunk.id,
+      documentName: chunk.title,
+      documentText: chunk.content,
+    }))
   });
 
   // For serverless: flush before returning
-  await conversation.flush(
+  await conversation.flush();
 
   return answer;
 }
 ```
 
-## API Reference
-
-### TeckelTracer
-
-Main SDK class.
-
-#### Constructor
-
-```typescript
-new TeckelTracer(config: TeckelConfig)
-```
-
-| Field | Type | Required | Default | Description |
-|-------|------|----------|---------|-------------|
-| `apiKey` | string | Yes | - | Your Teckel API key (starts with `tk_live_`) |
-| `endpoint` | string | No | `"https://app.teckel.ai/api"` | API base URL |
-| `debug` | boolean | No | `false` | Enable debug logging |
-| `timeoutMs` | number | No | `5000` | Network timeout in milliseconds |
-
-#### tracer.start()
-
-Start or continue a conversation.
-
-```typescript
-tracer.start(options: ConversationOptions): Conversation
-```
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `sessionRef` | string | Yes | Your unique conversation identifier |
-| `userId` | string | No | End-user identifier |
-| `metadata` | object | No | Custom context |
-
-### Conversation
-
-#### conversation.trace()
-
-Record a query-response interaction. Fire-and-forget by default.
-
-```typescript
-conversation.trace(data: TraceData): TraceResult
-```
-
-**Returns:** `{ traceRef: string, turnNumber: number }`
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `query` | string | Yes | User's question |
-| `response` | string | Yes | AI-generated answer |
-| `documents` | Document[] | Recommended | Retrieved chunks (for RAG) |
-| `traceRef` | string | No | Your correlation ID |
-| `model` | string | No | LLM model (e.g., "gpt-5") |
-| `responseTimeMs` | number | No | Latency in milliseconds |
-| `tokens` | TokenUsage | No | Token usage |
-| `metadata` | object | No | Custom context |
-
-**Example:**
-
-```typescript
-const result = conversation.trace({
-  query: "How do I reset my password?",
-  response: "Go to Settings > Security...",
-  model: "gpt-5",
-  documents: [
-    {
-      documentRef: "kb-123",
-      documentName: "Password Reset Guide",
-      documentText: "To reset your password...",
-      documentLastUpdated: "2025-01-15T10:00:00Z",
-      sourceUri: "https://kb.example.com/security",
-      similarity: 0.92,
-      rank: 0
-    }
-  ]
-});
-
-console.log(result.traceRef); // "session-123:1"
-```
-
-#### conversation.feedback()
-
-Add user feedback signal.
-
-```typescript
-await conversation.feedback(data: FeedbackData): Promise<void>
-```
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `type` | FeedbackType | Yes | `"thumbs_up"`, `"thumbs_down"`, `"flag"`, or `"rating"` |
-| `value` | string | No | For ratings: `"1"` to `"5"` |
-| `comment` | string | No | User's explanation |
-| `traceRef` | string | No | Link to specific trace |
-
-**Example:**
-
-```typescript
-await conversation.feedback({
-  type: "thumbs_down",
-  comment: "Information was outdated"
-});
-```
-
-#### conversation.flush()
-
-Wait for queued traces to send. **Required for serverless** to prevent data loss.
-
-```typescript
-await conversation.flush(timeoutMs?: number): Promise<void>
-```
-
-**Throws:** Error on timeout
-
-**Example:**
-
-```typescript
-// Serverless: flush before returning
-try {
-  await conversation.flush(5000);
-} catch (err) {
-  logger.warn('Flush timeout', { err });
-}
-```
-
-#### conversation.end()
-
-End conversation and flush pending traces.
-
-```typescript
-await conversation.end(): Promise<void>
-```
-
-#### Read-only Properties
-
-```typescript
-conversation.id      // session reference
-conversation.turns   // number of traces
-conversation.started // start time
-```
-
-## Type Definitions
-
-### Document
-
-```typescript
-interface Document {
-  // Required
-  documentRef: string;          // Your document ID
-  documentName: string;         // Human-readable name
-  documentText: string;         // Chunk content
-
-  // Recommended
-  documentLastUpdated?: string; // ISO 8601 timestamp
-  sourceUri?: string;           // URL or path
-
-  // Optional
-  sourceType?: string;          // e.g., 'confluence', 'slack'
-  similarity?: number;          // 0-1 score
-  rank?: number;                // Position (0 = first)
-  ownerEmail?: string;          // Owner email
-  documentType?: string;        // e.g., 'pdf', 'markdown'
-}
-```
-
-### TokenUsage
-
-```typescript
-interface TokenUsage {
-  prompt: number;     // Input tokens
-  completion: number; // Output tokens
-  total: number;      // Total tokens
-}
-```
-
-### FeedbackType
-
-```typescript
-type FeedbackType = 'thumbs_up' | 'thumbs_down' | 'flag' | 'rating'
-```
-
-## Usage Patterns
-
-### Serverless (Vercel, Lambda, Cloudflare Workers)
-
-**Must** call `flush()` before returning to prevent data loss.
-
-```typescript
-const tracer = new TeckelTracer({
-  apiKey: process.env.TECKEL_API_KEY
-});
-
-export async function handler(request) {
-  const conversation = tracer.start({
-    sessionRef: request.sessionId
-  });
-
-  const answer = await generateAnswer(request.question);
-
-  conversation.trace({
-    query: request.question,
-    response: answer,
-    documents: retrievedDocs
-  });
-
-  // CRITICAL: Flush before returning (3-5s recommended)
-  await conversation.flush(5000);
-
-  return { statusCode: 200, body: answer };
-}
-```
-
-### Long-Running Servers (Express, Fastify, etc.)
-
-No `flush()` needed - traces send in background.
-
-```typescript
-const tracer = new TeckelTracer({
-  apiKey: process.env.TECKEL_API_KEY
-});
-
-app.post('/chat', async (req, res) => {
-  const conversation = tracer.start({
-    sessionRef: req.session.id
-  });
-
-  const answer = await generateAnswer(req.body.question);
-
-  conversation.trace({
-    query: req.body.question,
-    response: answer,
-    documents: retrievedDocs
-  });
-
-  // No flush needed
-  res.json({ answer });
-});
-```
-
-### Non-RAG Systems
-
-Omit `documents` if not using retrieval.
-
-```typescript
-conversation.trace({
-  query: userQuestion,
-  response: answer,
-  model: 'gpt-5'
-});
-```
-
-## SDK Behavior
-
-### Error Handling
-
-- `trace()`, `feedback()`, `end()` **never throw** - failures logged in debug mode
-- `flush()` **throws on timeout** - catch to monitor potential data loss
-
-### Retry Logic
-
-Automatically retries once on transient errors:
-- HTTP 429 (rate limit)
-- HTTP 5xx (server errors)
-- Network failures
-
-**Retry pattern:**
-1. Initial attempt
-2. Wait 250-350ms (jittered)
-3. Single retry
-4. Log failure (debug mode)
-
-**Total time:** `2 * timeoutMs + ~300ms`
-
-### Timeouts and Flush
-
-- `timeoutMs`: Per-request network timeout for SDK HTTP calls. If a request exceeds this, it is aborted. With one retry, total worst-case send time is approximately `2 * timeoutMs + ~300ms`.
-- `flush(timeoutMs?)`: Bounded wait for the internal send queue to drain. In serverless, call this before returning to avoid data loss. If no argument is passed, it uses the SDK `timeoutMs` value.
-- Recommendation for serverless: `await conversation.flush(3000–5000)` to balance reliability and latency.
-- `end()`: Convenience that flushes pending sends and marks the conversation as ended. It throws on flush timeout just like `flush()`.
-
-### Rate Limits
-
-- **Default:** 1,000 requests/hour per organization
-- **Reset:** Top of each hour
-- **Headers:** `X-RateLimit-Limit`, `X-RateLimit-Remaining`, `X-RateLimit-Reset`
-
-Contact support@teckel.ai for increases.
-
-## Runtime Compatibility
-
-Uses standard Web APIs (`fetch`, `AbortSignal`):
-
-- ✅ Node.js 18+
-- ✅ Bun 1.0+
-- ✅ Deno 1.35+ (`npm:teckel-ai`)
-- ✅ Cloudflare Workers
-- ✅ AWS Lambda
-- ✅ Vercel Edge Runtime
-- ✅ Google Cloud Run
-
-**Security:** Never expose API keys in browser code. Always call from server/serverless backend.
-
-## Best Practices
-
-1. Initialize `TeckelTracer` once at startup, reuse across requests
-2. Always call `flush()` in serverless before returning
-3. Include `documentLastUpdated` when available (enables freshness scoring)
-4. Use consistent `sessionRef` and `traceRef` for tracking
-5. Include `model`, `responseTimeMs`, `tokens` for insights
-6. Set `debug: false` in production
-7. Call `conversation.end()` when chat session completes
-
-## Troubleshooting
-
-**Traces not appearing?**
-1. Verify API key starts with `tk_live_`
-2. Check network connectivity to `https://app.teckel.ai/api`
-3. Enable `debug: true` to see errors
-4. Look for validation errors in console
-
-**Serverless traces dropping?**
-1. Ensure `await conversation.flush(5000)` before returning
-2. Monitor flush timeout errors in logs
-3. Increase timeout if needed (up to 5s for slow networks)
-
-**High latency?**
-1. Verify `trace()` is non-blocking (don't await it)
-2. Check `timeoutMs` configuration (default 5000ms)
-3. Review network connectivity
-
 ## Documentation
 
+- **Full SDK Reference:** [docs.teckel.ai/docs/typescript-sdk-reference](https://docs.teckel.ai/docs/typescript-sdk-reference)
 - **Getting Started:** [docs.teckel.ai/docs/getting-started](https://docs.teckel.ai/docs/getting-started)
-- **Complete SDK Reference:** [docs.teckel.ai/docs/typescript-sdk-reference](https://docs.teckel.ai/docs/typescript-sdk-reference)
-- **HTTP API Reference:** [docs.teckel.ai/docs/http-api-reference](https://docs.teckel.ai/docs/http-api-reference)
-- **Dashboard:** [app.teckel.ai](https://app.teckel.ai)
 
 ## Support
 
 - **Email:** support@teckel.ai
-- **
+- **Website** [teckel.ai](https://teckel.ai)
 
 ## License
 
 MIT
-
----
-
-Version 0.3.3
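For existing 0.3.3 integrations, the README changes above amount to a slimmer call surface: `tracer.start()` no longer takes `userId`, the retrieved-chunk mapping only needs the three required `Document` fields, and `flush()` is called without an explicit timeout. The sketch below is illustrative only; it assumes the `userRef` option named in the new type declarations further down, and `vectorDB`/`llm` are stand-in helpers carried over from the Quick Start example.

```typescript
import { TeckelTracer } from 'teckel-ai';

// Stand-ins for your own retrieval and generation stack (from the Quick Start example).
declare const vectorDB: { search(q: string): Promise<Array<{ id: string; title: string; content: string }>> };
declare const llm: { generate(q: string, chunks: unknown[]): Promise<string> };

const tracer = new TeckelTracer({ apiKey: process.env.TECKEL_API_KEY! });

async function handleChat(userQuestion: string, sessionId: string) {
  // 0.3.3 accepted `userId` here; the 0.3.5 typings expose `userRef` instead,
  // and `sessionRef` itself is now optional.
  const conversation = tracer.start({ sessionRef: sessionId, userRef: 'user@example.com' });

  const chunks = await vectorDB.search(userQuestion);
  const answer = await llm.generate(userQuestion, chunks);

  // Only documentRef, documentName and documentText are required; the other
  // Document fields (similarity, rank, sourceUri, ...) remain optional.
  conversation.trace({
    query: userQuestion,
    response: answer,
    documents: chunks.map((chunk) => ({
      documentRef: chunk.id,
      documentName: chunk.title,
      documentText: chunk.content,
    })),
  });

  await conversation.flush(); // still needed in serverless before returning
  return answer;
}
```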
package/dist/{schemas.d.ts → index.d.mts}
RENAMED
@@ -1,11 +1,178 @@
+import { z } from 'zod';
+
 /**
- *
+ * Type definitions for teckel-ai SDK v0.3.5
+ * Simple, clean types matching existing database schema
  */
-
+/**
+ * SDK Configuration
+ */
+interface TeckelConfig {
+    apiKey: string;
+    endpoint?: string;
+    debug?: boolean;
+    timeoutMs?: number;
+}
+/**
+ * Conversation options
+ * sessionRef IS the conversation identifier
+ *
+ * Naming convention:
+ * - *Ref = Client-provided identifier (TEXT)
+ * - *Id = Server-generated internal ID (UUID)
+ */
+interface ConversationOptions {
+    sessionRef?: string;
+    userRef?: string;
+    metadata?: Record<string, unknown>;
+}
+/**
+ * Document structure for RAG systems
+ * Matches existing documents + chunk_events schema
+ */
+interface Document {
+    documentRef: string;
+    documentName: string;
+    documentText: string;
+    documentLastUpdated?: string;
+    sourceUri?: string;
+    sourceType?: string;
+    similarity?: number;
+    rank?: number;
+    ownerEmail?: string;
+    documentType?: string;
+}
+/**
+ * Token usage tracking
+ */
+interface TokenUsage {
+    prompt: number;
+    completion: number;
+    total: number;
+}
+/**
+ * Trace data for a single query-response interaction
+ * Matches existing traces table schema
+ */
+interface TraceData {
+    query: string;
+    response: string;
+    model?: string;
+    responseTimeMs?: number;
+    documents?: Document[];
+    tokens?: TokenUsage;
+    metadata?: Record<string, unknown>;
+    traceRef?: string;
+    userRef?: string;
+}
+/**
+ * Feedback types
+ */
+type FeedbackType = 'thumbs_up' | 'thumbs_down' | 'flag' | 'rating';
+/**
+ * User feedback signal
+ */
+interface FeedbackData {
+    type: FeedbackType;
+    value?: string;
+    comment?: string;
+    traceRef?: string;
+}
+/**
+ * Result returned when a trace is created
+ */
+interface TraceResult {
+    traceRef: string;
+    turnNumber: number;
+}
+
+/**
+ * Conversation class for teckel-ai SDK v0.3.5
+ * Manages a single conversation with fire-and-forget semantics
+ */
+
+/** Internal type - sessionRef is guaranteed by TeckelTracer.start() */
+interface ResolvedConversationOptions extends ConversationOptions {
+    sessionRef: string;
+}
+declare class Conversation {
+    private readonly apiKey;
+    private readonly endpoint;
+    private readonly sessionRef;
+    private readonly userRef?;
+    private readonly metadata?;
+    private readonly startedAt;
+    private readonly debug;
+    private readonly timeoutMs;
+    private turnCount;
+    private startPromise;
+    private sendQueue;
+    constructor(apiKey: string, endpoint: string, options: ResolvedConversationOptions, debug?: boolean, extras?: {
+        timeoutMs: number;
+    });
+    /**
+     * Record a trace (single query-response interaction)
+     * Fire-and-forget by default - never blocks
+     * For serverless, call flush() before function termination
+     */
+    trace(data: TraceData): TraceResult | void;
+    /**
+     * Add user feedback signal
+     * Never throws - gracefully handles errors
+     */
+    feedback(data: FeedbackData): Promise<void>;
+    /**
+     * End the conversation
+     * Flushes all pending traces before sending end signal
+     * Never throws - gracefully handles errors
+     */
+    end(): Promise<void>;
+    /**
+     * Read-only properties
+     */
+    get id(): string;
+    get turns(): number;
+    get started(): Date;
+    private fetchWithRetry;
+    private _startConversation;
+    private _sendTrace;
+    private _sendFeedback;
+    private _endConversation;
+    private enqueueSend;
+    /**
+     * Flush queued sends with a bounded timeout.
+     * Returns when the queue is empty or the timeout elapses (whichever comes first).
+     */
+    flush(timeoutMs?: number): Promise<void>;
+}
+
+/**
+ * TeckelTracer - Main SDK class for teckel-ai v0.3.5
+ * Simple, lightweight SDK for AI conversation tracking
+ */
+
+declare class TeckelTracer {
+    private readonly apiKey;
+    private readonly endpoint;
+    private readonly debug;
+    private readonly timeoutMs;
+    constructor(config: TeckelConfig);
+    /**
+     * Start a new conversation
+     * sessionRef IS the public conversation identifier
+     * If not provided, auto-generates one as 'auto:{8-char-uuid}'
+     */
+    start(options?: ConversationOptions): Conversation;
+}
+
+/**
+ * Zod validation schemas for teckel-ai SDK v0.3.5
+ */
+
 /**
  * Document schema
  */
-export declare const DocumentSchema: z.ZodObject<{
+declare const DocumentSchema: z.ZodObject<{
     documentRef: z.ZodString;
     documentName: z.ZodString;
     documentText: z.ZodString;
@@ -42,7 +209,7 @@ export declare const DocumentSchema: z.ZodObject<{
 /**
  * Token usage schema
  */
-export declare const TokenUsageSchema: z.ZodObject<{
+declare const TokenUsageSchema: z.ZodObject<{
     prompt: z.ZodNumber;
     completion: z.ZodNumber;
     total: z.ZodNumber;
@@ -58,7 +225,7 @@ export declare const TokenUsageSchema: z.ZodObject<{
 /**
  * Trace data schema
  */
-export declare const TraceDataSchema: z.ZodObject<{
+declare const TraceDataSchema: z.ZodObject<{
     query: z.ZodString;
     response: z.ZodString;
     model: z.ZodOptional<z.ZodString>;
@@ -112,6 +279,7 @@ export declare const TraceDataSchema: z.ZodObject<{
     }>>;
     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
     traceRef: z.ZodOptional<z.ZodString>;
+    userRef: z.ZodOptional<z.ZodString>;
 }, "strip", z.ZodTypeAny, {
     query: string;
     response: string;
@@ -136,6 +304,7 @@ export declare const TraceDataSchema: z.ZodObject<{
     } | undefined;
     metadata?: Record<string, unknown> | undefined;
     traceRef?: string | undefined;
+    userRef?: string | undefined;
 }, {
     query: string;
     response: string;
@@ -160,27 +329,28 @@ export declare const TraceDataSchema: z.ZodObject<{
     } | undefined;
     metadata?: Record<string, unknown> | undefined;
     traceRef?: string | undefined;
+    userRef?: string | undefined;
 }>;
 /**
  * Conversation options schema
  */
-export declare const ConversationOptionsSchema: z.ZodObject<{
-    sessionRef: z.ZodString
-
+declare const ConversationOptionsSchema: z.ZodObject<{
+    sessionRef: z.ZodOptional<z.ZodString>;
+    userRef: z.ZodOptional<z.ZodString>;
     metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodUnknown>>;
 }, "strip", z.ZodTypeAny, {
-    sessionRef: string;
     metadata?: Record<string, unknown> | undefined;
-
+    userRef?: string | undefined;
+    sessionRef?: string | undefined;
 }, {
-    sessionRef: string;
     metadata?: Record<string, unknown> | undefined;
-
+    userRef?: string | undefined;
+    sessionRef?: string | undefined;
 }>;
 /**
  * Feedback data schema
  */
-export declare const FeedbackDataSchema: z.ZodObject<{
+declare const FeedbackDataSchema: z.ZodObject<{
     type: z.ZodEnum<["thumbs_up", "thumbs_down", "flag", "rating"]>;
     value: z.ZodOptional<z.ZodString>;
     comment: z.ZodOptional<z.ZodString>;
@@ -199,7 +369,7 @@ export declare const FeedbackDataSchema: z.ZodObject<{
 /**
  * Config schema
  */
-export declare const TeckelConfigSchema: z.ZodObject<{
+declare const TeckelConfigSchema: z.ZodObject<{
     apiKey: z.ZodString;
     endpoint: z.ZodOptional<z.ZodString>;
     debug: z.ZodOptional<z.ZodBoolean>;
@@ -215,3 +385,5 @@ export declare const TeckelConfigSchema: z.ZodObject<{
     debug?: boolean | undefined;
     timeoutMs?: number | undefined;
 }>;
+
+export { Conversation, type ConversationOptions, ConversationOptionsSchema, type Document, DocumentSchema, type FeedbackData, FeedbackDataSchema, type FeedbackType, type TeckelConfig, TeckelConfigSchema, TeckelTracer, type TokenUsage, TokenUsageSchema, type TraceData, TraceDataSchema, type TraceResult };
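Because the bundled declaration file re-exports the Zod schemas alongside the types, the same shapes can also be validated at runtime. A minimal sketch, assuming the package root resolves to this entry (the package.json changes are not expanded in this diff):

```typescript
import { ConversationOptionsSchema, TraceDataSchema, type TraceData } from 'teckel-ai';

// sessionRef is optional in 0.3.5, so an options object without it parses cleanly.
const options = ConversationOptionsSchema.parse({ userRef: 'user@example.com' });
console.log(options);

const candidate: TraceData = {
  query: 'How do I reset my password?',
  response: 'Go to Settings > Security...',
  userRef: 'user@example.com', // new optional field in 0.3.5
};

// safeParse reports validation problems instead of throwing.
const result = TraceDataSchema.safeParse(candidate);
if (!result.success) {
  console.error(result.error.issues);
}
```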