@traccia2/sdk 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,358 @@
+ /**
+  * Ollama integration for LangChain and LangGraph with automatic tracing.
+  *
+  * Run Ollama models locally and trace them automatically with Traccia.
+  *
+  * @example
+  * // First, start Ollama:
+  * // ollama pull llama2
+  * // ollama serve
+  *
+  * // Then use in your code:
+  * const model = await createOllamaWithTracing({
+  *   model: 'llama2',
+  *   baseUrl: 'http://localhost:11434',
+  * });
+  *
+  * const response = await model.invoke({ input: 'Hello!' });
+  * // Automatically traced!
+  */
+
+ import { getTraciaHandler, withTracing, setupLangChainWithTracing } from './auto-langchain';
+
+ /**
+  * Create a traced Ollama model for LangChain.
+  *
+  * @example
+  * const model = await createOllamaWithTracing({
+  *   model: 'mistral',
+  *   baseUrl: 'http://localhost:11434',
+  *   temperature: 0.7,
+  * });
+  *
+  * const response = await model.invoke({ input: 'Write a poem' });
+  * // Automatically traced!
+  */
+ export async function createOllamaWithTracing(config: {
+   model: string;
+   baseUrl?: string;
+   temperature?: number;
+   topK?: number;
+   topP?: number;
+   [key: string]: any;
+ }): Promise<any> {
+   try {
+     // eslint-disable-next-line @typescript-eslint/no-var-requires
+     const langchainOllama = require('@langchain/ollama');
+
+     const {
+       model,
+       baseUrl = 'http://localhost:11434',
+       ...otherConfig
+     } = config;
+
+     const ollamaModel = new langchainOllama.Ollama({
+       model,
+       baseUrl,
+       ...otherConfig,
+       callbacks: [getTraciaHandler()],
+     });
+
+     return withTracing(ollamaModel);
+   } catch (error) {
+     throw new Error(
+       'Failed to create Ollama model. Make sure @langchain/ollama is installed and Ollama is running.\n' +
+       'Install with: npm install @langchain/ollama\n' +
+       'Run Ollama with: ollama serve'
+     );
+   }
+ }
+
+ /**
+  * Set up LangChain with Ollama and automatic tracing.
+  *
+  * @example
+  * const { model, executor } = await setupOllamaWithTracing({
+  *   model: 'mistral',
+  *   tools: [weatherTool, calculatorTool],
+  *   systemPrompt: 'You are a helpful assistant.',
+  * });
+  *
+  * const result = await executor.invoke({ input: 'What is the weather?' });
+  * // Automatically traced!
+  */
+ export async function setupOllamaWithTracing(options: {
+   model: string;
+   modelConfig?: Record<string, any>;
+   baseUrl?: string;
+   tools?: any[];
+   systemPrompt?: string;
+ }): Promise<{
+   model: any;
+   executor: any;
+   handler: any;
+ }> {
+   try {
+     const {
+       model,
+       modelConfig = {},
+       baseUrl = 'http://localhost:11434',
+       tools = [],
+       systemPrompt,
+     } = options;
+
+     // Create traced Ollama model
+     const ollamaModel = await createOllamaWithTracing({
+       model,
+       baseUrl,
+       ...modelConfig,
+     });
+
+     // Set up with LangChain
+     return await setupLangChainWithTracing({
+       modelConfig: {},
+       tools,
+       systemPrompt,
+       // We're using the ollama model directly, not creating a new one
+     }).then((result) => ({
+       model: ollamaModel,
+       executor: result.executor,
+       handler: getTraciaHandler(),
+     }));
+   } catch (error) {
+     const err = error as Error;
+     throw new Error(`Failed to setup Ollama with tracing: ${err.message}`);
+   }
+ }
+
+ /**
+  * Create a simple chatbot using Ollama with automatic tracing.
+  *
+  * @example
+  * const chatbot = await createOllamaChatbot({
+  *   model: 'neural-chat',
+  *   systemPrompt: 'You are a helpful assistant.',
+  * });
+  *
+  * const response = await chatbot('What is machine learning?');
+  * // Automatically traced!
+  */
+ export async function createOllamaChatbot(options: {
+   model: string;
+   baseUrl?: string;
+   systemPrompt?: string;
+   temperature?: number;
+ }): Promise<(input: string) => Promise<string>> {
+   try {
+     const {
+       model,
+       baseUrl = 'http://localhost:11434',
+       systemPrompt = 'You are a helpful assistant.',
+       temperature = 0.7,
+     } = options;
+
+     const ollamaModel = await createOllamaWithTracing({
+       model,
+       baseUrl,
+       temperature,
+     });
+
+     // Return a simple chatbot function
+     return async (input: string): Promise<string> => {
+       const { getTracer } = await import('../auto');
+       const tracer = getTracer('ollama-chatbot');
+
+       return tracer.startActiveSpan('chatbot-query', async (span) => {
+         try {
+           span.setAttribute('model', model);
+           span.setAttribute('input_length', input.length);
+
+           const response = await ollamaModel.invoke({
+             input: `${systemPrompt}\n\nUser: ${input}`,
+           });
+
+           span.setAttribute('output_length', response.length || 0);
+           span.setAttribute('success', true);
+
+           return response;
+         } catch (error) {
+           if (error instanceof Error) {
+             span.recordException(error);
+           }
+           throw error;
+         }
+       });
+     };
+   } catch (error) {
+     const err = error as Error;
+     throw new Error(`Failed to create Ollama chatbot: ${err.message}`);
+   }
+ }
+
+ /**
+  * Available Ollama models you can pull and use.
+  *
+  * @example
+  * const models = POPULAR_OLLAMA_MODELS;
+  * // Use any of: mistral, neural-chat, llama2, orca-mini, etc.
+  */
+ export const POPULAR_OLLAMA_MODELS = [
+   {
+     name: 'mistral',
+     description: 'Fast and powerful 7B model',
+     size: '5.4GB',
+     command: 'ollama pull mistral',
+   },
+   {
+     name: 'neural-chat',
+     description: 'Intel Neural Chat, good for conversations',
+     size: '3.8GB',
+     command: 'ollama pull neural-chat',
+   },
+   {
+     name: 'llama2',
+     description: 'Meta Llama 2, versatile 7B model',
+     size: '3.8GB',
+     command: 'ollama pull llama2',
+   },
+   {
+     name: 'orca-mini',
+     description: 'Small 3B model, fast',
+     size: '1.5GB',
+     command: 'ollama pull orca-mini',
+   },
+   {
+     name: 'dolphin-mixtral',
+     description: 'Mixtral MoE, high quality but larger',
+     size: '26GB',
+     command: 'ollama pull dolphin-mixtral',
+   },
+   {
+     name: 'opencodeup',
+     description: 'Specialized for code generation',
+     size: '3.5GB',
+     command: 'ollama pull opencodeup',
+   },
+ ];
+
+ /**
+  * Helper to get setup instructions for Ollama.
+  *
+  * @example
+  * console.log(getOllamaSetupInstructions());
+  */
+ export function getOllamaSetupInstructions(): string {
+   return `
+ 📦 Ollama Setup Instructions
+
+ 1. Install Ollama:
+    - macOS: https://ollama.ai/download/Ollama-darwin.zip
+    - Windows: https://ollama.ai/download/OllamaSetup.exe
+    - Linux: curl https://ollama.ai/install.sh | sh
+
+ 2. Start Ollama in the background:
+    ollama serve
+
+ 3. Pull a model (in another terminal):
+    ollama pull mistral # Fast 7B model (recommended)
+    # or
+    ollama pull neural-chat # Optimized for chat
+    # or
+    ollama pull llama2 # Meta's Llama 2
+
+ 4. Use in Traccia:
+    import { createOllamaWithTracing } from '@traccia/sdk/integrations';
+
+    const model = await createOllamaWithTracing({
+      model: 'mistral',
+      baseUrl: 'http://localhost:11434',
+    });
+
+    const response = await model.invoke({ input: 'Hello!' });
+
+ 📚 Available Models:
+ ${POPULAR_OLLAMA_MODELS.map((m) => ` - ${m.name}: ${m.description}`).join('\n')}
+
+ 🔗 More info: https://ollama.ai/
+ `;
+ }
+
+ /**
+  * Create a streaming chatbot with Ollama.
+  *
+  * @example
+  * const chatbot = await createOllamaStreamingChatbot({
+  *   model: 'mistral',
+  *   onChunk: (chunk) => process.stdout.write(chunk),
+  * });
+  *
+  * await chatbot('Tell me a story');
+  * // Streams response as it's generated!
+  */
+ export async function createOllamaStreamingChatbot(options: {
+   model: string;
+   baseUrl?: string;
+   systemPrompt?: string;
+   temperature?: number;
+   onChunk?: (chunk: string) => void;
+ }): Promise<(input: string) => Promise<void>> {
+   try {
+     const {
+       model,
+       baseUrl = 'http://localhost:11434',
+       systemPrompt = 'You are a helpful assistant.',
+       temperature = 0.7,
+       onChunk = (chunk) => process.stdout.write(chunk),
+     } = options;
+
+     const ollamaModel = await createOllamaWithTracing({
+       model,
+       baseUrl,
+       temperature,
+     });
+
+     return async (input: string): Promise<void> => {
+       const { getTracer } = await import('../auto');
+       const tracer = getTracer('ollama-streaming-chatbot');
+
+       return tracer.startActiveSpan('streaming-query', async (span) => {
+         try {
+           span.setAttribute('model', model);
+           span.setAttribute('input_length', input.length);
+
+           const prompt = `${systemPrompt}\n\nUser: ${input}`;
+           let totalChunks = 0;
+
+           // Use streaming if available
+           if (ollamaModel.stream) {
+             for await (const chunk of await ollamaModel.stream({
+               input: prompt,
+             })) {
+               const text = typeof chunk === 'string' ? chunk : chunk.text || '';
+               onChunk(text);
+               totalChunks += text.length;
+             }
+           } else {
+             // Fallback to regular invoke
+             const response = await ollamaModel.invoke({ input: prompt });
+             onChunk(response);
+             totalChunks = response.length;
+           }
+
+           span.setAttribute('output_length', totalChunks);
+           span.setAttribute('success', true);
+         } catch (error) {
+           if (error instanceof Error) {
+             span.recordException(error);
+           }
+           throw error;
+         }
+       });
+     };
+   } catch (error) {
+     const err = error as Error;
+     throw new Error(
+       `Failed to create streaming Ollama chatbot: ${err.message}`
+     );
+   }
+ }
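
For quick reference, a minimal usage sketch of the integration module added above. The import path and runtime behavior are assumptions (the package's public entry points are not part of this diff); it requires @langchain/ollama installed and a local Ollama server running.

// Hypothetical usage sketch; the import path is assumed, not confirmed by this diff.
import { createOllamaChatbot, getOllamaSetupInstructions } from '@traccia2/sdk/integrations';

async function main() {
  // Print the setup instructions bundled with the SDK.
  console.log(getOllamaSetupInstructions());

  // Build a traced chatbot around a locally pulled model.
  const chatbot = await createOllamaChatbot({
    model: 'mistral',
    baseUrl: 'http://localhost:11434',
    systemPrompt: 'You are a helpful assistant.',
    temperature: 0.7,
  });

  // Per the code above, each call runs inside a 'chatbot-query' span.
  const answer = await chatbot('What is machine learning?');
  console.log(answer);
}

main().catch(console.error);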
@@ -5,6 +5,7 @@
  import { ISpan, ITracer, SpanStatus, SpanEvent, ISpanContext } from '../types';
  import { SpanContext } from './span-context';
  import { getConfig } from '../config/runtime-config';
+ import type { TracerProvider } from './provider';

  /**
   * Span implementation representing a unit of work.
@@ -21,19 +22,22 @@ export class Span implements ISpan {
    public endTimeNs?: number;

    private ended = false;
+   private provider: TracerProvider;

    constructor(
      name: string,
      _tracer: ITracer,
      context: ISpanContext,
      parentSpanId?: string,
-     attributes?: Record<string, unknown>
+     attributes?: Record<string, unknown>,
+     provider?: TracerProvider
    ) {
      this.name = name;
      this.context = context;
      this.parentSpanId = parentSpanId;
      this.attributes = attributes ? { ...attributes } : {};
      this.startTimeNs = performance.now() * 1_000_000;
+     this.provider = provider!;

      // Apply runtime metadata to tracestate
      this.enrichTraceState();
@@ -99,6 +103,11 @@ export class Span implements ISpan {
      }
      this.ended = true;
      this.endTimeNs = performance.now() * 1_000_000;
+
+     // Notify provider of span end
+     if (this.provider) {
+       this.provider.notifySpanEnd(this);
+     }
    }

    /**
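
The TracerProvider itself lives in './provider' and is not shown in this diff; the following is only a sketch of the shape that notifySpanEnd(span) implies, assuming the provider fans ended spans out to registered processors or exporters.

// Hypothetical sketch; the real TracerProvider implementation is not in this diff.
import { ISpan } from '../types';

interface SpanProcessor {
  onEnd(span: ISpan): void; // e.g. batch and export the finished span
}

class ExampleTracerProvider {
  private processors: SpanProcessor[] = [];

  addSpanProcessor(processor: SpanProcessor): void {
    this.processors.push(processor);
  }

  // Called from Span.end() in the change above.
  notifySpanEnd(span: ISpan): void {
    for (const processor of this.processors) {
      processor.onEnd(span);
    }
  }
}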
@@ -67,7 +67,7 @@ export class Tracer implements ITracer {
        effectiveContext?.traceState
      );

-     return new Span(name, this, spanContext, parentSpanId, options?.attributes);
+     return new Span(name, this, spanContext, parentSpanId, options?.attributes, this.provider);
    }

    /**
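
Taken together, a span created through a tracer now carries a reference to its provider, so ending the span reaches the provider's pipeline without extra wiring in user code. A rough sketch of the end-to-end effect, assuming getTracer and startActiveSpan behave as they are used in the Ollama integration above (exact public API and import path not confirmed by this diff):

// Hypothetical sketch of the end-to-end effect of these changes.
import { getTracer } from '@traccia2/sdk'; // import path assumed

const tracer = getTracer('example');

await tracer.startActiveSpan('demo-operation', async (span) => {
  span.setAttribute('success', true);
  // Assuming startActiveSpan ends the span when this callback resolves,
  // Span.end() will call provider.notifySpanEnd(span), handing the finished
  // span to the provider for processing/export.
});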