praisonai 1.3.4 → 1.3.5

@@ -1,4 +1,5 @@
  import type { DbAdapter } from '../db/types';
+ import type { LLMProvider } from '../llm/providers/types';
  /**
   * Agent Configuration
   *
@@ -108,6 +109,10 @@ export declare class Agent {
      private cacheTTL;
      private responseCache;
      private telemetryEnabled;
+     private _backend;
+     private _backendPromise;
+     private _backendSource;
+     private _useAISDKBackend;
      constructor(config: SimpleAgentConfig);
      /**
       * Generate a session ID based on current hour and agent name (like Python SDK)
@@ -182,6 +187,39 @@ export declare class Agent {
       * Clear response cache
       */
      clearCache(): void;
+     /**
+      * Get the resolved backend (AI SDK preferred, native fallback)
+      * Lazy initialization - backend is only resolved on first use
+      */
+     getBackend(): Promise<LLMProvider>;
+     /**
+      * Get the backend source (ai-sdk, native, custom, or legacy)
+      */
+     getBackendSource(): 'ai-sdk' | 'native' | 'custom' | 'legacy';
+     /**
+      * Embed text using AI SDK (preferred) or native provider
+      *
+      * @param text - Text to embed (string or array of strings)
+      * @param options - Embedding options
+      * @returns Embedding vector(s)
+      *
+      * @example Single text
+      * ```typescript
+      * const embedding = await agent.embed("Hello world");
+      * ```
+      *
+      * @example Multiple texts
+      * ```typescript
+      * const embeddings = await agent.embed(["Hello", "World"]);
+      * ```
+      */
+     embed(text: string | string[], options?: {
+         model?: string;
+     }): Promise<number[] | number[][]>;
+     /**
+      * Get the model string for this agent
+      */
+     getModel(): string;
  }
  /**
   * Configuration for multi-agent orchestration
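
Taken together, the declaration changes above add four public methods to `Agent`. A minimal usage sketch, run inside an async context; the import and the `llm` config value are illustrative, and only the declared signatures are assumed:

```typescript
import { Agent } from 'praisonai';

// Illustrative "provider/model" string; any non-openai prefix routes
// through the new AI SDK backend (see the constructor change below).
const agent = new Agent({
    instructions: 'You are a helpful assistant',
    llm: 'anthropic/claude-3-5-sonnet',
});

const backend = await agent.getBackend();           // resolved lazily on first use
agent.getBackendSource();                           // 'ai-sdk' | 'native' | 'custom' | 'legacy'

const one = await agent.embed('Hello world');       // number[]
const many = await agent.embed(['Hello', 'World']); // number[][]
agent.getModel();                                   // the raw model string
```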
@@ -1,4 +1,37 @@
  "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+     Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+     o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+     var ownKeys = function(o) {
+         ownKeys = Object.getOwnPropertyNames || function (o) {
+             var ar = [];
+             for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+             return ar;
+         };
+         return ownKeys(o);
+     };
+     return function (mod) {
+         if (mod && mod.__esModule) return mod;
+         var result = {};
+         if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+         __setModuleDefault(result, mod);
+         return result;
+     };
+ })();
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.Agents = exports.PraisonAIAgents = exports.Agent = void 0;
  const openai_1 = require("../llm/openai");
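
The `__createBinding`/`__setModuleDefault`/`__importStar` block is standard interop boilerplate emitted by the TypeScript compiler; it appears because the new methods use dynamic `import()` downleveled to CommonJS `require`. At the source level, the compiled pattern is simply (illustrative):

```typescript
// Source-level equivalent of Promise.resolve().then(() => __importStar(require(...)))
const { resolveBackend } = await import('../llm/backend-resolver');
```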
@@ -10,6 +43,11 @@ class Agent {
          this.messages = [];
          this.dbInitialized = false;
          this.responseCache = new Map();
+         // AI SDK backend support
+         this._backend = null;
+         this._backendPromise = null;
+         this._backendSource = 'legacy';
+         this._useAISDKBackend = false;
          // Build instructions from either simple or advanced mode
          if (config.instructions) {
              this.instructions = config.instructions;
@@ -44,7 +82,14 @@ class Agent {
          this.cache = config.cache ?? false;
          this.cacheTTL = config.cacheTTL ?? 3600;
          this.telemetryEnabled = config.telemetry ?? false;
-         this.llmService = new openai_1.OpenAIService(this.llm);
+         // Parse model string to extract provider and model ID
+         // Format: "provider/model" or just "model"
+         const providerId = this.llm.includes('/') ? this.llm.split('/')[0] : 'openai';
+         const modelId = this.llm.includes('/') ? this.llm.split('/').slice(1).join('/') : this.llm;
+         // For OpenAI, use OpenAIService directly for backward compatibility
+         // For other providers, we'll use the AI SDK backend via getBackend()
+         this._useAISDKBackend = providerId !== 'openai';
+         this.llmService = new openai_1.OpenAIService(modelId);
          // Configure logging
          logger_1.Logger.setVerbose(this.verbose);
          logger_1.Logger.setPretty(this.pretty);
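
The constructor now splits the configured model string on the first `/` to pick a provider, defaulting to `openai` when no prefix is present. The same rule in isolation, as a sketch of the logic above (not an export of the package):

```typescript
// "provider/model" → { providerId, modelId }; a bare "model" defaults to openai.
// slice(1).join('/') preserves slashes inside the model ID itself.
function parseModelString(llm: string): { providerId: string; modelId: string } {
    const hasProvider = llm.includes('/');
    return {
        providerId: hasProvider ? llm.split('/')[0] : 'openai',
        modelId: hasProvider ? llm.split('/').slice(1).join('/') : llm,
    };
}

parseModelString('gpt-4o-mini');                      // { providerId: 'openai', modelId: 'gpt-4o-mini' }
parseModelString('anthropic/claude-3-5-sonnet');      // { providerId: 'anthropic', modelId: 'claude-3-5-sonnet' }
parseModelString('openrouter/meta-llama/llama-3-8b'); // modelId keeps its inner slash
```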
@@ -290,8 +335,35 @@ class Agent {
          // Add current user prompt
          messages.push({ role: 'user', content: prompt });
          let finalResponse = '';
-         if (this.stream && !this.tools) {
-             // Use streaming with full conversation history
+         // Use AI SDK backend for non-OpenAI providers
+         if (this._useAISDKBackend) {
+             const backend = await this.getBackend();
+             if (this.stream && !this.tools) {
+                 // Streaming with AI SDK backend
+                 const stream = await backend.streamText({
+                     messages,
+                     temperature: 0.7
+                 });
+                 let accumulated = '';
+                 for await (const chunk of stream) {
+                     if (chunk.text) {
+                         process.stdout.write(chunk.text);
+                         accumulated += chunk.text;
+                     }
+                 }
+                 finalResponse = accumulated;
+             }
+             else {
+                 // Non-streaming with AI SDK backend
+                 const result = await backend.generateText({
+                     messages,
+                     temperature: 0.7
+                 });
+                 finalResponse = result.text;
+             }
+         }
+         else if (this.stream && !this.tools) {
+             // Use streaming with full conversation history (OpenAI)
              finalResponse = await this.llmService.streamChat(messages, 0.7, (token) => {
                  process.stdout.write(token);
              });
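
The chat path now branches on `_useAISDKBackend` before falling back to the original OpenAI-only code. From the calls made in this hunk, the resolved backend must expose at least the following shape; this is a reconstruction from usage, not the package's published `LLMProvider` type:

```typescript
// Minimal provider shape implied by the generateText/streamText calls above.
interface ChatMessage {
    role: 'system' | 'user' | 'assistant';
    content: string;
}

interface LLMProviderSketch {
    generateText(opts: { messages: ChatMessage[]; temperature?: number }): Promise<{ text: string }>;
    streamText(opts: { messages: ChatMessage[]; temperature?: number }): Promise<AsyncIterable<{ text?: string }>>;
}
```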
@@ -437,6 +509,73 @@ class Agent {
      clearCache() {
          this.responseCache.clear();
      }
+     /**
+      * Get the resolved backend (AI SDK preferred, native fallback)
+      * Lazy initialization - backend is only resolved on first use
+      */
+     async getBackend() {
+         if (this._backend) {
+             return this._backend;
+         }
+         if (!this._backendPromise) {
+             this._backendPromise = (async () => {
+                 const { resolveBackend } = await Promise.resolve().then(() => __importStar(require('../llm/backend-resolver')));
+                 const result = await resolveBackend(this.llm, {
+                     attribution: {
+                         agentId: this.name,
+                         runId: this.runId,
+                         sessionId: this.sessionId,
+                     },
+                 });
+                 this._backend = result.provider;
+                 this._backendSource = result.source;
+                 logger_1.Logger.debug(`Agent ${this.name} using ${result.source} backend for ${this.llm}`);
+                 return result;
+             })();
+         }
+         const result = await this._backendPromise;
+         return result.provider;
+     }
+     /**
+      * Get the backend source (ai-sdk, native, custom, or legacy)
+      */
+     getBackendSource() {
+         return this._backendSource;
+     }
+     /**
+      * Embed text using AI SDK (preferred) or native provider
+      *
+      * @param text - Text to embed (string or array of strings)
+      * @param options - Embedding options
+      * @returns Embedding vector(s)
+      *
+      * @example Single text
+      * ```typescript
+      * const embedding = await agent.embed("Hello world");
+      * ```
+      *
+      * @example Multiple texts
+      * ```typescript
+      * const embeddings = await agent.embed(["Hello", "World"]);
+      * ```
+      */
+     async embed(text, options) {
+         const { embed, embedMany } = await Promise.resolve().then(() => __importStar(require('../llm/embeddings')));
+         if (Array.isArray(text)) {
+             const result = await embedMany(text, { model: options?.model });
+             return result.embeddings;
+         }
+         else {
+             const result = await embed(text, { model: options?.model });
+             return result.embedding;
+         }
+     }
+     /**
+      * Get the model string for this agent
+      */
+     getModel() {
+         return this.llm;
+     }
  }
  exports.Agent = Agent;
  /**
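
Note that `getBackend()` caches the in-flight promise as well as the resolved provider, so concurrent first calls share a single resolution instead of racing. The same single-flight pattern in isolation (a generic sketch, not the package's code):

```typescript
// Generic single-flight lazy initializer: concurrent callers await one shared promise.
class Lazy<T> {
    private value: T | null = null;
    private pending: Promise<T> | null = null;

    constructor(private readonly init: () => Promise<T>) {}

    async get(): Promise<T> {
        if (this.value !== null) return this.value;  // fast path after first resolution
        if (!this.pending) {
            this.pending = this.init().then((v) => (this.value = v));
        }
        return this.pending;                         // all early callers share this promise
    }
}
```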
@@ -0,0 +1,19 @@
+ /**
+  * Benchmark Command - Performance benchmarks for AI SDK vs Native backends
+  *
+  * Measures:
+  * - Import time (cold start)
+  * - Memory usage
+  * - First-call latency
+  * - Streaming throughput
+  * - Embedding throughput
+  */
+ export interface BenchmarkOptions {
+     verbose?: boolean;
+     output?: 'json' | 'text' | 'pretty';
+     json?: boolean;
+     iterations?: number;
+     backend?: 'ai-sdk' | 'native' | 'both';
+     real?: boolean;
+ }
+ export declare function execute(args: string[], options: BenchmarkOptions): Promise<void>;
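
Given the declared interface, a caller could invoke the new benchmark command like this; the import path and the (empty) args payload are illustrative, and only the options shape comes from the declaration:

```typescript
import { execute, type BenchmarkOptions } from './commands/benchmark'; // path illustrative

const options: BenchmarkOptions = {
    backend: 'both',   // compare AI SDK vs native
    iterations: 5,
    output: 'pretty',
    verbose: true,
};

await execute([], options);
```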