praisonai 1.0.18 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/context.d.ts +68 -0
- package/dist/agent/context.js +119 -0
- package/dist/agent/enhanced.d.ts +92 -0
- package/dist/agent/enhanced.js +267 -0
- package/dist/agent/handoff.d.ts +82 -0
- package/dist/agent/handoff.js +124 -0
- package/dist/agent/router.d.ts +77 -0
- package/dist/agent/router.js +113 -0
- package/dist/agent/simple.d.ts +1 -1
- package/dist/agent/simple.js +40 -4
- package/dist/agent/types.js +2 -2
- package/dist/cli/index.d.ts +20 -0
- package/dist/cli/index.js +150 -0
- package/dist/db/index.d.ts +23 -0
- package/dist/db/index.js +72 -0
- package/dist/db/memory-adapter.d.ts +42 -0
- package/dist/db/memory-adapter.js +146 -0
- package/dist/db/types.d.ts +113 -0
- package/dist/db/types.js +5 -0
- package/dist/eval/index.d.ts +61 -0
- package/dist/eval/index.js +157 -0
- package/dist/guardrails/index.d.ts +82 -0
- package/dist/guardrails/index.js +202 -0
- package/dist/index.d.ts +16 -1
- package/dist/index.js +72 -1
- package/dist/knowledge/rag.d.ts +80 -0
- package/dist/knowledge/rag.js +147 -0
- package/dist/llm/openai.js +11 -3
- package/dist/llm/providers/anthropic.d.ts +33 -0
- package/dist/llm/providers/anthropic.js +291 -0
- package/dist/llm/providers/base.d.ts +25 -0
- package/dist/llm/providers/base.js +43 -0
- package/dist/llm/providers/google.d.ts +27 -0
- package/dist/llm/providers/google.js +275 -0
- package/dist/llm/providers/index.d.ts +43 -0
- package/dist/llm/providers/index.js +116 -0
- package/dist/llm/providers/openai.d.ts +18 -0
- package/dist/llm/providers/openai.js +203 -0
- package/dist/llm/providers/types.d.ts +94 -0
- package/dist/llm/providers/types.js +5 -0
- package/dist/observability/index.d.ts +86 -0
- package/dist/observability/index.js +166 -0
- package/dist/session/index.d.ts +111 -0
- package/dist/session/index.js +250 -0
- package/dist/skills/index.d.ts +70 -0
- package/dist/skills/index.js +233 -0
- package/dist/tools/decorator.d.ts +91 -0
- package/dist/tools/decorator.js +165 -0
- package/dist/tools/index.d.ts +2 -0
- package/dist/tools/index.js +3 -0
- package/dist/tools/mcpSse.d.ts +41 -0
- package/dist/tools/mcpSse.js +108 -0
- package/dist/workflows/index.d.ts +97 -0
- package/dist/workflows/index.js +216 -0
- package/package.json +6 -2
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
/**
 * LLM Providers - Factory and exports for multi-provider support
 *
 * Re-exports the shared provider types, the base provider class, and the
 * three concrete providers (OpenAI, Anthropic, Google), plus the factory
 * helpers for resolving "provider/model" strings into provider instances.
 */
export * from './types';
export * from './base';
export { OpenAIProvider } from './openai';
export { AnthropicProvider } from './anthropic';
export { GoogleProvider } from './google';
import type { LLMProvider, ProviderConfig } from './types';
/**
 * Parse model string into provider and model ID
 * Supports formats:
 * - "provider/model" (e.g., "openai/gpt-4o")
 * - "model" (defaults to OpenAI, e.g., "gpt-4o-mini")
 */
export declare function parseModelString(model: string): {
    providerId: string;
    modelId: string;
};
/**
 * Create a provider instance from a model string
 *
 * @example
 * ```typescript
 * const provider = createProvider('openai/gpt-4o');
 * const provider = createProvider('anthropic/claude-3-5-sonnet-latest');
 * const provider = createProvider('google/gemini-2.0-flash');
 * const provider = createProvider('gpt-4o-mini'); // Defaults to OpenAI
 * ```
 */
export declare function createProvider(model: string, config?: ProviderConfig): LLMProvider;
/**
 * Get the default provider (OpenAI with gpt-4o-mini)
 */
export declare function getDefaultProvider(config?: ProviderConfig): LLMProvider;
/**
 * Check if a provider is available (has required API key)
 */
export declare function isProviderAvailable(providerId: string): boolean;
/**
 * Get list of available providers
 */
export declare function getAvailableProviders(): string[];
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
"use strict";
/**
 * LLM Providers - Factory and exports for multi-provider support
 */
// TypeScript-compiler-emitted CommonJS interop helper: re-binds property `k`
// of module `m` onto `o` (as `k2`), used by the `export *` re-exports below.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
// Compiler-emitted helper implementing `export * from '...'` (skips `default`
// and anything already defined on the target exports object).
var __exportStar = (this && this.__exportStar) || function(m, exports) {
    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.GoogleProvider = exports.AnthropicProvider = exports.OpenAIProvider = void 0;
// Factory helpers are hoisted function declarations defined later in this file.
exports.parseModelString = parseModelString;
exports.createProvider = createProvider;
exports.getDefaultProvider = getDefaultProvider;
exports.isProviderAvailable = isProviderAvailable;
exports.getAvailableProviders = getAvailableProviders;
// Re-export the shared provider types and the abstract base class.
__exportStar(require("./types"), exports);
__exportStar(require("./base"), exports);
// Named re-exports of the three concrete provider classes.
var openai_1 = require("./openai");
Object.defineProperty(exports, "OpenAIProvider", { enumerable: true, get: function () { return openai_1.OpenAIProvider; } });
var anthropic_1 = require("./anthropic");
Object.defineProperty(exports, "AnthropicProvider", { enumerable: true, get: function () { return anthropic_1.AnthropicProvider; } });
var google_1 = require("./google");
Object.defineProperty(exports, "GoogleProvider", { enumerable: true, get: function () { return google_1.GoogleProvider; } });
const openai_2 = require("./openai");
const anthropic_2 = require("./anthropic");
const google_2 = require("./google");
/**
 * Provider registry for dynamic provider loading
 * Maps a lowercase provider id to its class. Note 'gemini' and 'google' both
 * resolve to the same GoogleProvider class.
 */
const PROVIDER_MAP = {
    openai: openai_2.OpenAIProvider,
    anthropic: anthropic_2.AnthropicProvider,
    google: google_2.GoogleProvider,
    gemini: google_2.GoogleProvider, // Alias
};
|
|
46
|
+
/**
 * Parse a model string into its provider id and model id.
 *
 * Supported formats:
 * - "provider/model" (e.g., "openai/gpt-4o") — provider id is lowercased,
 *   and any further '/' characters stay part of the model id.
 * - "model" alone — the provider is inferred from well-known name prefixes,
 *   defaulting to OpenAI for anything unrecognized.
 */
function parseModelString(model) {
    const slash = model.indexOf('/');
    if (slash !== -1) {
        return {
            providerId: model.slice(0, slash).toLowerCase(),
            modelId: model.slice(slash + 1),
        };
    }
    // No explicit provider: infer it from the model-name prefix.
    // gpt-*, o1*, o3*, and all unrecognized names fall through to OpenAI.
    const providerId =
        model.startsWith('claude-') ? 'anthropic' :
        model.startsWith('gemini-') ? 'google' :
        'openai';
    return { providerId, modelId: model };
}
|
|
70
|
+
/**
 * Create a provider instance from a model string.
 *
 * Resolves the provider id via parseModelString, looks it up in the
 * PROVIDER_MAP registry, and instantiates the matching provider class.
 *
 * @example
 * ```typescript
 * const provider = createProvider('openai/gpt-4o');
 * const provider = createProvider('anthropic/claude-3-5-sonnet-latest');
 * const provider = createProvider('google/gemini-2.0-flash');
 * const provider = createProvider('gpt-4o-mini'); // Defaults to OpenAI
 * ```
 *
 * @throws Error when the provider id is not registered in PROVIDER_MAP.
 */
function createProvider(model, config) {
    const parsed = parseModelString(model);
    const ProviderClass = PROVIDER_MAP[parsed.providerId];
    if (ProviderClass === undefined) {
        throw new Error(`Unknown provider: ${parsed.providerId}. Available providers: ${Object.keys(PROVIDER_MAP).join(', ')}`);
    }
    return new ProviderClass(parsed.modelId, config);
}
|
|
89
|
+
/**
 * Get the default provider.
 *
 * Falls back to OpenAI's gpt-4o-mini unless the caller supplies
 * `config.defaultModel` — that field is declared on ProviderConfig but was
 * previously never honored here, making it dead configuration.
 *
 * @param config - optional provider configuration (apiKey, baseUrl,
 *   defaultModel, ...), forwarded to the created provider.
 * @returns a ready-to-use LLMProvider instance.
 */
function getDefaultProvider(config) {
    return createProvider(config?.defaultModel ?? 'openai/gpt-4o-mini', config);
}
|
|
95
|
+
/**
 * Check if a provider is available, i.e. whether its API-key environment
 * variable is set to a non-empty value.
 *
 * @param providerId - provider id, case-insensitive ('openai', 'anthropic',
 *   'google', or the 'gemini' alias). Unknown ids report unavailable.
 */
function isProviderAvailable(providerId) {
    const keyEnvVars = {
        openai: 'OPENAI_API_KEY',
        anthropic: 'ANTHROPIC_API_KEY',
        google: 'GOOGLE_API_KEY',
        gemini: 'GOOGLE_API_KEY', // alias shares Google's key
    };
    const envVar = keyEnvVars[providerId.toLowerCase()];
    return envVar !== undefined && !!process.env[envVar];
}
|
|
111
|
+
/**
 * Get the list of available providers (those whose API key is configured).
 *
 * Fix: PROVIDER_MAP registers 'gemini' as an alias of 'google' (same class),
 * so the old `Object.keys(...).filter(...)` reported the same provider twice.
 * Aliases are now deduplicated by provider class; each concrete provider is
 * reported once under its first-registered id.
 *
 * @returns provider ids, e.g. ['openai', 'google'].
 */
function getAvailableProviders() {
    const seen = new Set();
    const available = [];
    for (const [id, ProviderClass] of Object.entries(PROVIDER_MAP)) {
        if (seen.has(ProviderClass))
            continue; // skip aliases of an already-reported provider
        seen.add(ProviderClass);
        if (isProviderAvailable(id)) {
            available.push(id);
        }
    }
    return available;
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
 * OpenAI Provider - Implementation for OpenAI API
 */
import type { ChatCompletionMessageParam, ChatCompletionTool } from 'openai/resources/chat/completions';
import { BaseProvider } from './base';
import type { ProviderConfig, GenerateTextOptions, GenerateTextResult, StreamTextOptions, StreamChunk, GenerateObjectOptions, GenerateObjectResult, Message, ToolDefinition } from './types';
export declare class OpenAIProvider extends BaseProvider {
    readonly providerId = "openai";
    /** Underlying OpenAI SDK client instance. */
    private client;
    constructor(modelId: string, config?: ProviderConfig);
    /** Single-shot chat completion (retried via the base class's withRetry). */
    generateText(options: GenerateTextOptions): Promise<GenerateTextResult>;
    /** Streaming chat completion; yields incremental text/tool-call chunks. */
    streamText(options: StreamTextOptions): Promise<AsyncIterable<StreamChunk>>;
    /** Structured JSON output using OpenAI's json_schema response format. */
    generateObject<T = any>(options: GenerateObjectOptions<T>): Promise<GenerateObjectResult<T>>;
    /** Convert provider-agnostic messages to the OpenAI wire format. */
    protected formatMessages(messages: Message[]): ChatCompletionMessageParam[];
    /** Convert provider-agnostic tool definitions to OpenAI function tools. */
    protected formatTools(tools: ToolDefinition[]): ChatCompletionTool[];
    private mapFinishReason;
    private normalizeSchema;
}
|
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
"use strict";
/**
 * OpenAI Provider - Implementation for OpenAI API
 */
// Compiler-emitted helper: wraps a CommonJS module so it can be consumed as
// an ES-module default import (used for the `openai` package below).
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIProvider = void 0;
const openai_1 = __importDefault(require("openai"));
const base_1 = require("./base");
|
|
12
|
+
/**
 * OpenAI implementation of the LLMProvider contract.
 *
 * Thin adapter around the official OpenAI SDK: translates the library's
 * provider-agnostic options/messages/tools into the Chat Completions wire
 * format and maps responses back.
 */
class OpenAIProvider extends base_1.BaseProvider {
    /**
     * @param modelId - OpenAI model id (e.g. "gpt-4o-mini").
     * @param config  - optional apiKey (falls back to OPENAI_API_KEY env var),
     *                  baseURL override, and request timeout (default 60s).
     */
    constructor(modelId, config = {}) {
        super(modelId, config);
        this.providerId = 'openai';
        this.client = new openai_1.default({
            apiKey: config.apiKey || process.env.OPENAI_API_KEY,
            baseURL: config.baseUrl,
            timeout: config.timeout || 60000,
            maxRetries: 0, // We handle retries ourselves
        });
    }
    /**
     * Single-shot (non-streaming) chat completion.
     * Wrapped in the base class's withRetry; returns text, any tool calls,
     * token usage, and the raw SDK response.
     */
    async generateText(options) {
        return this.withRetry(async () => {
            const response = await this.client.chat.completions.create({
                model: this.modelId,
                messages: this.formatMessages(options.messages),
                temperature: options.temperature ?? 0.7,
                max_tokens: options.maxTokens,
                tools: options.tools ? this.formatTools(options.tools) : undefined,
                tool_choice: options.toolChoice,
                stop: options.stop,
                top_p: options.topP,
                frequency_penalty: options.frequencyPenalty,
                presence_penalty: options.presencePenalty,
            });
            const choice = response.choices[0];
            const message = choice.message;
            return {
                text: message.content || '',
                toolCalls: message.tool_calls?.map(tc => ({
                    id: tc.id,
                    type: 'function',
                    function: {
                        name: tc.function.name,
                        arguments: tc.function.arguments,
                    },
                })),
                usage: {
                    promptTokens: response.usage?.prompt_tokens || 0,
                    completionTokens: response.usage?.completion_tokens || 0,
                    totalTokens: response.usage?.total_tokens || 0,
                },
                finishReason: this.mapFinishReason(choice.finish_reason),
                raw: response,
            };
        });
    }
    /**
     * Streaming chat completion. Yields a chunk per content delta (also
     * invoking options.onToken), accumulates partial tool-call deltas by
     * index, and emits a final chunk carrying finishReason/toolCalls/usage.
     *
     * NOTE(review): unlike generateText, this omits top_p and the penalty
     * options, and is not wrapped in withRetry — confirm whether intentional.
     * NOTE(review): chunk.usage is typically only populated when
     * stream_options.include_usage is requested — usage here may always be
     * undefined; verify against the SDK version in use.
     */
    async streamText(options) {
        const self = this;
        return {
            async *[Symbol.asyncIterator]() {
                const stream = await self.client.chat.completions.create({
                    model: self.modelId,
                    messages: self.formatMessages(options.messages),
                    temperature: options.temperature ?? 0.7,
                    max_tokens: options.maxTokens,
                    tools: options.tools ? self.formatTools(options.tools) : undefined,
                    tool_choice: options.toolChoice,
                    stop: options.stop,
                    stream: true,
                });
                // Tool calls arrive as incremental deltas keyed by index;
                // assemble them here until the finish chunk.
                let toolCalls = [];
                for await (const chunk of stream) {
                    const delta = chunk.choices[0]?.delta;
                    if (delta?.content) {
                        if (options.onToken) {
                            options.onToken(delta.content);
                        }
                        yield { text: delta.content };
                    }
                    if (delta?.tool_calls) {
                        for (const tc of delta.tool_calls) {
                            if (tc.index !== undefined) {
                                if (!toolCalls[tc.index]) {
                                    toolCalls[tc.index] = {
                                        id: tc.id || '',
                                        type: 'function',
                                        function: { name: '', arguments: '' },
                                    };
                                }
                                if (tc.id)
                                    toolCalls[tc.index].id = tc.id;
                                if (tc.function?.name)
                                    toolCalls[tc.index].function.name = tc.function.name;
                                if (tc.function?.arguments)
                                    toolCalls[tc.index].function.arguments += tc.function.arguments;
                            }
                        }
                    }
                    if (chunk.choices[0]?.finish_reason) {
                        yield {
                            finishReason: chunk.choices[0].finish_reason,
                            toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
                            usage: chunk.usage ? {
                                promptTokens: chunk.usage.prompt_tokens,
                                completionTokens: chunk.usage.completion_tokens,
                                totalTokens: chunk.usage.total_tokens,
                            } : undefined,
                        };
                    }
                }
            },
        };
    }
    /**
     * Structured output: requests a strict json_schema response format and
     * JSON-parses the returned content. Throws if the content is not valid
     * JSON. Retried via withRetry.
     */
    async generateObject(options) {
        return this.withRetry(async () => {
            const response = await this.client.chat.completions.create({
                model: this.modelId,
                messages: this.formatMessages(options.messages),
                temperature: options.temperature ?? 0.7,
                max_tokens: options.maxTokens,
                response_format: {
                    type: 'json_schema',
                    json_schema: {
                        name: 'response',
                        schema: this.normalizeSchema(options.schema),
                        strict: true,
                    },
                },
            });
            const choice = response.choices[0];
            const content = choice.message.content || '{}';
            let parsed;
            try {
                parsed = JSON.parse(content);
            }
            catch (e) {
                throw new Error(`Failed to parse JSON response: ${content}`);
            }
            return {
                object: parsed,
                usage: {
                    promptTokens: response.usage?.prompt_tokens || 0,
                    completionTokens: response.usage?.completion_tokens || 0,
                    totalTokens: response.usage?.total_tokens || 0,
                },
                raw: response,
            };
        });
    }
    /**
     * Map provider-agnostic messages to OpenAI's wire format.
     * 'tool' messages carry tool_call_id; assistant messages keep tool_calls.
     */
    formatMessages(messages) {
        return messages.map(msg => {
            if (msg.role === 'tool') {
                return {
                    role: 'tool',
                    content: msg.content || '',
                    tool_call_id: msg.tool_call_id || '',
                };
            }
            if (msg.role === 'assistant' && msg.tool_calls) {
                return {
                    role: 'assistant',
                    content: msg.content,
                    tool_calls: msg.tool_calls,
                };
            }
            return {
                role: msg.role,
                content: msg.content || '',
            };
        });
    }
    /**
     * Map tool definitions to OpenAI function-tool format, synthesizing a
     * description and an empty object schema when absent.
     */
    formatTools(tools) {
        return tools.map(tool => ({
            type: 'function',
            function: {
                name: tool.name,
                description: tool.description || `Function ${tool.name}`,
                parameters: tool.parameters || { type: 'object', properties: {} },
            },
        }));
    }
    /**
     * Normalize OpenAI finish reasons to the library's finishReason union;
     * anything unrecognized is reported as 'stop'.
     */
    mapFinishReason(reason) {
        switch (reason) {
            case 'stop': return 'stop';
            case 'length': return 'length';
            case 'tool_calls': return 'tool_calls';
            case 'content_filter': return 'content_filter';
            default: return 'stop';
        }
    }
    /**
     * Pass JSON schemas through unchanged. Zod schemas (detected via
     * .parse/._def) are rejected with a pointer to zod-to-json-schema.
     */
    normalizeSchema(schema) {
        // If it's a Zod schema, convert to JSON schema
        if (schema && typeof schema.parse === 'function' && typeof schema._def === 'object') {
            // This is a Zod schema - we need zod-to-json-schema
            // For now, throw an error suggesting to use JSON schema directly
            throw new Error('Zod schemas require zod-to-json-schema. Please use JSON schema directly or install zod-to-json-schema.');
        }
        return schema;
    }
}
exports.OpenAIProvider = OpenAIProvider;
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
/**
 * LLM Provider Types - Core type definitions for the provider abstraction layer
 */
/** A single chat message; 'tool' results carry tool_call_id, assistants may carry tool_calls. */
export interface Message {
    role: 'system' | 'user' | 'assistant' | 'tool';
    content: string | null;
    name?: string;
    tool_call_id?: string;
    tool_calls?: ToolCall[];
}
/** A model-requested function invocation; arguments is a JSON-encoded string. */
export interface ToolCall {
    id: string;
    type: 'function';
    function: {
        name: string;
        arguments: string;
    };
}
/** A tool the model may call; parameters is a JSON-schema-style object. */
export interface ToolDefinition {
    name: string;
    description?: string;
    parameters?: Record<string, any>;
}
/** Token accounting for a single request. */
export interface TokenUsage {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
}
/** Options shared by text generation calls. */
export interface GenerateTextOptions {
    messages: Message[];
    temperature?: number;
    maxTokens?: number;
    tools?: ToolDefinition[];
    toolChoice?: 'auto' | 'none' | 'required' | {
        type: 'function';
        function: {
            name: string;
        };
    };
    stop?: string[];
    topP?: number;
    frequencyPenalty?: number;
    presencePenalty?: number;
}
/** Result of a non-streaming text generation. */
export interface GenerateTextResult {
    text: string;
    toolCalls?: ToolCall[];
    usage: TokenUsage;
    finishReason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'error';
    raw?: any;
}
/** Streaming options: same as GenerateTextOptions plus per-token/tool-call callbacks. */
export interface StreamTextOptions extends GenerateTextOptions {
    onToken?: (token: string) => void;
    onToolCall?: (toolCall: ToolCall) => void;
}
/** One increment of a streamed response; all fields optional per chunk. */
export interface StreamChunk {
    text?: string;
    toolCalls?: ToolCall[];
    usage?: Partial<TokenUsage>;
    finishReason?: string;
}
/** Options for structured (schema-constrained) generation. */
export interface GenerateObjectOptions<T = any> {
    messages: Message[];
    schema: Record<string, any> | any;
    temperature?: number;
    maxTokens?: number;
    maxRetries?: number;
}
/** Result of a structured generation: the parsed object plus usage. */
export interface GenerateObjectResult<T = any> {
    object: T;
    usage: TokenUsage;
    raw?: any;
}
/** Per-provider configuration; apiKey falls back to provider env vars. */
export interface ProviderConfig {
    apiKey?: string;
    baseUrl?: string;
    maxRetries?: number;
    timeout?: number;
    defaultModel?: string;
}
/**
 * Base interface for all LLM providers
 */
export interface LLMProvider {
    readonly providerId: string;
    readonly modelId: string;
    generateText(options: GenerateTextOptions): Promise<GenerateTextResult>;
    streamText(options: StreamTextOptions): Promise<AsyncIterable<StreamChunk>>;
    generateObject<T = any>(options: GenerateObjectOptions<T>): Promise<GenerateObjectResult<T>>;
}
/**
 * Provider factory function type
 */
export type ProviderFactory = (modelId: string, config?: ProviderConfig) => LLMProvider;
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
/**
 * Observability - Tracing, logging, and metrics hooks
 */
export type SpanKind = 'llm' | 'tool' | 'agent' | 'workflow' | 'custom';
export type SpanStatus = 'pending' | 'running' | 'completed' | 'failed';
/** A single recorded unit of work within a trace. */
export interface SpanData {
    id: string;
    traceId: string;
    parentId?: string;
    name: string;
    kind: SpanKind;
    status: SpanStatus;
    startTime: number;
    endTime?: number;
    attributes: Record<string, any>;
    events: SpanEvent[];
}
/** A timestamped annotation attached to a span. */
export interface SpanEvent {
    name: string;
    timestamp: number;
    attributes?: Record<string, any>;
}
/** A top-level trace grouping its spans and metadata. */
export interface TraceData {
    id: string;
    name: string;
    startTime: number;
    endTime?: number;
    status: SpanStatus;
    spans: SpanData[];
    metadata: Record<string, any>;
}
/**
 * Observability Adapter Protocol
 */
export interface ObservabilityAdapter {
    startTrace(name: string, metadata?: Record<string, any>): TraceContext;
    endTrace(traceId: string, status?: SpanStatus): void;
    startSpan(traceId: string, name: string, kind: SpanKind, parentId?: string): SpanContext;
    endSpan(spanId: string, status?: SpanStatus, attributes?: Record<string, any>): void;
    addEvent(spanId: string, name: string, attributes?: Record<string, any>): void;
    flush(): Promise<void>;
}
/** Handle returned by startTrace: create child spans or end the trace. */
export interface TraceContext {
    traceId: string;
    startSpan(name: string, kind: SpanKind): SpanContext;
    end(status?: SpanStatus): void;
}
/** Handle returned by startSpan: annotate and close a span. */
export interface SpanContext {
    spanId: string;
    traceId: string;
    addEvent(name: string, attributes?: Record<string, any>): void;
    setAttributes(attributes: Record<string, any>): void;
    end(status?: SpanStatus): void;
}
/**
 * In-memory observability adapter for development
 */
export declare class MemoryObservabilityAdapter implements ObservabilityAdapter {
    private traces;
    private spans;
    startTrace(name: string, metadata?: Record<string, any>): TraceContext;
    endTrace(traceId: string, status?: SpanStatus): void;
    startSpan(traceId: string, name: string, kind: SpanKind, parentId?: string): SpanContext;
    endSpan(spanId: string, status?: SpanStatus, attributes?: Record<string, any>): void;
    addEvent(spanId: string, name: string, attributes?: Record<string, any>): void;
    flush(): Promise<void>;
    getTrace(traceId: string): TraceData | undefined;
    getSpan(spanId: string): SpanData | undefined;
    getAllTraces(): TraceData[];
    clear(): void;
    private generateId;
}
/**
 * Console observability adapter for debugging
 */
export declare class ConsoleObservabilityAdapter implements ObservabilityAdapter {
    private memory;
    startTrace(name: string, metadata?: Record<string, any>): TraceContext;
    endTrace(traceId: string, status?: SpanStatus): void;
    startSpan(traceId: string, name: string, kind: SpanKind, parentId?: string): SpanContext;
    endSpan(spanId: string, status?: SpanStatus, attributes?: Record<string, any>): void;
    addEvent(spanId: string, name: string, attributes?: Record<string, any>): void;
    flush(): Promise<void>;
}
/** Install the process-wide observability adapter. */
export declare function setObservabilityAdapter(adapter: ObservabilityAdapter): void;
/** Retrieve the currently installed observability adapter. */
export declare function getObservabilityAdapter(): ObservabilityAdapter;
|