praisonai 1.0.15 → 1.0.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +27 -1
- package/dist/agent/proxy.d.ts +1 -0
- package/dist/agent/proxy.js +32 -1
- package/dist/agent/simple.d.ts +15 -0
- package/dist/agent/simple.js +96 -5
- package/dist/llm/openai.d.ts +24 -6
- package/dist/llm/openai.js +133 -15
- package/package.json +4 -4
package/README.md
CHANGED

@@ -220,4 +220,30 @@ src/
 
 ## License
 
-MIT License - see the LICENSE file for details
+MIT License - see the LICENSE file for details
+
+## Testing
+
+### Manual Testing
+
+```bash
+export OPENAI_API_KEY='your-api-key'
+npx ts-node tests/development/simple/single-agent.ts
+npx ts-node tests/development/simple/multi-agent.ts
+npx ts-node tests/development/simple/multi-agents-simple.js
+```
+
+## Examples Testing
+
+```bash
+export OPENAI_API_KEY='your-api-key'
+npx ts-node examples/simple/single-agent.ts
+npx ts-node examples/simple/multi-agent.ts
+```
+
+### Automated Testing (WIP)
+
+```bash
+npm run test
+```
+
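
The test scripts themselves are not part of this diff. For orientation, a minimal sketch of what `tests/development/simple/single-agent.ts` might look like, assuming a named `Agent` export from `praisonai` and the config fields declared in `dist/agent/simple.d.ts` further down this diff:

```typescript
// Hypothetical sketch; the import path and config fields are assumptions
// drawn from SimpleAgentConfig in dist/agent/simple.d.ts below.
import { Agent } from 'praisonai';

async function main(): Promise<void> {
    const agent = new Agent({
        name: 'SingleAgent',
        instructions: 'You are a helpful assistant. Answer concisely.',
        verbose: true,
    });
    // start() resolves with the agent's final response text.
    const result = await agent.start('Why is the sky blue?');
    console.log(result);
}

main().catch(console.error);
```
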
package/dist/agent/proxy.d.ts
CHANGED
package/dist/agent/proxy.js
CHANGED

@@ -28,10 +28,41 @@ class Agent {
             name: config.name,
             verbose: config.verbose,
             llm: config.llm,
-            markdown: config.markdown
+            markdown: config.markdown,
+            tools: config.tools
         };
         this.simpleAgent = new simple_1.Agent(simpleConfig);
         }
+        // Register tool functions if provided
+        if (config.tools && this.simpleAgent) {
+            // Look for tool functions in the global scope
+            for (const tool of config.tools) {
+                if (tool.type === 'function' && tool.function && tool.function.name) {
+                    const funcName = tool.function.name;
+                    // Check if function exists in global scope using a safer approach
+                    const globalAny = global;
+                    if (typeof globalAny[funcName] === 'function') {
+                        this.simpleAgent.registerToolFunction(funcName, globalAny[funcName]);
+                    }
+                    else if (typeof globalAny['get_' + funcName] === 'function') {
+                        // Try with 'get_' prefix (common convention)
+                        this.simpleAgent.registerToolFunction(funcName, globalAny['get_' + funcName]);
+                    }
+                    else {
+                        // Try to find the function in the global scope by iterating through all properties
+                        for (const key in globalAny) {
+                            if (key.toLowerCase() === funcName.toLowerCase() ||
+                                key.toLowerCase() === 'get_' + funcName.toLowerCase()) {
+                                if (typeof globalAny[key] === 'function') {
+                                    this.simpleAgent.registerToolFunction(funcName, globalAny[key]);
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
     }
     getInstructions() {
         return this.instructions;
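
The new constructor code resolves each tool schema to an implementation in three steps: an exact match on the global object, then the `get_` prefix convention, then a case-insensitive scan of global keys. A sketch of how a caller might rely on this (the import path, tool name, and function are illustrative, not from the package):

```typescript
// Hypothetical usage of the lookup above: 'weather' has no exact match on
// the Node global object, so the proxy falls back to the 'get_' prefix
// convention and registers get_weather under the schema name 'weather'.
import { Agent } from 'praisonai'; // assumed export path

(global as any).get_weather = async (location: string): Promise<string> =>
    `20°C and sunny in ${location}`;

const agent = new Agent({
    instructions: 'Answer weather questions using the weather tool.',
    tools: [{
        type: 'function',
        function: {
            name: 'weather',
            description: 'Get the current weather for a location',
            parameters: {
                type: 'object',
                properties: { location: { type: 'string' } },
                required: ['location'],
            },
        },
    }],
});
```
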
package/dist/agent/simple.d.ts
CHANGED

@@ -6,6 +6,7 @@ export interface SimpleAgentConfig {
     llm?: string;
     markdown?: boolean;
     stream?: boolean;
+    tools?: any[];
 }
 export declare class Agent {
     private instructions;
@@ -16,8 +17,22 @@ export declare class Agent {
     private markdown;
     private stream;
     private llmService;
+    private tools?;
+    private toolFunctions;
     constructor(config: SimpleAgentConfig);
     private createSystemPrompt;
+    /**
+     * Register a tool function that can be called by the model
+     * @param name Function name
+     * @param fn Function implementation
+     */
+    registerToolFunction(name: string, fn: Function): void;
+    /**
+     * Process tool calls from the model
+     * @param toolCalls Tool calls from the model
+     * @returns Array of tool results
+     */
+    private processToolCalls;
     start(prompt: string, previousResult?: string): Promise<string>;
     chat(prompt: string, previousResult?: string): Promise<string>;
     execute(previousResult?: string): Promise<string>;
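
Besides the global-scope lookup in the proxy, the new public `registerToolFunction` allows explicit wiring on the lower-level Agent. A sketch under the assumption that the dist module is importable directly (only the dist layout is visible in this diff):

```typescript
// Assumed deep import path; adjust to however the package exposes this class.
import { Agent } from 'praisonai/dist/agent/simple';

const calculator = new Agent({
    instructions: 'Use the add tool for any arithmetic.',
    stream: false,
    tools: [{
        type: 'function',
        function: {
            name: 'add',
            description: 'Add two numbers',
            parameters: {
                type: 'object',
                properties: { a: { type: 'number' }, b: { type: 'number' } },
                required: ['a', 'b'],
            },
        },
    }],
});

// Map the schema name to a concrete implementation.
calculator.registerToolFunction('add', (a: number, b: number) => a + b);
```
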
package/dist/agent/simple.js
CHANGED

@@ -5,6 +5,7 @@ const openai_1 = require("../llm/openai");
 const logger_1 = require("../utils/logger");
 class Agent {
     constructor(config) {
+        this.toolFunctions = {};
         this.instructions = config.instructions;
         this.name = config.name || `Agent_${Math.random().toString(36).substr(2, 9)}`;
         this.verbose = config.verbose ?? process.env.PRAISON_VERBOSE !== 'false';
@@ -12,6 +13,7 @@ class Agent {
         this.llm = config.llm || 'gpt-4o-mini';
         this.markdown = config.markdown ?? true;
         this.stream = config.stream ?? true;
+        this.tools = config.tools;
         this.llmService = new openai_1.OpenAIService(this.llm);
         // Configure logging
         logger_1.Logger.setVerbose(this.verbose);
@@ -24,6 +26,53 @@ class Agent {
         }
         return prompt;
     }
+    /**
+     * Register a tool function that can be called by the model
+     * @param name Function name
+     * @param fn Function implementation
+     */
+    registerToolFunction(name, fn) {
+        this.toolFunctions[name] = fn;
+        logger_1.Logger.debug(`Registered tool function: ${name}`);
+    }
+    /**
+     * Process tool calls from the model
+     * @param toolCalls Tool calls from the model
+     * @returns Array of tool results
+     */
+    async processToolCalls(toolCalls) {
+        const results = [];
+        for (const toolCall of toolCalls) {
+            const { id, function: { name, arguments: argsString } } = toolCall;
+            await logger_1.Logger.debug(`Processing tool call: ${name}`, { arguments: argsString });
+            try {
+                // Parse arguments
+                const args = JSON.parse(argsString);
+                // Check if function exists
+                if (!this.toolFunctions[name]) {
+                    throw new Error(`Function ${name} not registered`);
+                }
+                // Call the function
+                const result = await this.toolFunctions[name](...Object.values(args));
+                // Add result to messages
+                results.push({
+                    role: 'tool',
+                    tool_call_id: id,
+                    content: result.toString()
+                });
+                await logger_1.Logger.debug(`Tool call result for ${name}:`, { result });
+            }
+            catch (error) {
+                await logger_1.Logger.error(`Error executing tool ${name}:`, error);
+                results.push({
+                    role: 'tool',
+                    tool_call_id: id,
+                    content: `Error: ${error.message || 'Unknown error'}`
+                });
+            }
+        }
+        return results;
+    }
     async start(prompt, previousResult) {
         await logger_1.Logger.debug(`Agent ${this.name} starting with prompt: ${prompt}`);
         try {
@@ -31,19 +80,61 @@ class Agent {
             if (previousResult) {
                 prompt = prompt.replace('{{previous}}', previousResult);
             }
-
-
+            // Initialize messages array
+            const messages = [
+                { role: 'system', content: this.createSystemPrompt() },
+                { role: 'user', content: prompt }
+            ];
+            let finalResponse = '';
+            if (this.stream && !this.tools) {
+                // Use streaming without tools
                 let fullResponse = '';
                 await this.llmService.streamText(prompt, this.createSystemPrompt(), 0.7, (token) => {
                     process.stdout.write(token);
                     fullResponse += token;
                 });
-
+                finalResponse = fullResponse;
+            }
+            else if (this.tools) {
+                // Use tools (non-streaming for now to simplify implementation)
+                let continueConversation = true;
+                let iterations = 0;
+                const maxIterations = 5; // Prevent infinite loops
+                while (continueConversation && iterations < maxIterations) {
+                    iterations++;
+                    // Get response from LLM
+                    const response = await this.llmService.generateChat(messages, 0.7, this.tools);
+                    // Add assistant response to messages
+                    messages.push({
+                        role: 'assistant',
+                        content: response.content || '',
+                        tool_calls: response.tool_calls
+                    });
+                    // Check if there are tool calls to process
+                    if (response.tool_calls && response.tool_calls.length > 0) {
+                        // Process tool calls
+                        const toolResults = await this.processToolCalls(response.tool_calls);
+                        // Add tool results to messages
+                        messages.push(...toolResults);
+                        // Continue conversation to get final response
+                        continueConversation = true;
+                    }
+                    else {
+                        // No tool calls, we have our final response
+                        finalResponse = response.content || '';
+                        continueConversation = false;
+                    }
+                }
+                if (iterations >= maxIterations) {
+                    await logger_1.Logger.warn(`Reached maximum iterations (${maxIterations}) for tool calls`);
+                }
             }
             else {
-
+                // Use regular text generation without streaming
+                const response = await this.llmService.generateText(prompt, this.createSystemPrompt());
+                finalResponse = response;
             }
-            return
+            return finalResponse;
         }
         catch (error) {
             await logger_1.Logger.error('Error in agent execution', error);
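
One behavioral detail worth noting: `processToolCalls` dispatches with `fn(...Object.values(args))`, so implementations receive positional arguments in the order the model serializes them, which is normally, though not guaranteed to be, the schema order. Continuing the hypothetical `calculator` sketch above, a single `start()` call now drives the bounded tool loop:

```typescript
// Traced against the loop above, reusing the calculator sketch.
(async () => {
    const answer = await calculator.start('What is 2 + 40?');
    // Iteration 1: the assistant message carries tool_calls; processToolCalls
    // runs add(2, 40) and appends { role: 'tool', content: '42' }.
    // Iteration 2: the assistant replies with plain text, which start() returns.
    console.log(answer);
})();
```
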
package/dist/llm/openai.d.ts
CHANGED

@@ -1,20 +1,38 @@
+import type { ChatCompletionTool, ChatCompletionToolChoiceOption } from 'openai/resources/chat/completions';
 export interface LLMResponse {
     content: string;
     role: string;
+    tool_calls?: Array<{
+        id: string;
+        type: string;
+        function: {
+            name: string;
+            arguments: string;
+        };
+    }>;
 }
-type ChatRole = 'system' | 'user' | 'assistant';
+type ChatRole = 'system' | 'user' | 'assistant' | 'tool';
 interface ChatMessage {
     role: ChatRole;
-    content: string;
+    content: string | null;
+    tool_call_id?: string;
+    tool_calls?: Array<{
+        id: string;
+        type: string;
+        function: {
+            name: string;
+            arguments: string;
+        };
+    }>;
 }
 export declare class OpenAIService {
     private model;
     private client;
     constructor(model?: string);
     private getClient;
-    generateText(prompt: string, systemPrompt?: string, temperature?: number): Promise<string>;
-    generateChat(messages: ChatMessage[], temperature?: number): Promise<LLMResponse>;
-    streamText(prompt: string, systemPrompt: string | undefined, temperature: number | undefined, onToken: (token: string) => void): Promise<void>;
-    chatCompletion(messages: ChatMessage[], temperature?: number): Promise<LLMResponse>;
+    generateText(prompt: string, systemPrompt?: string, temperature?: number, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption): Promise<string>;
+    generateChat(messages: ChatMessage[], temperature?: number, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption): Promise<LLMResponse>;
+    streamText(prompt: string, systemPrompt: string | undefined, temperature: number | undefined, onToken: (token: string) => void, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption, onToolCall?: (toolCall: any) => void): Promise<void>;
+    chatCompletion(messages: ChatMessage[], temperature?: number, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption): Promise<LLMResponse>;
 }
 export {};
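
Every method now accepts optional `tools` and `tool_choice` parameters, and `LLMResponse` can carry back the model's tool calls. A sketch against the widened surface (`OpenAIService` is an internal class; the deep import path is an assumption):

```typescript
import { OpenAIService } from 'praisonai/dist/llm/openai'; // assumed path
import type { ChatCompletionTool } from 'openai/resources/chat/completions';

const tools: ChatCompletionTool[] = [{
    type: 'function',
    function: {
        name: 'weather',
        description: 'Get the current weather for a location',
        parameters: {
            type: 'object',
            properties: { location: { type: 'string' } },
            required: ['location'],
        },
    },
}];

(async () => {
    const llm = new OpenAIService('gpt-4o-mini');
    const reply = await llm.generateChat(
        [{ role: 'user', content: 'What is the weather in Paris?' }],
        0.7,
        tools,
        'auto',
    );
    if (reply.tool_calls?.length) {
        // `arguments` is a JSON string and must be parsed before dispatch.
        const call = reply.tool_calls[0];
        console.log(call.function.name, JSON.parse(call.function.arguments));
    }
})();
```
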
package/dist/llm/openai.js
CHANGED

@@ -12,6 +12,46 @@ dotenv_1.default.config();
 if (!process.env.OPENAI_API_KEY) {
     throw new Error('OPENAI_API_KEY not found in environment variables');
 }
+// Convert our ChatMessage to OpenAI's ChatCompletionMessageParam
+function convertToOpenAIMessage(message) {
+    // Basic conversion for common message types
+    if (message.role === 'system' || message.role === 'user' || message.role === 'assistant') {
+        return {
+            role: message.role,
+            content: message.content || '',
+            ...(message.tool_calls ? { tool_calls: message.tool_calls } : {})
+        };
+    }
+    // Handle tool messages
+    if (message.role === 'tool') {
+        return {
+            role: 'tool',
+            content: message.content || '',
+            tool_call_id: message.tool_call_id || ''
+        };
+    }
+    // Default fallback
+    return {
+        role: 'user',
+        content: message.content || ''
+    };
+}
+// Convert custom tool format to OpenAI's ChatCompletionTool format
+function convertToOpenAITool(tool) {
+    // If it's already in the correct format, return it
+    if (tool.type === 'function' && typeof tool.type === 'string') {
+        return tool;
+    }
+    // Otherwise, try to convert it
+    return {
+        type: 'function',
+        function: {
+            name: tool.function?.name || '',
+            description: tool.function?.description || '',
+            parameters: tool.function?.parameters || {}
+        }
+    };
+}
 // Singleton instance for OpenAI client
 let openAIInstance = null;
 // Get cached OpenAI client instance
@@ -37,7 +77,7 @@ class OpenAIService {
         }
         return this.client;
     }
-    async generateText(prompt, systemPrompt = '', temperature = 0.7) {
+    async generateText(prompt, systemPrompt = '', temperature = 0.7, tools, tool_choice) {
         await logger_1.Logger.startSpinner('Generating text with OpenAI...');
         const messages = [];
         if (systemPrompt) {
@@ -45,15 +85,31 @@ class OpenAIService {
         }
         messages.push({ role: 'user', content: prompt });
         try {
+            // Convert messages to OpenAI format
+            const openAIMessages = messages.map(convertToOpenAIMessage);
+            // Convert tools to OpenAI format if provided
+            const openAITools = tools ? tools.map(convertToOpenAITool) : undefined;
             const completion = await this.getClient().then(client => client.chat.completions.create({
                 model: this.model,
                 temperature,
-                messages
+                messages: openAIMessages,
+                tools: openAITools,
+                tool_choice
             }));
-            const
-            if (!
+            const message = completion.choices[0]?.message;
+            if (!message) {
                 throw new Error('No response from OpenAI');
             }
+            // Check for tool calls
+            if (message.tool_calls && message.tool_calls.length > 0) {
+                await logger_1.Logger.debug('Tool calls detected in generateText', { tool_calls: message.tool_calls });
+                // For backward compatibility, we return a message about tool calls
+                return 'The model wants to use tools. Please use generateChat or chatCompletion instead.';
+            }
+            const response = message.content;
+            if (!response) {
+                throw new Error('No content in response from OpenAI');
+            }
             await logger_1.Logger.stopSpinner(true);
             await logger_1.Logger.section('Generated Response', response);
             return response;
@@ -64,13 +120,19 @@ class OpenAIService {
             throw error;
         }
     }
-    async generateChat(messages, temperature = 0.7) {
+    async generateChat(messages, temperature = 0.7, tools, tool_choice) {
        await logger_1.Logger.startSpinner('Generating chat response...');
        try {
+            // Convert messages to OpenAI format
+            const openAIMessages = messages.map(convertToOpenAIMessage);
+            // Convert tools to OpenAI format if provided
+            const openAITools = tools ? tools.map(convertToOpenAITool) : undefined;
            const completion = await this.getClient().then(client => client.chat.completions.create({
                model: this.model,
                temperature,
-                messages
+                messages: openAIMessages,
+                tools: openAITools,
+                tool_choice
            }));
            const response = completion.choices[0]?.message;
            if (!response) {
@@ -81,6 +143,11 @@ class OpenAIService {
                content: response.content || '',
                role: response.role
            };
+            // Add tool calls if they exist
+            if (response.tool_calls && response.tool_calls.length > 0) {
+                result.tool_calls = response.tool_calls;
+                await logger_1.Logger.debug('Tool calls detected', { tool_calls: result.tool_calls });
+            }
            await logger_1.Logger.section('Chat Response', result.content);
            return result;
        }
@@ -90,7 +157,7 @@ class OpenAIService {
            throw error;
        }
    }
-    async streamText(prompt, systemPrompt = '', temperature = 0.7, onToken) {
+    async streamText(prompt, systemPrompt = '', temperature = 0.7, onToken, tools, tool_choice, onToolCall) {
        await logger_1.Logger.debug('Starting text stream...', {
            model: this.model,
            temperature
@@ -101,17 +168,52 @@ class OpenAIService {
        }
        messages.push({ role: 'user', content: prompt });
        try {
+            // Convert messages to OpenAI format
+            const openAIMessages = messages.map(convertToOpenAIMessage);
+            // Convert tools to OpenAI format if provided
+            const openAITools = tools ? tools.map(convertToOpenAITool) : undefined;
            const stream = await this.getClient().then(client => client.chat.completions.create({
                model: this.model,
                temperature,
-                messages,
+                messages: openAIMessages,
                stream: true,
+                tools: openAITools,
+                tool_choice
            }));
            let fullResponse = '';
+            const toolCalls = {};
            for await (const chunk of stream) {
-                const
-
-
+                const delta = chunk.choices[0]?.delta;
+                // Handle content tokens
+                if (delta?.content) {
+                    const token = delta.content;
+                    fullResponse += token;
+                    onToken(token);
+                }
+                // Handle tool calls
+                if (delta?.tool_calls && delta.tool_calls.length > 0) {
+                    for (const toolCall of delta.tool_calls) {
+                        const { index } = toolCall;
+                        if (!toolCalls[index]) {
+                            toolCalls[index] = {
+                                id: toolCall.id,
+                                type: toolCall.type,
+                                function: {
+                                    name: toolCall.function?.name || '',
+                                    arguments: ''
+                                }
+                            };
+                        }
+                        // Accumulate function arguments
+                        if (toolCall.function?.arguments) {
+                            toolCalls[index].function.arguments += toolCall.function.arguments;
+                        }
+                        // Call the onToolCall callback if provided
+                        if (onToolCall) {
+                            onToolCall(toolCalls[index]);
+                        }
+                    }
+                }
            }
            await logger_1.Logger.debug('Stream completed successfully');
        }
@@ -120,18 +222,34 @@ class OpenAIService {
            throw error;
        }
    }
-    async chatCompletion(messages, temperature = 0.7) {
+    async chatCompletion(messages, temperature = 0.7, tools, tool_choice) {
        await logger_1.Logger.startSpinner('Chat completion with OpenAI...');
        try {
+            // Convert messages to OpenAI format
+            const openAIMessages = messages.map(convertToOpenAIMessage);
+            // Convert tools to OpenAI format if provided
+            const openAITools = tools ? tools.map(convertToOpenAITool) : undefined;
            const completion = await this.getClient().then(client => client.chat.completions.create({
                model: this.model,
                temperature,
-                messages
+                messages: openAIMessages,
+                tools: openAITools,
+                tool_choice
            }));
+            // Safely access the message
+            if (!completion.choices || completion.choices.length === 0 || !completion.choices[0].message) {
+                throw new Error('No response from OpenAI');
+            }
+            const message = completion.choices[0].message;
            const response = {
-                content:
-                role:
+                content: message.content || '',
+                role: message.role
            };
+            // Add tool calls if they exist
+            if (message.tool_calls && message.tool_calls.length > 0) {
+                response.tool_calls = message.tool_calls;
+                await logger_1.Logger.debug('Tool calls detected', { tool_calls: response.tool_calls });
+            }
            await logger_1.Logger.stopSpinner(true);
            await logger_1.Logger.section('Chat Completion Response', response.content);
            return response;
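
In `streamText`, tool calls arrive as partial deltas keyed by `index`, so `onToolCall` fires repeatedly for the same call while its `arguments` string accumulates; only the last invocation per index is guaranteed to hold complete JSON. A sketch reusing the `llm` and `tools` from the previous example:

```typescript
// Streaming with tools; earlier onToolCall invocations may carry
// incomplete argument JSON, so defer parsing until the stream ends.
(async () => {
    await llm.streamText(
        'What is the weather in Paris?',
        'You are a helpful assistant.',
        0.7,
        (token) => process.stdout.write(token),
        tools,
        'auto',
        (toolCall) => {
            console.log('partial:', toolCall.function.name, toolCall.function.arguments);
        },
    );
})();
```
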
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "praisonai",
-  "version": "1.0.15",
+  "version": "1.0.17",
   "description": "PraisonAI TypeScript AI Agents Framework - Node.js, npm, and Javascript AI Agents Framework",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -51,7 +51,7 @@
     "eslint": "^9.19.0",
     "jest": "^29.7.0",
     "rimraf": "^5.0.5",
-    "ts-jest": "^29.
+    "ts-jest": "^29.2.5",
     "ts-node": "^10.9.2",
     "ts-node-dev": "^2.0.0",
     "typescript": "^5.7.3"
@@ -60,7 +60,7 @@
     "axios": "^1.7.9",
     "dotenv": "^16.4.7",
     "fast-xml-parser": "^4.5.1",
-    "node-fetch": "^
+    "node-fetch": "^2.6.9",
    "openai": "^4.81.0",
    "praisonai": "^1.0.12"
  },
@@ -73,7 +73,7 @@
  },
  "overrides": {
    "whatwg-url": "^14.1.0",
-    "node-fetch": "^
+    "node-fetch": "^2.6.9"
  },
  "engines": {
    "node": ">=14.0.0"