praisonai 1.0.16 → 1.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -220,4 +220,30 @@ src/
 
 ## License
 
-MIT License - see the LICENSE file for details
+MIT License - see the LICENSE file for details
+
+## Testing
+
+### Manual Testing
+
+```bash
+export OPENAI_API_KEY='your-api-key'
+npx ts-node tests/development/simple/single-agent.ts
+npx ts-node tests/development/simple/multi-agent.ts
+npx ts-node tests/development/simple/multi-agents-simple.js
+```
+
+## Examples Testing
+
+```bash
+export OPENAI_API_KEY='your-api-key'
+npx ts-node examples/simple/single-agent.ts
+npx ts-node examples/simple/multi-agent.ts
+```
+
+### Automated Testing (WIP)
+
+```bash
+npm run test
+```
+
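For orientation, here is a minimal sketch of what a manual test script such as `tests/development/simple/single-agent.ts` might contain, inferred from the Agent API visible in the hunks below; the name, instructions, and prompt are illustrative placeholders, not the published script:

```ts
// Hypothetical single-agent script; the config fields match the Agent
// API shown in this diff, but the values are invented for illustration.
import { Agent } from 'praisonai';

const agent = new Agent({
    name: 'DemoAgent',
    instructions: 'You are a concise research assistant.',
    llm: 'gpt-4o-mini', // the default model set in the constructor below
    verbose: true
});

// start() resolves with the model's final text output
agent.start('Summarise what AI agents are in two sentences.')
    .then(result => console.log(result))
    .catch(err => console.error(err));
```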
@@ -1,5 +1,6 @@
 export declare function setTaskMode(enabled: boolean): void;
 export { Agent, PraisonAIAgents, Task } from './proxy';
+export type { ProxyAgentConfig } from './proxy';
 export type { AgentConfig } from './types';
 export type { TaskConfig } from './types';
-export type { PraisonAIAgentsConfig } from './simple';
+export type { PraisonAIAgentsConfig, SimpleAgentConfig } from './simple';
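This hunk (the package's typings entry point) re-exports `ProxyAgentConfig` and `SimpleAgentConfig`, so consumers can annotate their configs without reaching into internal module paths. An illustrative sketch, assuming the fields shown elsewhere in this diff:

```ts
// Illustrative: typing config objects with the newly exported types.
import { Agent } from 'praisonai';
import type { ProxyAgentConfig, SimpleAgentConfig } from 'praisonai';

const base: SimpleAgentConfig = {
    instructions: 'Answer briefly.', // assumed field, set in the constructor below
    markdown: false
};

const config: ProxyAgentConfig = { ...base };
const agent = new Agent(config);
```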
@@ -3,6 +3,8 @@ import { TaskAgentConfig } from './types';
 import { Task } from './types';
 export interface ProxyAgentConfig extends Partial<SimpleAgentConfig>, Partial<TaskAgentConfig> {
     task?: Task;
+    tools?: any[];
+    toolFunctions?: Record<string, Function>;
 }
 export declare class Agent {
     private simpleAgent;
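These two new fields are the heart of the release: `tools` carries OpenAI-style function definitions and `toolFunctions` maps each function name to an implementation. A hypothetical pairing (the weather tool is invented for illustration):

```ts
// Hypothetical: a tool definition plus its implementation. Arguments
// reach the function in declaration order, because the agent spreads
// Object.values(parsedArgs) into the call (see processToolCalls below).
import { Agent } from 'praisonai';

const agent = new Agent({
    instructions: 'Use get_weather when asked about the weather.',
    tools: [{
        type: 'function',
        function: {
            name: 'get_weather',
            description: 'Get the current weather for a city',
            parameters: {
                type: 'object',
                properties: { city: { type: 'string' } },
                required: ['city']
            }
        }
    }],
    toolFunctions: {
        get_weather: async (city: string) => `Sunny in ${city}`
    }
});

agent.start('What is the weather in Paris?').then(console.log);
```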
@@ -28,10 +28,42 @@ class Agent {
                 name: config.name,
                 verbose: config.verbose,
                 llm: config.llm,
-                markdown: config.markdown
+                markdown: config.markdown,
+                tools: config.tools,
+                toolFunctions: config.toolFunctions
             };
             this.simpleAgent = new simple_1.Agent(simpleConfig);
         }
+        // Register tool functions if provided
+        if (config.tools && this.simpleAgent) {
+            // Look for tool functions in the global scope
+            for (const tool of config.tools) {
+                if (tool.type === 'function' && tool.function && tool.function.name) {
+                    const funcName = tool.function.name;
+                    // Check if function exists in global scope using a safer approach
+                    const globalAny = global;
+                    if (typeof globalAny[funcName] === 'function') {
+                        this.simpleAgent.registerToolFunction(funcName, globalAny[funcName]);
+                    }
+                    else if (typeof globalAny['get_' + funcName] === 'function') {
+                        // Try with 'get_' prefix (common convention)
+                        this.simpleAgent.registerToolFunction(funcName, globalAny['get_' + funcName]);
+                    }
+                    else {
+                        // Try to find the function in the global scope by iterating through all properties
+                        for (const key in globalAny) {
+                            if (key.toLowerCase() === funcName.toLowerCase() ||
+                                key.toLowerCase() === 'get_' + funcName.toLowerCase()) {
+                                if (typeof globalAny[key] === 'function') {
+                                    this.simpleAgent.registerToolFunction(funcName, globalAny[key]);
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
     }
     getInstructions() {
         return this.instructions;
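When no `toolFunctions` map is supplied, the constructor above falls back to scanning the Node global object: the exact name first, then a `get_`-prefixed name, then a case-insensitive sweep. A sketch of leaning on that fallback (assigning to `global` is shown purely to illustrate the lookup, not as a recommended pattern):

```ts
// Illustrative: the proxy Agent discovers get_weather on the global
// object via the 'get_' prefix branch of the lookup above.
import { Agent } from 'praisonai';

(global as any).get_weather = async (city: string) => `Sunny in ${city}`;

const agent = new Agent({
    instructions: 'Answer weather questions using the weather tool.',
    tools: [{
        type: 'function',
        function: {
            name: 'weather',
            description: 'Get the weather for a city',
            parameters: {
                type: 'object',
                properties: { city: { type: 'string' } },
                required: ['city']
            }
        }
    }]
});
```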
@@ -6,6 +6,8 @@ export interface SimpleAgentConfig {
     llm?: string;
     markdown?: boolean;
     stream?: boolean;
+    tools?: any[];
+    toolFunctions?: Record<string, Function>;
 }
 export declare class Agent {
     private instructions;
@@ -16,8 +18,34 @@ export declare class Agent {
     private markdown;
     private stream;
     private llmService;
+    private tools?;
+    private toolFunctions;
     constructor(config: SimpleAgentConfig);
     private createSystemPrompt;
+    /**
+     * Register a tool function that can be called by the model
+     * @param name Function name
+     * @param fn Function implementation
+     */
+    registerToolFunction(name: string, fn: Function): void;
+    /**
+     * Check if a tool definition exists for the given function name
+     * @param name Function name
+     * @returns True if a tool definition exists
+     */
+    private hasToolDefinition;
+    /**
+     * Auto-generate a tool definition based on the function
+     * @param name Function name
+     * @param func Function implementation
+     */
+    private addAutoGeneratedToolDefinition;
+    /**
+     * Process tool calls from the model
+     * @param toolCalls Tool calls from the model
+     * @returns Array of tool results
+     */
+    private processToolCalls;
     start(prompt: string, previousResult?: string): Promise<string>;
     chat(prompt: string, previousResult?: string): Promise<string>;
     execute(previousResult?: string): Promise<string>;
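Of these, only `registerToolFunction` is newly public; the rest are private. The tool results that `processToolCalls` promises here have a fixed message shape, reconstructed from the implementation further down:

```ts
// Shape of a single tool-result message, per processToolCalls below
// (the id and content values are illustrative).
const toolResult = {
    role: 'tool' as const,
    tool_call_id: 'call_abc123',  // echoes the model's tool-call id
    content: 'Sunny in Paris'     // result.toString() of the return value
};
```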
@@ -5,6 +5,7 @@ const openai_1 = require("../llm/openai");
 const logger_1 = require("../utils/logger");
 class Agent {
     constructor(config) {
+        this.toolFunctions = {};
         this.instructions = config.instructions;
         this.name = config.name || `Agent_${Math.random().toString(36).substr(2, 9)}`;
         this.verbose = config.verbose ?? process.env.PRAISON_VERBOSE !== 'false';
@@ -12,10 +13,21 @@ class Agent {
         this.llm = config.llm || 'gpt-4o-mini';
         this.markdown = config.markdown ?? true;
         this.stream = config.stream ?? true;
+        this.tools = config.tools;
         this.llmService = new openai_1.OpenAIService(this.llm);
         // Configure logging
         logger_1.Logger.setVerbose(this.verbose);
         logger_1.Logger.setPretty(this.pretty);
+        // Register directly provided tool functions if any
+        if (config.toolFunctions) {
+            for (const [name, func] of Object.entries(config.toolFunctions)) {
+                this.registerToolFunction(name, func);
+                // Auto-generate tool definition if not already provided
+                if (!this.hasToolDefinition(name)) {
+                    this.addAutoGeneratedToolDefinition(name, func);
+                }
+            }
+        }
     }
     createSystemPrompt() {
         let prompt = this.instructions;
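Given this constructor logic, supplying only `toolFunctions` is enough: each entry is registered, and because `hasToolDefinition` finds no matching entry in `tools`, a schema is auto-generated from the function's parameter names (see the next hunk). A minimal sketch (the function is invented for illustration):

```ts
// Illustrative: no tools array at all; the constructor auto-generates
// a string-typed parameter schema from the function's parameter names.
import { Agent } from 'praisonai';

const agent = new Agent({
    instructions: 'Use convert_currency for currency questions.',
    toolFunctions: {
        convert_currency: (amount: string, target: string) =>
            `${amount} in ${target} (placeholder conversion)`
    }
});
```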
@@ -24,6 +36,115 @@ class Agent {
         }
         return prompt;
     }
+    /**
+     * Register a tool function that can be called by the model
+     * @param name Function name
+     * @param fn Function implementation
+     */
+    registerToolFunction(name, fn) {
+        this.toolFunctions[name] = fn;
+        logger_1.Logger.debug(`Registered tool function: ${name}`);
+    }
+    /**
+     * Check if a tool definition exists for the given function name
+     * @param name Function name
+     * @returns True if a tool definition exists
+     */
+    hasToolDefinition(name) {
+        if (!this.tools)
+            return false;
+        return this.tools.some(tool => {
+            if (tool.type === 'function' && tool.function) {
+                return tool.function.name === name;
+            }
+            return false;
+        });
+    }
+    /**
+     * Auto-generate a tool definition based on the function
+     * @param name Function name
+     * @param func Function implementation
+     */
+    addAutoGeneratedToolDefinition(name, func) {
+        if (!this.tools) {
+            this.tools = [];
+        }
+        // Extract parameter names from function
+        const funcStr = func.toString();
+        const paramMatch = funcStr.match(/\(([^)]*)\)/);
+        const params = paramMatch ? paramMatch[1].split(',').map(p => p.trim()).filter(p => p) : [];
+        // Create a basic tool definition
+        const toolDef = {
+            type: "function",
+            function: {
+                name,
+                description: `Auto-generated function for ${name}`,
+                parameters: {
+                    type: "object",
+                    properties: {},
+                    required: []
+                }
+            }
+        };
+        // Add parameters to the definition
+        if (params.length > 0) {
+            const properties = {};
+            const required = [];
+            params.forEach(param => {
+                // Remove type annotations if present
+                const paramName = param.split(':')[0].trim();
+                if (paramName) {
+                    properties[paramName] = {
+                        type: "string",
+                        description: `Parameter ${paramName} for function ${name}`
+                    };
+                    required.push(paramName);
+                }
+            });
+            toolDef.function.parameters.properties = properties;
+            toolDef.function.parameters.required = required;
+        }
+        this.tools.push(toolDef);
+        logger_1.Logger.debug(`Auto-generated tool definition for ${name}`);
+    }
+    /**
+     * Process tool calls from the model
+     * @param toolCalls Tool calls from the model
+     * @returns Array of tool results
+     */
+    async processToolCalls(toolCalls) {
+        const results = [];
+        for (const toolCall of toolCalls) {
+            const { id, function: { name, arguments: argsString } } = toolCall;
+            await logger_1.Logger.debug(`Processing tool call: ${name}`, { arguments: argsString });
+            try {
+                // Parse arguments
+                const args = JSON.parse(argsString);
+                // Check if function exists
+                if (!this.toolFunctions[name]) {
+                    throw new Error(`Function ${name} not registered`);
+                }
+                // Call the function
+                const result = await this.toolFunctions[name](...Object.values(args));
+                // Add result to messages
+                results.push({
+                    role: 'tool',
+                    tool_call_id: id,
+                    content: result.toString()
+                });
+                await logger_1.Logger.debug(`Tool call result for ${name}:`, { result });
+            }
+            catch (error) {
+                await logger_1.Logger.error(`Error executing tool ${name}:`, error);
+                results.push({
+                    role: 'tool',
+                    tool_call_id: id,
+                    content: `Error: ${error.message || 'Unknown error'}`
+                });
+            }
+        }
+        return results;
+    }
     async start(prompt, previousResult) {
         await logger_1.Logger.debug(`Agent ${this.name} starting with prompt: ${prompt}`);
         try {
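Note that `addAutoGeneratedToolDefinition` recovers parameter names by regexing `func.toString()` with `/\(([^)]*)\)/`, so every parameter is emitted as a required string (destructured or defaulted parameters will extract oddly). For a hypothetical `(city, units) => ...` implementation registered under `get_forecast`, the generated definition comes out roughly as:

```ts
// Reconstructed output of addAutoGeneratedToolDefinition for a
// hypothetical get_forecast(city, units) implementation.
const generated = {
    type: 'function',
    function: {
        name: 'get_forecast',
        description: 'Auto-generated function for get_forecast',
        parameters: {
            type: 'object',
            properties: {
                city: { type: 'string', description: 'Parameter city for function get_forecast' },
                units: { type: 'string', description: 'Parameter units for function get_forecast' }
            },
            required: ['city', 'units']
        }
    }
};
```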
@@ -31,19 +152,61 @@ class Agent {
             if (previousResult) {
                 prompt = prompt.replace('{{previous}}', previousResult);
             }
-            let response;
-            if (this.stream) {
+            // Initialize messages array
+            const messages = [
+                { role: 'system', content: this.createSystemPrompt() },
+                { role: 'user', content: prompt }
+            ];
+            let finalResponse = '';
+            if (this.stream && !this.tools) {
+                // Use streaming without tools
                 let fullResponse = '';
                 await this.llmService.streamText(prompt, this.createSystemPrompt(), 0.7, (token) => {
                     process.stdout.write(token);
                     fullResponse += token;
                 });
-                response = fullResponse;
+                finalResponse = fullResponse;
+            }
+            else if (this.tools) {
+                // Use tools (non-streaming for now to simplify implementation)
+                let continueConversation = true;
+                let iterations = 0;
+                const maxIterations = 5; // Prevent infinite loops
+                while (continueConversation && iterations < maxIterations) {
+                    iterations++;
+                    // Get response from LLM
+                    const response = await this.llmService.generateChat(messages, 0.7, this.tools);
+                    // Add assistant response to messages
+                    messages.push({
+                        role: 'assistant',
+                        content: response.content || '',
+                        tool_calls: response.tool_calls
+                    });
+                    // Check if there are tool calls to process
+                    if (response.tool_calls && response.tool_calls.length > 0) {
+                        // Process tool calls
+                        const toolResults = await this.processToolCalls(response.tool_calls);
+                        // Add tool results to messages
+                        messages.push(...toolResults);
+                        // Continue conversation to get final response
+                        continueConversation = true;
+                    }
+                    else {
+                        // No tool calls, we have our final response
+                        finalResponse = response.content || '';
+                        continueConversation = false;
+                    }
+                }
+                if (iterations >= maxIterations) {
+                    await logger_1.Logger.warn(`Reached maximum iterations (${maxIterations}) for tool calls`);
+                }
             }
             else {
-                response = await this.llmService.generateText(prompt, this.createSystemPrompt());
+                // Use regular text generation without streaming
+                const response = await this.llmService.generateText(prompt, this.createSystemPrompt());
+                finalResponse = response;
             }
-            return response;
+            return finalResponse;
         }
         catch (error) {
             await logger_1.Logger.error('Error in agent execution', error);
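Two behavioural notes fall out of this rewrite: token streaming is bypassed whenever `tools` are configured (the tool loop is non-streaming and capped at five LLM round-trips), and the pre-existing `{{previous}}` placeholder still works for chaining. A sketch of the chaining pattern (the agents and prompts are illustrative):

```ts
// Illustrative: previousResult replaces the {{previous}} placeholder
// in the prompt, allowing one agent's output to feed the next.
import { Agent } from 'praisonai';

const researcher = new Agent({ instructions: 'List facts tersely.' });
const writer = new Agent({ instructions: 'Write clear prose.' });

async function main() {
    const facts = await researcher.start('List three facts about Mars.');
    const article = await writer.start(
        'Write one paragraph based on: {{previous}}', facts);
    console.log(article);
}

main().catch(console.error);
```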
@@ -1,20 +1,38 @@
+import type { ChatCompletionTool, ChatCompletionToolChoiceOption } from 'openai/resources/chat/completions';
 export interface LLMResponse {
     content: string;
     role: string;
+    tool_calls?: Array<{
+        id: string;
+        type: string;
+        function: {
+            name: string;
+            arguments: string;
+        };
+    }>;
 }
-type ChatRole = 'system' | 'user' | 'assistant';
+type ChatRole = 'system' | 'user' | 'assistant' | 'tool';
 interface ChatMessage {
     role: ChatRole;
-    content: string;
+    content: string | null;
+    tool_call_id?: string;
+    tool_calls?: Array<{
+        id: string;
+        type: string;
+        function: {
+            name: string;
+            arguments: string;
+        };
+    }>;
 }
 export declare class OpenAIService {
     private model;
     private client;
     constructor(model?: string);
     private getClient;
-    generateText(prompt: string, systemPrompt?: string, temperature?: number): Promise<string>;
-    generateChat(messages: ChatMessage[], temperature?: number): Promise<LLMResponse>;
-    streamText(prompt: string, systemPrompt: string | undefined, temperature: number | undefined, onToken: (token: string) => void): Promise<void>;
-    chatCompletion(messages: ChatMessage[], temperature?: number): Promise<LLMResponse>;
+    generateText(prompt: string, systemPrompt?: string, temperature?: number, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption): Promise<string>;
+    generateChat(messages: ChatMessage[], temperature?: number, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption): Promise<LLMResponse>;
+    streamText(prompt: string, systemPrompt: string | undefined, temperature: number | undefined, onToken: (token: string) => void, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption, onToolCall?: (toolCall: any) => void): Promise<void>;
+    chatCompletion(messages: ChatMessage[], temperature?: number, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption): Promise<LLMResponse>;
 }
 export {};
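For anyone driving the service layer directly, the widened signatures accept OpenAI's own tool types. A sketch; note the deep import path below is an assumption (this is an internal module, not a documented entry point), and `OPENAI_API_KEY` must be set or the module throws at load time:

```ts
// Illustrative: calling generateChat with a tool. tool_calls on the
// response is only populated when the model elects to call a tool.
import type { ChatCompletionTool } from 'openai/resources/chat/completions';
import { OpenAIService } from 'praisonai/dist/llm/openai'; // assumed internal path

const tools: ChatCompletionTool[] = [{
    type: 'function',
    function: {
        name: 'get_time',
        description: 'Get the current time for a timezone',
        parameters: {
            type: 'object',
            properties: { tz: { type: 'string' } },
            required: ['tz']
        }
    }
}];

async function main() {
    const service = new OpenAIService('gpt-4o-mini');
    const res = await service.generateChat(
        [{ role: 'user', content: 'What time is it in UTC?' }],
        0.7,
        tools,
        'auto'
    );
    if (res.tool_calls?.length) {
        console.log('Model requested:', res.tool_calls[0].function.name);
    } else {
        console.log(res.content);
    }
}

main().catch(console.error);
```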
@@ -12,6 +12,46 @@ dotenv_1.default.config();
 if (!process.env.OPENAI_API_KEY) {
     throw new Error('OPENAI_API_KEY not found in environment variables');
 }
+// Convert our ChatMessage to OpenAI's ChatCompletionMessageParam
+function convertToOpenAIMessage(message) {
+    // Basic conversion for common message types
+    if (message.role === 'system' || message.role === 'user' || message.role === 'assistant') {
+        return {
+            role: message.role,
+            content: message.content || '',
+            ...(message.tool_calls ? { tool_calls: message.tool_calls } : {})
+        };
+    }
+    // Handle tool messages
+    if (message.role === 'tool') {
+        return {
+            role: 'tool',
+            content: message.content || '',
+            tool_call_id: message.tool_call_id || ''
+        };
+    }
+    // Default fallback
+    return {
+        role: 'user',
+        content: message.content || ''
+    };
+}
+// Convert custom tool format to OpenAI's ChatCompletionTool format
+function convertToOpenAITool(tool) {
+    // If it's already in the correct format, return it
+    if (tool.type === 'function' && typeof tool.type === 'string') {
+        return tool;
+    }
+    // Otherwise, try to convert it
+    return {
+        type: 'function',
+        function: {
+            name: tool.function?.name || '',
+            description: tool.function?.description || '',
+            parameters: tool.function?.parameters || {}
+        }
+    };
+}
 // Singleton instance for OpenAI client
 let openAIInstance = null;
 // Get cached OpenAI client instance
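Note the first guard in `convertToOpenAITool` effectively reduces to `tool.type === 'function'` (the `typeof` clause is always true once the equality holds); anything else is coerced into a complete definition. An illustrative input/output pair:

```ts
// Illustrative: a partially specified tool and the normalized form
// the fallback branch above would return for it.
const loose = { function: { name: 'lookup' } };
// convertToOpenAITool(loose) =>
// {
//     type: 'function',
//     function: { name: 'lookup', description: '', parameters: {} }
// }
```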
@@ -37,7 +77,7 @@ class OpenAIService {
         }
         return this.client;
     }
-    async generateText(prompt, systemPrompt = '', temperature = 0.7) {
+    async generateText(prompt, systemPrompt = '', temperature = 0.7, tools, tool_choice) {
         await logger_1.Logger.startSpinner('Generating text with OpenAI...');
         const messages = [];
         if (systemPrompt) {
@@ -45,15 +85,31 @@ class OpenAIService {
         }
         messages.push({ role: 'user', content: prompt });
         try {
+            // Convert messages to OpenAI format
+            const openAIMessages = messages.map(convertToOpenAIMessage);
+            // Convert tools to OpenAI format if provided
+            const openAITools = tools ? tools.map(convertToOpenAITool) : undefined;
             const completion = await this.getClient().then(client => client.chat.completions.create({
                 model: this.model,
                 temperature,
-                messages
+                messages: openAIMessages,
+                tools: openAITools,
+                tool_choice
             }));
-            const response = completion.choices[0]?.message?.content;
-            if (!response) {
+            const message = completion.choices[0]?.message;
+            if (!message) {
                 throw new Error('No response from OpenAI');
             }
+            // Check for tool calls
+            if (message.tool_calls && message.tool_calls.length > 0) {
+                await logger_1.Logger.debug('Tool calls detected in generateText', { tool_calls: message.tool_calls });
+                // For backward compatibility, we return a message about tool calls
+                return 'The model wants to use tools. Please use generateChat or chatCompletion instead.';
+            }
+            const response = message.content;
+            if (!response) {
+                throw new Error('No content in response from OpenAI');
+            }
             await logger_1.Logger.stopSpinner(true);
             await logger_1.Logger.section('Generated Response', response);
             return response;
@@ -64,13 +120,19 @@ class OpenAIService {
             throw error;
         }
     }
-    async generateChat(messages, temperature = 0.7) {
+    async generateChat(messages, temperature = 0.7, tools, tool_choice) {
         await logger_1.Logger.startSpinner('Generating chat response...');
         try {
+            // Convert messages to OpenAI format
+            const openAIMessages = messages.map(convertToOpenAIMessage);
+            // Convert tools to OpenAI format if provided
+            const openAITools = tools ? tools.map(convertToOpenAITool) : undefined;
             const completion = await this.getClient().then(client => client.chat.completions.create({
                 model: this.model,
                 temperature,
-                messages
+                messages: openAIMessages,
+                tools: openAITools,
+                tool_choice
             }));
             const response = completion.choices[0]?.message;
             if (!response) {
@@ -81,6 +143,11 @@ class OpenAIService {
                 content: response.content || '',
                 role: response.role
             };
+            // Add tool calls if they exist
+            if (response.tool_calls && response.tool_calls.length > 0) {
+                result.tool_calls = response.tool_calls;
+                await logger_1.Logger.debug('Tool calls detected', { tool_calls: result.tool_calls });
+            }
             await logger_1.Logger.section('Chat Response', result.content);
             return result;
         }
@@ -90,7 +157,7 @@ class OpenAIService {
             throw error;
         }
     }
-    async streamText(prompt, systemPrompt = '', temperature = 0.7, onToken) {
+    async streamText(prompt, systemPrompt = '', temperature = 0.7, onToken, tools, tool_choice, onToolCall) {
         await logger_1.Logger.debug('Starting text stream...', {
             model: this.model,
             temperature
@@ -101,17 +168,52 @@ class OpenAIService {
         }
         messages.push({ role: 'user', content: prompt });
         try {
+            // Convert messages to OpenAI format
+            const openAIMessages = messages.map(convertToOpenAIMessage);
+            // Convert tools to OpenAI format if provided
+            const openAITools = tools ? tools.map(convertToOpenAITool) : undefined;
             const stream = await this.getClient().then(client => client.chat.completions.create({
                 model: this.model,
                 temperature,
-                messages,
+                messages: openAIMessages,
                 stream: true,
+                tools: openAITools,
+                tool_choice
             }));
             let fullResponse = '';
+            const toolCalls = {};
             for await (const chunk of stream) {
-                const token = chunk.choices[0]?.delta?.content || '';
-                fullResponse += token;
-                onToken(token);
+                const delta = chunk.choices[0]?.delta;
+                // Handle content tokens
+                if (delta?.content) {
+                    const token = delta.content;
+                    fullResponse += token;
+                    onToken(token);
+                }
+                // Handle tool calls
+                if (delta?.tool_calls && delta.tool_calls.length > 0) {
+                    for (const toolCall of delta.tool_calls) {
+                        const { index } = toolCall;
+                        if (!toolCalls[index]) {
+                            toolCalls[index] = {
+                                id: toolCall.id,
+                                type: toolCall.type,
+                                function: {
+                                    name: toolCall.function?.name || '',
+                                    arguments: ''
+                                }
+                            };
+                        }
+                        // Accumulate function arguments
+                        if (toolCall.function?.arguments) {
+                            toolCalls[index].function.arguments += toolCall.function.arguments;
+                        }
+                        // Call the onToolCall callback if provided
+                        if (onToolCall) {
+                            onToolCall(toolCalls[index]);
+                        }
+                    }
+                }
             }
             await logger_1.Logger.debug('Stream completed successfully');
         }
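Because the API streams function arguments as incremental JSON fragments, the accumulator above rebuilds each call keyed by its `index`, and `onToolCall` fires on every fragment with the partially built call; arguments are only valid JSON once the stream ends. A consumption sketch, reusing the `service` and `tools` from the earlier service-layer sketch:

```ts
// Illustrative: the last onToolCall invocation per index carries the
// fully accumulated arguments; earlier ones may hold partial JSON.
await service.streamText(
    'What time is it in UTC?',
    'You may call tools.',
    0.7,
    token => process.stdout.write(token),
    tools,
    'auto',
    call => {
        // call.function.arguments grows across chunks; parse it only
        // after the stream has completed.
        console.log('tool call so far:', call.function.name);
    }
);
```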
@@ -120,18 +222,34 @@ class OpenAIService {
             throw error;
         }
     }
-    async chatCompletion(messages, temperature = 0.7) {
+    async chatCompletion(messages, temperature = 0.7, tools, tool_choice) {
         await logger_1.Logger.startSpinner('Chat completion with OpenAI...');
         try {
+            // Convert messages to OpenAI format
+            const openAIMessages = messages.map(convertToOpenAIMessage);
+            // Convert tools to OpenAI format if provided
+            const openAITools = tools ? tools.map(convertToOpenAITool) : undefined;
             const completion = await this.getClient().then(client => client.chat.completions.create({
                 model: this.model,
                 temperature,
-                messages
+                messages: openAIMessages,
+                tools: openAITools,
+                tool_choice
             }));
+            // Safely access the message
+            if (!completion.choices || completion.choices.length === 0 || !completion.choices[0].message) {
+                throw new Error('No response from OpenAI');
+            }
+            const message = completion.choices[0].message;
             const response = {
-                content: completion.choices[0].message.content || '',
-                role: completion.choices[0].message.role
+                content: message.content || '',
+                role: message.role
             };
+            // Add tool calls if they exist
+            if (message.tool_calls && message.tool_calls.length > 0) {
+                response.tool_calls = message.tool_calls;
+                await logger_1.Logger.debug('Tool calls detected', { tool_calls: response.tool_calls });
+            }
             await logger_1.Logger.stopSpinner(true);
             await logger_1.Logger.section('Chat Completion Response', response.content);
             return response;
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "praisonai",
-  "version": "1.0.16",
+  "version": "1.0.18",
   "description": "PraisonAI TypeScript AI Agents Framework - Node.js, npm, and Javascript AI Agents Framework",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -62,7 +62,7 @@
     "fast-xml-parser": "^4.5.1",
     "node-fetch": "^2.6.9",
     "openai": "^4.81.0",
-    "praisonai": "^1.0.12"
+    "praisonai": "^1.0.17"
   },
   "optionalDependencies": {
     "boxen": "^7.1.1",