praisonai 1.3.1 → 1.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/simple.js +12 -8
- package/dist/llm/openai.d.ts +1 -0
- package/dist/llm/openai.js +30 -0
- package/package.json +1 -1
package/dist/agent/simple.js
CHANGED
@@ -277,20 +277,24 @@ class Agent {
         if (previousResult) {
             prompt = prompt.replace('{{previous}}', previousResult);
         }
-        // Initialize messages array
+        // Initialize messages array with system prompt and conversation history
         const messages = [
-            { role: 'system', content: this.createSystemPrompt() },
-            { role: 'user', content: prompt }
+            { role: 'system', content: this.createSystemPrompt() }
         ];
+        // Add conversation history (excluding the current prompt which will be added below)
+        for (const msg of this.messages) {
+            if (msg.role && msg.content) {
+                messages.push({ role: msg.role, content: msg.content });
+            }
+        }
+        // Add current user prompt
+        messages.push({ role: 'user', content: prompt });
         let finalResponse = '';
         if (this.stream && !this.tools) {
-            // Use streaming
-
-            await this.llmService.streamText(prompt, this.createSystemPrompt(), 0.7, (token) => {
+            // Use streaming with full conversation history
+            finalResponse = await this.llmService.streamChat(messages, 0.7, (token) => {
                 process.stdout.write(token);
-                fullResponse += token;
             });
-            finalResponse = fullResponse;
         }
         else if (this.tools) {
             // Use tools (non-streaming for now to simplify implementation)
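For context, the new message-assembly order in chat() can be sketched as a standalone TypeScript function (the names buildMessages, history, and systemPrompt are illustrative, not part of the package): stored turns are replayed between the system prompt and the current user prompt, so each call now carries the full conversation.

type ChatMessage = { role: string; content: string };

// Hypothetical sketch of the ordering the diff establishes:
function buildMessages(systemPrompt: string, history: ChatMessage[], prompt: string): ChatMessage[] {
    const messages: ChatMessage[] = [{ role: 'system', content: systemPrompt }];
    for (const msg of history) {
        if (msg.role && msg.content) {                    // skip malformed entries, as the diff does
            messages.push({ role: msg.role, content: msg.content });
        }
    }
    messages.push({ role: 'user', content: prompt });     // current turn always goes last
    return messages;
}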
package/dist/llm/openai.d.ts
CHANGED
@@ -33,6 +33,7 @@ export declare class OpenAIService {
     generateText(prompt: string, systemPrompt?: string, temperature?: number, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption): Promise<string>;
     generateChat(messages: ChatMessage[], temperature?: number, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption): Promise<LLMResponse>;
     streamText(prompt: string, systemPrompt: string | undefined, temperature: number | undefined, onToken: (token: string) => void, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption, onToolCall?: (toolCall: any) => void): Promise<void>;
+    streamChat(messages: ChatMessage[], temperature: number | undefined, onToken: (token: string) => void): Promise<string>;
     chatCompletion(messages: ChatMessage[], temperature?: number, tools?: ChatCompletionTool[], tool_choice?: ChatCompletionToolChoiceOption): Promise<LLMResponse>;
 }
 export {};
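Based on the declaration above, a hypothetical caller might look like the following sketch (the import path and surrounding setup are assumptions; only the streamChat call itself mirrors the .d.ts):

import type { OpenAIService } from 'praisonai';   // import path assumed for illustration

async function demo(llm: OpenAIService): Promise<void> {
    const reply = await llm.streamChat(
        [
            { role: 'system', content: 'You are concise.' },
            { role: 'user', content: 'Summarize the release.' }
        ],
        undefined,                                 // undefined temperature falls back to the 0.7 default
        (token) => process.stdout.write(token)     // invoked once per streamed token
    );
    console.log('\nFull reply:', reply);           // the accumulated text is also returned
}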
package/dist/llm/openai.js
CHANGED
@@ -230,6 +230,36 @@ class OpenAIService {
             throw error;
         }
     }
+    async streamChat(messages, temperature = 0.7, onToken) {
+        await logger_1.Logger.debug('Starting chat stream with messages...', {
+            model: this.model,
+            messageCount: messages.length
+        });
+        try {
+            const openAIMessages = messages.map(convertToOpenAIMessage);
+            const stream = await this.getClient().then(client => client.chat.completions.create({
+                model: this.model,
+                temperature,
+                messages: openAIMessages,
+                stream: true
+            }));
+            let fullResponse = '';
+            for await (const chunk of stream) {
+                const delta = chunk.choices[0]?.delta;
+                if (delta?.content) {
+                    const token = delta.content;
+                    fullResponse += token;
+                    onToken(token);
+                }
+            }
+            await logger_1.Logger.debug('Chat stream completed');
+            return fullResponse;
+        }
+        catch (error) {
+            await logger_1.Logger.error('Error in chat stream', error);
+            throw error;
+        }
+    }
     async chatCompletion(messages, temperature = 0.7, tools, tool_choice) {
         await logger_1.Logger.startSpinner('Chat completion with OpenAI...');
         try {
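Design note: unlike streamText, which takes a single prompt plus an optional system prompt and resolves to void, streamChat accepts the full messages array and returns the accumulated text. That return value is what lets simple.js drop its fullResponse bookkeeping and assign the awaited result straight to finalResponse.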