@proteinjs/conversation 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -0
- package/LICENSE +21 -0
- package/dist/index.js +27 -0
- package/dist/jest.config.js +10 -0
- package/dist/src/CodegenConversation.js +120 -0
- package/dist/src/Conversation.js +193 -0
- package/dist/src/ConversationModule.js +2 -0
- package/dist/src/Function.js +2 -0
- package/dist/src/OpenAi.js +209 -0
- package/dist/src/Paragraph.js +18 -0
- package/dist/src/Sentence.js +22 -0
- package/dist/src/code_template/Code.js +41 -0
- package/dist/src/code_template/CodeTemplate.js +39 -0
- package/dist/src/code_template/CodeTemplateModule.js +46 -0
- package/dist/src/code_template/Repo.js +127 -0
- package/dist/src/fs/conversation_fs/ConversationFsModerator.js +99 -0
- package/dist/src/fs/conversation_fs/ConversationFsModule.js +68 -0
- package/dist/src/fs/conversation_fs/FsFunctions.js +256 -0
- package/dist/src/fs/git/GitModule.js +45 -0
- package/dist/src/fs/keyword_to_files_index/KeywordToFilesIndexFunctions.js +65 -0
- package/dist/src/fs/keyword_to_files_index/KeywordToFilesIndexModule.js +89 -0
- package/dist/src/fs/package/PackageFunctions.js +214 -0
- package/dist/src/fs/package/PackageModule.js +102 -0
- package/dist/src/history/MessageHistory.js +44 -0
- package/dist/src/history/MessageModerator.js +2 -0
- package/dist/src/template/ConversationTemplate.js +2 -0
- package/dist/src/template/ConversationTemplateFunctions.js +54 -0
- package/dist/src/template/ConversationTemplateModule.js +80 -0
- package/dist/src/template/createApp/CreateAppTemplate.js +40 -0
- package/dist/src/template/createCode/CreateCodeConversationTemplate.js +51 -0
- package/dist/src/template/createPackage/CreatePackageConversationTemplate.js +54 -0
- package/dist/src/template/createPackage/jest.config.js +10 -0
- package/dist/src/template/createPackage/tsconfig.json +13 -0
- package/dist/test/createKeywordFilesIndex.test.js +17 -0
- package/dist/test/openai/openai.generateList.test.js +16 -0
- package/dist/test/openai/openai.parseCodeFromMarkdown.test.js +18 -0
- package/dist/test/repo/repo.test.js +29 -0
- package/dist/test/setup.js +1 -0
- package/index.ts +11 -0
- package/jest.config.js +9 -0
- package/package.json +34 -0
- package/src/CodegenConversation.ts +92 -0
- package/src/Conversation.ts +207 -0
- package/src/ConversationModule.ts +13 -0
- package/src/Function.ts +8 -0
- package/src/OpenAi.ts +212 -0
- package/src/Paragraph.ts +17 -0
- package/src/Sentence.ts +20 -0
- package/src/code_template/Code.ts +53 -0
- package/src/code_template/CodeTemplate.ts +39 -0
- package/src/code_template/CodeTemplateModule.ts +50 -0
- package/src/code_template/Repo.ts +156 -0
- package/src/fs/conversation_fs/ConversationFsModerator.ts +121 -0
- package/src/fs/conversation_fs/ConversationFsModule.ts +64 -0
- package/src/fs/conversation_fs/FsFunctions.ts +253 -0
- package/src/fs/git/GitModule.ts +39 -0
- package/src/fs/keyword_to_files_index/KeywordToFilesIndexFunctions.ts +55 -0
- package/src/fs/keyword_to_files_index/KeywordToFilesIndexModule.ts +90 -0
- package/src/fs/package/PackageFunctions.ts +210 -0
- package/src/fs/package/PackageModule.ts +106 -0
- package/src/history/MessageHistory.ts +57 -0
- package/src/history/MessageModerator.ts +6 -0
- package/src/template/ConversationTemplate.ts +12 -0
- package/src/template/ConversationTemplateFunctions.ts +43 -0
- package/src/template/ConversationTemplateModule.ts +83 -0
- package/src/template/createApp/CreateAppTemplate.ts +33 -0
- package/src/template/createCode/CreateCodeConversationTemplate.ts +41 -0
- package/src/template/createPackage/CreatePackageConversationTemplate.ts +42 -0
- package/src/template/createPackage/jest.config.js +9 -0
- package/src/template/createPackage/tsconfig.json +13 -0
- package/test/createKeywordFilesIndex.test.ts +7 -0
- package/test/openai/openai.generateList.test.ts +6 -0
- package/test/openai/openai.parseCodeFromMarkdown.test.ts +20 -0
- package/test/repo/repo.test.ts +33 -0
- package/test/setup.js +0 -0
- package/tsconfig.json +109 -0
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
import * as readline from 'readline-sync';
|
|
2
|
+
import { Conversation } from './Conversation';
|
|
3
|
+
import { KeywordToFilesIndexModuleFactory } from './fs/keyword_to_files_index/KeywordToFilesIndexModule';
|
|
4
|
+
import { ConversationTemplateModuleFactory } from './template/ConversationTemplateModule';
|
|
5
|
+
import { ConversationFsModuleFactory } from './fs/conversation_fs/ConversationFsModule';
|
|
6
|
+
import { PackageModuleFactory } from './fs/package/PackageModule';
|
|
7
|
+
import { ConversationModule, ConversationModuleFactory } from './ConversationModule';
|
|
8
|
+
import { Reset, textColorMap } from '@proteinjs/util';
|
|
9
|
+
import { GitModuleFactory } from './fs/git/GitModule';
|
|
10
|
+
import { TiktokenModel } from 'tiktoken';
|
|
11
|
+
import { searchLibrariesFunctionName, searchPackagesFunctionName } from './fs/package/PackageFunctions';
|
|
12
|
+
import { getRecentlyAccessedFilePathsFunctionName, readFilesFunctionName } from './fs/conversation_fs/FsFunctions';
|
|
13
|
+
import { searchFilesFunctionName } from './fs/keyword_to_files_index/KeywordToFilesIndexFunctions';
|
|
14
|
+
|
|
15
|
+
export class CodegenConversation {
|
|
16
|
+
private static INITIAL_QUESTION = 'What would you like to create?';
|
|
17
|
+
private static BOT_NAME = 'Alina';
|
|
18
|
+
private static MODEL: TiktokenModel = 'gpt-4'; //'gpt-3.5-turbo-16k';
|
|
19
|
+
private repoPath: string;
|
|
20
|
+
|
|
21
|
+
constructor(repoPath: string) {
|
|
22
|
+
this.repoPath = repoPath;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
async start() {
|
|
26
|
+
const conversation = await this.createConversation();
|
|
27
|
+
conversation.addAssistantMessagesToHistory([CodegenConversation.INITIAL_QUESTION]);
|
|
28
|
+
const initialUserInput = this.respondToUser(CodegenConversation.INITIAL_QUESTION);
|
|
29
|
+
let response = await conversation.generateResponse([initialUserInput], CodegenConversation.MODEL);
|
|
30
|
+
while (true) {
|
|
31
|
+
const userInput = this.respondToUser(response);
|
|
32
|
+
response = await conversation.generateResponse([userInput], CodegenConversation.MODEL);
|
|
33
|
+
}
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
private async createConversation() {
|
|
37
|
+
const conversation = new Conversation({
|
|
38
|
+
name: this.constructor.name,
|
|
39
|
+
modules: await this.getModules(),
|
|
40
|
+
logLevel: 'info',
|
|
41
|
+
});
|
|
42
|
+
conversation.addSystemMessagesToHistory(this.getSystemMessages());
|
|
43
|
+
return conversation;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
private async getModules(): Promise<ConversationModule[]> {
|
|
47
|
+
const moduleFactories: ConversationModuleFactory[] = [
|
|
48
|
+
new ConversationFsModuleFactory(),
|
|
49
|
+
new KeywordToFilesIndexModuleFactory(),
|
|
50
|
+
new PackageModuleFactory(),
|
|
51
|
+
new ConversationTemplateModuleFactory(),
|
|
52
|
+
new GitModuleFactory(),
|
|
53
|
+
];
|
|
54
|
+
const modules: ConversationModule[] = [];
|
|
55
|
+
for (let moduleFactory of moduleFactories)
|
|
56
|
+
modules.push(await moduleFactory.createModule(this.repoPath));
|
|
57
|
+
|
|
58
|
+
return modules;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
private getSystemMessages() {
|
|
62
|
+
return [
|
|
63
|
+
`We are going to have a conversation with the user to generate code`,
|
|
64
|
+
`Await all function calls that return a promise`,
|
|
65
|
+
`Try to repspond to the user with as few words as possible while still having a conversational tone`,
|
|
66
|
+
`When generating code, export the objects you create inline; do not use 'export default' syntax`,
|
|
67
|
+
// `After finding a file to work with, assume the user's following question pertains to that file and use ${readFilesFunctionName} to read the file if needed`,
|
|
68
|
+
// `If a conversation summary exists, if you aren't already working with a file, use the most relevant keyword mentioned in the conversation summary to find a file to read (using the ${searchFilesFunctionName} function) and then respond to the user after reading the file`,
|
|
69
|
+
// `Use the most relevant keyword mentioned in the conversation summary to find a file to read (using the ${searchFilesFunctionName} function) and then respond to the user after reading the file`,
|
|
70
|
+
// `If the conversation summary indicates the user was working with a file get the file path (use the ${searchFilesFunctionName} function if needed) and read the file with the ${readFilesFunctionName} function. Use that file as context to respond to the user.`,
|
|
71
|
+
//
|
|
72
|
+
// `Use the conversation summary to identify a file as context for the user interaction`,
|
|
73
|
+
// `Use the ${searchFilesFunctionName} function to find the file if needed; read the file if needed`,
|
|
74
|
+
// `If the user is referring to a function, object, class, or type and you don't have the relevant file content, first inspect the conversation summary in the chat history (if it exists) to find a file name, and call the ${searchFilesFunctionName} function and read the file before responding to the user`,
|
|
75
|
+
// `Before calling ${searchFilesFunctionName}, ${searchLibrariesFunctionName} or ${searchPackagesFunctionName}, use the conversation summary in the chat history to identify a file or keyword to search for instead; after reading that file, respond to the user's request`,
|
|
76
|
+
//
|
|
77
|
+
// `Use the ${getRecentlyAccessedFilePathsFunctionName} function find a file that might pertain to the user's request before searching files, libraries or packages; read that file then respond to the user`,
|
|
78
|
+
// `When trying to locate code, use the ${getRecentlyAccessedFilePathsFunctionName} function to search recently accessed files first, then proceed to calling other functions: ${searchLibrariesFunctionName}, ${searchPackagesFunctionName}, ${searchFilesFunctionName}`,
|
|
79
|
+
// `The conversation summary indicates files recently worked in as well`,
|
|
80
|
+
// `If that doesn't yield results, proceed to calling the ${searchLibrariesFunctionName} function, then fall back to functions: ${searchPackagesFunctionName}, ${searchFilesFunctionName}`,
|
|
81
|
+
//
|
|
82
|
+
// `To find code, a file, or a library, call ${getRecentlyAccessedFilePathsFunctionName} and read the most recent file, after trying that call ${searchLibrariesFunctionName} then ${searchFilesFunctionName} to find a relevant file`,
|
|
83
|
+
// `The file mentioned in the conversation summary should be read if we're not already working in a file`,
|
|
84
|
+
// `If there is a conversation summary assistant message, use that to pick a file to read before responding to the user if not already working with a specific file`,
|
|
85
|
+
// `Check for a previous conversation summary assistant message in the chat history; if there is one and it mentions a file the user was working with, call ${searchLibrariesFunctionName} to find the file path then call ${readFilesFunctionName} to read the file. Do this to build context before responding to the user`,
|
|
86
|
+
];
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
private respondToUser(message: string) {
|
|
90
|
+
return readline.question(`${textColorMap.cyan}[${CodegenConversation.BOT_NAME}] ${message}${Reset}\n`);
|
|
91
|
+
}
|
|
92
|
+
}
|
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
import { ChatCompletionMessageParam } from 'openai/resources/chat';
|
|
2
|
+
import { DEFAULT_MODEL, OpenAi } from './OpenAi';
|
|
3
|
+
import { MessageHistory } from './history/MessageHistory';
|
|
4
|
+
import { Function } from './Function';
|
|
5
|
+
import { Logger, LogLevel } from '@proteinjs/util';
|
|
6
|
+
import { Fs } from '@proteinjs/util-node';
|
|
7
|
+
import { MessageModerator } from './history/MessageModerator';
|
|
8
|
+
import { ConversationModule } from './ConversationModule';
|
|
9
|
+
import { TiktokenModel, encoding_for_model } from 'tiktoken';
|
|
10
|
+
import { searchLibrariesFunctionName } from './fs/package/PackageFunctions';
|
|
11
|
+
|
|
12
|
+
// Configuration for a Conversation instance.
export type ConversationParams = {
  // Label used for this conversation's logger.
  name: string,
  // Modules whose system messages, functions and moderators are installed at construction.
  modules?: ConversationModule[];
  logLevel?: LogLevel;
  // History/token limiting; enforcement defaults to on unless enforceLimits === false.
  limits?: {
    enforceLimits?: boolean;
    // Maximum number of messages kept in MessageHistory.
    maxMessagesInHistory?: number;
    // Approximate token budget before the history is summarized (default 3000).
    tokenLimit?: number;
  };
}
|
|
22
|
+
|
|
23
|
+
export class Conversation {
|
|
24
|
+
private tokenLimit = 3000;
|
|
25
|
+
private history;
|
|
26
|
+
private systemMessages: ChatCompletionMessageParam[] = [];
|
|
27
|
+
private functions: Function[] = [];
|
|
28
|
+
private messageModerators: MessageModerator[] = [];
|
|
29
|
+
private generatedCode = false;
|
|
30
|
+
private generatedList = false;
|
|
31
|
+
private logger: Logger;
|
|
32
|
+
private params: ConversationParams;
|
|
33
|
+
|
|
34
|
+
constructor(params: ConversationParams) {
|
|
35
|
+
this.params = params;
|
|
36
|
+
this.history = new MessageHistory({ maxMessages: params.limits?.maxMessagesInHistory, enforceMessageLimit: params.limits?.enforceLimits });
|
|
37
|
+
this.logger = new Logger(params.name, params.logLevel);
|
|
38
|
+
|
|
39
|
+
if (params.modules)
|
|
40
|
+
this.addModules(params.modules);
|
|
41
|
+
|
|
42
|
+
if (typeof params.limits?.enforceLimits === 'undefined' || params.limits.enforceLimits) {
|
|
43
|
+
this.addFunctions('Conversation', [
|
|
44
|
+
summarizeConversationHistoryFunction(this),
|
|
45
|
+
]);
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
if (params.limits?.tokenLimit)
|
|
49
|
+
this.tokenLimit = params.limits.tokenLimit;
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
private addModules(modules: ConversationModule[]) {
|
|
53
|
+
for (let module of modules) {
|
|
54
|
+
if (module.getSystemMessages().length < 1)
|
|
55
|
+
continue;
|
|
56
|
+
|
|
57
|
+
this.addSystemMessagesToHistory([
|
|
58
|
+
`The following are instructions from the ${module.getName()} module: ${module.getSystemMessages().join('. ')}`,
|
|
59
|
+
]);
|
|
60
|
+
this.addFunctions(module.getName(), module.getFunctions());
|
|
61
|
+
this.addMessageModerators(module.getMessageModerators());
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
private addFunctions(moduleName: string, functions: Function[]) {
|
|
66
|
+
this.functions.push(...functions);
|
|
67
|
+
let functionInstructions = `The following are instructions from functions in the ${moduleName} module:`;
|
|
68
|
+
let functionInstructionsAdded = false;
|
|
69
|
+
for (let f of functions) {
|
|
70
|
+
if (f.instructions) {
|
|
71
|
+
if (!f.instructions || f.instructions.length < 1)
|
|
72
|
+
continue;
|
|
73
|
+
|
|
74
|
+
functionInstructionsAdded = true;
|
|
75
|
+
const instructionsParagraph = f.instructions.join('. ');
|
|
76
|
+
functionInstructions += ` ${f.definition.name}: ${instructionsParagraph}.`;
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
if (!functionInstructionsAdded)
|
|
81
|
+
return;
|
|
82
|
+
|
|
83
|
+
this.addSystemMessagesToHistory([functionInstructions]);
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
private addMessageModerators(messageModerators: MessageModerator[]) {
|
|
87
|
+
this.messageModerators.push(...messageModerators);
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
private async enforceTokenLimit(messages: string[], model?: TiktokenModel) {
|
|
91
|
+
if (this.params.limits?.enforceLimits === false)
|
|
92
|
+
return;
|
|
93
|
+
|
|
94
|
+
const resolvedModel = model ? model : DEFAULT_MODEL;
|
|
95
|
+
const encoder = encoding_for_model(resolvedModel);
|
|
96
|
+
const conversation = this.history.toString() + messages.join('. ');
|
|
97
|
+
const encoded = encoder.encode(conversation);
|
|
98
|
+
console.log(`current tokens: ${encoded.length}`);
|
|
99
|
+
if (encoded.length < this.tokenLimit)
|
|
100
|
+
return;
|
|
101
|
+
|
|
102
|
+
const summarizeConversationRequest = `First, call the ${summarizeConversationHistoryFunctionName} function`;
|
|
103
|
+
await OpenAi.generateResponse([summarizeConversationRequest], model, this.history, this.functions, this.messageModerators, this.params.logLevel);
|
|
104
|
+
const referenceSummaryRequest = `If there's a file mentioned in the conversation summary, find and read the file to better respond to my next request. If that doesn't find anything, call the ${searchLibrariesFunctionName} function on other keywords in the conversation summary to find a file to read`;
|
|
105
|
+
await OpenAi.generateResponse([referenceSummaryRequest], model, this.history, this.functions, this.messageModerators, this.params.logLevel);
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
summarizeConversationHistory(summary: string) {
|
|
109
|
+
this.clearHistory();
|
|
110
|
+
this.history.push([{ role: 'assistant', content: `Previous conversation summary: ${summary}` }]);
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
private clearHistory() {
|
|
114
|
+
this.history = new MessageHistory();
|
|
115
|
+
this.history.push(this.systemMessages);
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
addSystemMessagesToHistory(messages: string[], unshift = false) {
|
|
119
|
+
const chatCompletions: ChatCompletionMessageParam[] = messages.map(message => { return { role: 'system', content: message }});
|
|
120
|
+
if (unshift) {
|
|
121
|
+
this.history.getMessages().unshift(...chatCompletions);
|
|
122
|
+
this.history.prune();
|
|
123
|
+
this.systemMessages.unshift(...chatCompletions);
|
|
124
|
+
} else {
|
|
125
|
+
this.history.push(chatCompletions);
|
|
126
|
+
this.systemMessages.push(...chatCompletions);
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
addAssistantMessagesToHistory(messages: string[], unshift = false) {
|
|
131
|
+
const chatCompletions: ChatCompletionMessageParam[] = messages.map(message => { return { role: 'assistant', content: message }});
|
|
132
|
+
if (unshift) {
|
|
133
|
+
this.history.getMessages().unshift(...chatCompletions);
|
|
134
|
+
this.history.prune();
|
|
135
|
+
} else
|
|
136
|
+
this.history.push(chatCompletions);
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
addUserMessagesToHistory(messages: string[], unshift = false) {
|
|
140
|
+
const chatCompletions: ChatCompletionMessageParam[] = messages.map(message => { return { role: 'user', content: message }});
|
|
141
|
+
if (unshift) {
|
|
142
|
+
this.history.getMessages().unshift(...chatCompletions);
|
|
143
|
+
this.history.prune();
|
|
144
|
+
} else
|
|
145
|
+
this.history.push(chatCompletions);
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
async generateResponse(messages: string[], model?: TiktokenModel) {
|
|
149
|
+
await this.enforceTokenLimit(messages, model);
|
|
150
|
+
return await OpenAi.generateResponse(messages, model, this.history, this.functions, this.messageModerators, this.params.logLevel);
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
async generateCode(description: string[], model?: TiktokenModel) {
|
|
154
|
+
this.logger.info(`Generating code for description:\n${description.join('\n')}`);
|
|
155
|
+
const code = await OpenAi.generateCode(description, model, this.history, this.functions, this.messageModerators, !this.generatedCode, this.params.logLevel);
|
|
156
|
+
this.logger.info(`Generated code:\n${code.slice(0, 150)}${code.length > 150 ? '...' : ''}`);
|
|
157
|
+
this.generatedCode = true;
|
|
158
|
+
return code;
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
async updateCodeFromFile(codeToUpdateFilePath: string, dependencyCodeFilePaths: string[], description: string, model?: TiktokenModel) {
|
|
162
|
+
const codeToUpdate = await Fs.readFile(codeToUpdateFilePath);
|
|
163
|
+
let dependencyDescription = `Assume the following exists:\n`;
|
|
164
|
+
for (let dependencyCodeFilePath of dependencyCodeFilePaths) {
|
|
165
|
+
const dependencCode = await Fs.readFile(dependencyCodeFilePath);
|
|
166
|
+
dependencyDescription += dependencCode + '\n\n';
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
this.logger.info(`Updating code from file: ${codeToUpdateFilePath}`);
|
|
170
|
+
return await this.updateCode(codeToUpdate, dependencyDescription + description, model);
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
async updateCode(code: string, description: string, model?: TiktokenModel) {
|
|
174
|
+
this.logger.info(`Updating code:\n${code.slice(0, 150)}${code.length > 150 ? '...' : ''}\nFrom description: ${description}`);
|
|
175
|
+
const updatedCode = await OpenAi.updateCode(code, description, model, this.history, this.functions, this.messageModerators, !this.generatedCode, this.params.logLevel);
|
|
176
|
+
this.logger.info(`Updated code:\n${updatedCode.slice(0, 150)}${updatedCode.length > 150 ? '...' : ''}`);
|
|
177
|
+
this.generatedCode = true;
|
|
178
|
+
return updatedCode;
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
async generateList(description: string[], model?: TiktokenModel) {
|
|
182
|
+
const list = await OpenAi.generateList(description, model, this.history, this.functions, this.messageModerators, !this.generatedList, this.params.logLevel);
|
|
183
|
+
this.generatedList = true;
|
|
184
|
+
return list;
|
|
185
|
+
}
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
export const summarizeConversationHistoryFunctionName = 'summarizeConversationHistory';
|
|
189
|
+
export const summarizeConversationHistoryFunction = (conversation: Conversation) => {
|
|
190
|
+
return {
|
|
191
|
+
definition: {
|
|
192
|
+
name: summarizeConversationHistoryFunctionName,
|
|
193
|
+
description: 'Clear the conversation history and summarize what was in it',
|
|
194
|
+
parameters: {
|
|
195
|
+
type: 'object',
|
|
196
|
+
properties: {
|
|
197
|
+
summary: {
|
|
198
|
+
type: 'string',
|
|
199
|
+
description: 'A 1-3 sentence summary of the current chat history',
|
|
200
|
+
},
|
|
201
|
+
},
|
|
202
|
+
required: ['summary']
|
|
203
|
+
},
|
|
204
|
+
},
|
|
205
|
+
call: async (params: { summary: string }) => conversation.summarizeConversationHistory(params.summary),
|
|
206
|
+
}
|
|
207
|
+
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { Function } from './Function';
|
|
2
|
+
import { MessageModerator } from './history/MessageModerator';
|
|
3
|
+
|
|
4
|
+
/**
 * A pluggable bundle of capabilities for a Conversation: system prompts,
 * callable functions, and message-history moderators.
 */
export interface ConversationModule {
  // Human-readable module name, used when labeling system messages.
  getName(): string,
  // Instructions injected into the conversation as system messages.
  getSystemMessages(): string[];
  // Functions the model is allowed to call.
  getFunctions(): Function[];
  // Moderators that may rewrite the message history before each request.
  getMessageModerators(): MessageModerator[];
}
|
|
10
|
+
|
|
11
|
+
/** Builds a ConversationModule bound to a repository path. */
export interface ConversationModuleFactory {
  createModule(repoPath: string): Promise<ConversationModule>;
}
|
package/src/Function.ts
ADDED
package/src/OpenAi.ts
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
import { OpenAI as OpenAIApi } from 'openai';
|
|
2
|
+
import { ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletion } from 'openai/resources/chat';
|
|
3
|
+
import { LogLevel, Logger } from '@proteinjs/util';
|
|
4
|
+
import { MessageModerator } from './history/MessageModerator';
|
|
5
|
+
import { Function } from './Function';
|
|
6
|
+
import { MessageHistory } from './history/MessageHistory';
|
|
7
|
+
import { TiktokenModel } from 'tiktoken';
|
|
8
|
+
|
|
9
|
+
function delay(ms: number) {
|
|
10
|
+
return new Promise(resolve => setTimeout(resolve, ms));
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
// Model used whenever a caller does not pass an explicit model.
export const DEFAULT_MODEL: TiktokenModel = 'gpt-3.5-turbo';
|
|
14
|
+
export class OpenAi {
|
|
15
|
+
static async generateResponse(messages: string[], model?: string, history?: MessageHistory, functions?: Function[], messageModerators?: MessageModerator[], logLevel: LogLevel = 'info'): Promise<string> {
|
|
16
|
+
const logger = new Logger('OpenAi.generateResponse', logLevel);
|
|
17
|
+
const messageParams: ChatCompletionMessageParam[] = messages.map(message => { return { role: 'user', content: message }});
|
|
18
|
+
if (history)
|
|
19
|
+
history.push(messageParams);
|
|
20
|
+
let messageParamsWithHistory = history ? history : new MessageHistory().push(messageParams);
|
|
21
|
+
if (messageModerators)
|
|
22
|
+
messageParamsWithHistory = OpenAi.moderateHistory(messageParamsWithHistory, messageModerators);
|
|
23
|
+
const response = await OpenAi.executeRequest(messageParamsWithHistory, logLevel, functions, model);
|
|
24
|
+
const responseMessage = response.choices[0].message;
|
|
25
|
+
if (responseMessage.function_call) {
|
|
26
|
+
messageParamsWithHistory.push([responseMessage]);
|
|
27
|
+
const functionReturnMessage = await this.callFunction(logLevel, responseMessage.function_call, functions);
|
|
28
|
+
if (functionReturnMessage)
|
|
29
|
+
messageParamsWithHistory.push([functionReturnMessage])
|
|
30
|
+
return await this.generateResponse([], model, messageParamsWithHistory, functions, messageModerators, logLevel);
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
const responseText = responseMessage.content;
|
|
34
|
+
if (!responseText) {
|
|
35
|
+
logger.error(`Received response: ${JSON.stringify(response)}`);
|
|
36
|
+
throw new Error(`Response was empty for messages: ${messages.join('\n')}`);
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
messageParamsWithHistory.push([responseMessage]);
|
|
40
|
+
return responseText;
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
private static moderateHistory(history: MessageHistory, messageModerators: MessageModerator[]) {
|
|
44
|
+
for (let messageModerator of messageModerators)
|
|
45
|
+
history.setMessages(messageModerator.observe(history.getMessages()));
|
|
46
|
+
|
|
47
|
+
return history;
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
private static async executeRequest(messageParamsWithHistory: MessageHistory, logLevel: LogLevel, functions?: Function[], model?: string): Promise<ChatCompletion> {
|
|
51
|
+
const logger = new Logger('OpenAi.executeRequest', logLevel);
|
|
52
|
+
const openaiApi = new OpenAIApi();
|
|
53
|
+
let response: ChatCompletion;
|
|
54
|
+
try {
|
|
55
|
+
const latestMessage = messageParamsWithHistory.getMessages()[messageParamsWithHistory.getMessages().length - 1];
|
|
56
|
+
if (latestMessage.content)
|
|
57
|
+
logger.info(`Sending request: ${latestMessage.content}`);
|
|
58
|
+
else if (latestMessage.role == 'function')
|
|
59
|
+
logger.info(`Sending request: returning output of ${latestMessage.name} function`);
|
|
60
|
+
else
|
|
61
|
+
logger.info(`Sending request`);
|
|
62
|
+
logger.debug(`Sending messages: ${JSON.stringify(messageParamsWithHistory.getMessages(), null, 2)}`, true);
|
|
63
|
+
response = await openaiApi.chat.completions.create({
|
|
64
|
+
model: model ? model : DEFAULT_MODEL,
|
|
65
|
+
temperature: 0,
|
|
66
|
+
messages: messageParamsWithHistory.getMessages(),
|
|
67
|
+
functions: functions?.map(f => f.definition),
|
|
68
|
+
});
|
|
69
|
+
const responseMessage = response.choices[0].message;
|
|
70
|
+
if (responseMessage.content)
|
|
71
|
+
logger.info(`Received response: ${responseMessage.content}`);
|
|
72
|
+
else if (responseMessage.function_call)
|
|
73
|
+
logger.info(`Received response: call ${responseMessage.function_call.name} function`);
|
|
74
|
+
else
|
|
75
|
+
logger.info(`Received response`);
|
|
76
|
+
if (response.usage)
|
|
77
|
+
logger.info(JSON.stringify(response.usage));
|
|
78
|
+
else
|
|
79
|
+
logger.info(JSON.stringify(`Usage data missing`));
|
|
80
|
+
} catch(error: any) {
|
|
81
|
+
logger.info(`Received error response, error type: ${error.type}`);
|
|
82
|
+
if (typeof error.status !== 'undefined' && error.status == 429) {
|
|
83
|
+
if (error.type == 'tokens' && typeof error.headers['x-ratelimit-reset-tokens'] === 'string') {
|
|
84
|
+
const waitTime = parseInt(error.headers['x-ratelimit-reset-tokens']);
|
|
85
|
+
const remainingTokens = error.headers['x-ratelimit-remaining-tokens'];
|
|
86
|
+
const delayMs = 15000;
|
|
87
|
+
logger.warn(`Waiting to retry in ${delayMs/1000}s, token reset in: ${waitTime}s, remaining tokens: ${remainingTokens}`);
|
|
88
|
+
await delay(delayMs);
|
|
89
|
+
return await OpenAi.executeRequest(messageParamsWithHistory, logLevel, functions, model);
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
throw error;
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
return response;
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
private static async callFunction(logLevel: LogLevel, functionCall: ChatCompletionMessage.FunctionCall, functions?: Function[]): Promise<ChatCompletionMessageParam|undefined> {
|
|
100
|
+
const logger = new Logger('OpenAi.callFunction', logLevel);
|
|
101
|
+
if (!functions) {
|
|
102
|
+
const warning = `Assistant attempted to call a function when no functions were provided`;
|
|
103
|
+
logger.warn(warning);
|
|
104
|
+
const message: ChatCompletionMessageParam = { role: 'user', content: warning }
|
|
105
|
+
return message;
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
functionCall.name = functionCall.name.split('.').pop() as string;
|
|
109
|
+
const f = functions.find(f => f.definition.name === functionCall.name);
|
|
110
|
+
if (!f) {
|
|
111
|
+
const warning = `Assistant attempted to call nonexistent function: ${functionCall.name}`;
|
|
112
|
+
logger.warn(warning);
|
|
113
|
+
const message: ChatCompletionMessageParam = { role: 'user', content: warning }
|
|
114
|
+
return message;
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
let returnObject = null;
|
|
118
|
+
try {
|
|
119
|
+
logger.info(`Assistant calling function: ${f.definition.name}(${functionCall.arguments})`);
|
|
120
|
+
returnObject = JSON.stringify(await f.call(JSON.parse(functionCall.arguments)));
|
|
121
|
+
logger.info(`Assistant called function: ${f.definition.name}(${functionCall.arguments}) => ${returnObject}`, 1000);
|
|
122
|
+
} catch (error: any) {
|
|
123
|
+
logger.error(error.message);
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
if (!returnObject)
|
|
127
|
+
return;
|
|
128
|
+
|
|
129
|
+
return {
|
|
130
|
+
role: 'function',
|
|
131
|
+
name: f.definition.name,
|
|
132
|
+
content: returnObject,
|
|
133
|
+
};
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
static async generateCode(messages: string[], model?: string, history?: MessageHistory, functions?: Function[], messageModerators?: MessageModerator[], includeSystemMessages: boolean = true, logLevel: LogLevel = 'info') {
|
|
137
|
+
const systemMessages: ChatCompletionMessageParam[] = [
|
|
138
|
+
{ role: 'system', content: 'Return only the code and exclude example usage, markdown, explanations, comments and notes.' },
|
|
139
|
+
{ role: 'system', content: `Write code in typescript.` },
|
|
140
|
+
{ role: 'system', content: `Declare explicit types for all function parameters.` },
|
|
141
|
+
{ role: 'system', content: 'Export all functions and objects generated.' },
|
|
142
|
+
{ role: 'system', content: 'Do not omit function implementations.' },
|
|
143
|
+
];
|
|
144
|
+
const resolvedHistory = history ?
|
|
145
|
+
includeSystemMessages ?
|
|
146
|
+
history.push(systemMessages)
|
|
147
|
+
:
|
|
148
|
+
history
|
|
149
|
+
:
|
|
150
|
+
includeSystemMessages ?
|
|
151
|
+
new MessageHistory().push(systemMessages)
|
|
152
|
+
:
|
|
153
|
+
undefined
|
|
154
|
+
;
|
|
155
|
+
const code = await this.generateResponse(messages, model, resolvedHistory, functions, messageModerators, logLevel);
|
|
156
|
+
return this.parseCodeFromMarkdown(code);
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
static async updateCode(code: string, description: string, model?: string, history?: MessageHistory, functions?: Function[], messageModerators?: MessageModerator[], includeSystemMessages: boolean = true, logLevel: LogLevel = 'info') {
|
|
160
|
+
return await this.generateCode([this.updateCodeDescription(code, description)], model, history, functions, messageModerators, includeSystemMessages, logLevel);
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
static updateCodeDescription(code: string, description: string) {
|
|
164
|
+
return `Update this code:\n\n${code}\n\n${description}`;
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
/**
 * Extracts the code from markdown-fenced blocks in a model response.
 * Lines inside ``` fences are kept; fence lines and any prose outside the
 * fences are dropped. Multiple code blocks are separated by a single blank
 * line in the output.
 * @param code the raw model response
 * @returns the concatenated code, or `code` unchanged when it contains no
 *   complete fenced block
 */
static parseCodeFromMarkdown(code: string) {
  // No complete fenced block — assume the response is already bare code.
  if (!code.match(/```([\s\S]+?)```/g))
    return code;

  const filteredLines: string[] = [];
  let inCodeBlock = false;
  for (let line of code.split('\n')) {
    if (line.startsWith('```')) {
      inCodeBlock = !inCodeBlock;
      // Closing a block: emit a blank separator between code blocks.
      if (!inCodeBlock)
        filteredLines.push('');

      continue;
    }

    if (inCodeBlock)
      filteredLines.push(line);
  }

  // Remove the trailing separator so the output doesn't end with a stray
  // newline — we only want blank lines between code blocks.
  // Fix: guard the pop; the previous unconditional pop() deleted the last
  // line of actual code when the response ended with an unclosed fence
  // (no trailing '' had been appended in that case).
  if (filteredLines[filteredLines.length - 1] === '')
    filteredLines.pop();

  return filteredLines.join('\n');
}
|
|
192
|
+
|
|
193
|
+
static async generateList(messages: string[], model?: string, history?: MessageHistory, functions?: Function[], messageModerators?: MessageModerator[], includeSystemMessages: boolean = true, logLevel: LogLevel = 'info'): Promise<string[]> {
|
|
194
|
+
const systemMessages: ChatCompletionMessageParam[] = [
|
|
195
|
+
{ role: 'system', content: 'Return only the list and exclude example usage, markdown and all explanations, comments and notes.' },
|
|
196
|
+
{ role: 'system', content: 'Separate each item in the list by a ;' },
|
|
197
|
+
];
|
|
198
|
+
const resolvedHistory = history ?
|
|
199
|
+
includeSystemMessages ?
|
|
200
|
+
history.push(systemMessages)
|
|
201
|
+
:
|
|
202
|
+
history
|
|
203
|
+
:
|
|
204
|
+
includeSystemMessages ?
|
|
205
|
+
new MessageHistory().push(systemMessages)
|
|
206
|
+
:
|
|
207
|
+
undefined
|
|
208
|
+
;
|
|
209
|
+
const list = await this.generateResponse(messages, model, resolvedHistory, functions, messageModerators, logLevel);
|
|
210
|
+
return list.split(';').map(item => item.trim());
|
|
211
|
+
}
|
|
212
|
+
}
|
package/src/Paragraph.ts
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import { Sentence } from './Sentence';
|
|
2
|
+
|
|
3
|
+
export class Paragraph {
|
|
4
|
+
private sentences: Sentence[] = [];
|
|
5
|
+
|
|
6
|
+
add(sentence: Sentence) {
|
|
7
|
+
this.sentences.push(sentence);
|
|
8
|
+
return this;
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
toString(): string {
|
|
12
|
+
if (this.sentences.length == 0)
|
|
13
|
+
return '';
|
|
14
|
+
|
|
15
|
+
return this.sentences.join(' ');
|
|
16
|
+
}
|
|
17
|
+
}
|
package/src/Sentence.ts
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
// Builds a sentence from fragments joined by ', ', ensuring a terminating period.
|
|
2
|
+
export class Sentence {
|
|
3
|
+
private lines: string[] = [];
|
|
4
|
+
|
|
5
|
+
add(line: string) {
|
|
6
|
+
this.lines.push(line);
|
|
7
|
+
return this;
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
toString(): string {
|
|
11
|
+
if (this.lines.length == 0)
|
|
12
|
+
return '';
|
|
13
|
+
|
|
14
|
+
let sentence = this.lines.join(', ');
|
|
15
|
+
if (sentence.lastIndexOf('.') != (sentence.length - 1))
|
|
16
|
+
sentence += '.';
|
|
17
|
+
|
|
18
|
+
return sentence;
|
|
19
|
+
}
|
|
20
|
+
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import { PackageUtil } from '@proteinjs/util-node';
|
|
2
|
+
import { Conversation } from '../Conversation';
|
|
3
|
+
import { Repo } from './Repo';
|
|
4
|
+
|
|
5
|
+
// A file to generate: its output path (joined under the template's srcPath —
// see CodeTemplate.generate) and the Code that produces its contents.
export type SourceFile = {
  relativePath: string,
  code: Code
}

// An import to expose to generated code. `sourceFilePath` is the local .ts
// file whose declarations are generated for the model (see
// Code.declarationMessage); `importPathFromGeneratedFile` is the module
// specifier the generated file should import `moduleNames` from.
export type Import = {
  moduleNames: string[],
  importPathFromGeneratedFile: string,
  sourceFilePath: string,
}

// Arguments for Code: the conversation used to generate, the prompt
// description lines, and optional imports to make available.
export type CodeArgs = {
  conversation: Conversation,
  description: string[],
  imports?: Import[],
}
|
|
21
|
+
|
|
22
|
+
export class Code {
|
|
23
|
+
private args: CodeArgs;
|
|
24
|
+
|
|
25
|
+
constructor(args: CodeArgs) {
|
|
26
|
+
this.args = args;
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
async generate(): Promise<string> {
|
|
30
|
+
if (this.args.imports)
|
|
31
|
+
this.addImports(this.args.imports, this.args.conversation);
|
|
32
|
+
|
|
33
|
+
return await this.args.conversation.generateCode(this.args.description, 'gpt-4');
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
private addImports(imports: Import[], conversation: Conversation) {
|
|
37
|
+
conversation.addSystemMessagesToHistory([
|
|
38
|
+
this.declarationMessage(imports.map(d => d.sourceFilePath)),
|
|
39
|
+
this.importMessage(imports),
|
|
40
|
+
]);
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
private declarationMessage(tsFilePaths: string[]) {
|
|
44
|
+
const declarationMap = PackageUtil.generateTypescriptDeclarations({ tsFilePaths, includeDependencyDeclarations: true });
|
|
45
|
+
const declarations = Object.values(declarationMap).join('\n');
|
|
46
|
+
return `Assume the following code exists in other files:\n${declarations}`;
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
private importMessage(imports: Omit<Import, 'filePath'>[]) {
|
|
50
|
+
const importStatements = imports.map(i => `import { ${i.moduleNames.join(', ')} } from '${i.importPathFromGeneratedFile}'`);
|
|
51
|
+
return `Add the following imports:\n${importStatements}`;
|
|
52
|
+
}
|
|
53
|
+
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import { Logger } from '@proteinjs/util';
|
|
2
|
+
import { Fs, PackageUtil, Package } from '@proteinjs/util-node';
|
|
3
|
+
import { SourceFile } from './Code';
|
|
4
|
+
|
|
5
|
+
export type TemplateArgs = {
|
|
6
|
+
srcPath: string,
|
|
7
|
+
additionalPackages?: Package[],
|
|
8
|
+
replacePackages?: boolean,
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
export abstract class CodeTemplate {
|
|
12
|
+
protected logger = new Logger(this.constructor.name);
|
|
13
|
+
protected templateArgs: TemplateArgs;
|
|
14
|
+
|
|
15
|
+
constructor(templateArgs: TemplateArgs) {
|
|
16
|
+
this.templateArgs = templateArgs;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
abstract dependencyPackages(): Package[];
|
|
20
|
+
abstract sourceFiles(): SourceFile[];
|
|
21
|
+
|
|
22
|
+
async generate() {
|
|
23
|
+
await PackageUtil.installPackages(this.resolvePackages());
|
|
24
|
+
for (let sourceFile of this.sourceFiles()) {
|
|
25
|
+
const filePath = Fs.baseContainedJoin(this.templateArgs.srcPath, sourceFile.relativePath);
|
|
26
|
+
this.logger.info(`Generating source file: ${filePath}`);
|
|
27
|
+
const code = await sourceFile.code.generate();
|
|
28
|
+
await Fs.writeFiles([{ path: filePath, content: code }]);
|
|
29
|
+
this.logger.info(`Generated source file: ${filePath}`);
|
|
30
|
+
}
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
private resolvePackages() {
|
|
34
|
+
const packages: Package[] = this.templateArgs.replacePackages ? [] : this.dependencyPackages();
|
|
35
|
+
if (this.templateArgs.additionalPackages)
|
|
36
|
+
packages.push(...this.templateArgs.additionalPackages);
|
|
37
|
+
return packages;
|
|
38
|
+
}
|
|
39
|
+
}
|