@far-world-labs/verblets 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.json +42 -0
- package/.husky/pre-commit +4 -0
- package/.release-it.json +9 -0
- package/.vite.config.examples.js +8 -0
- package/.vite.config.js +8 -0
- package/docker-compose.yml +7 -0
- package/docs/README.md +41 -0
- package/docs/babel.config.js +3 -0
- package/docs/blog/2019-05-28-first-blog-post.md +12 -0
- package/docs/blog/2019-05-29-long-blog-post.md +44 -0
- package/docs/blog/2021-08-01-mdx-blog-post.mdx +20 -0
- package/docs/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg +0 -0
- package/docs/blog/2021-08-26-welcome/index.md +25 -0
- package/docs/blog/authors.yml +17 -0
- package/docs/docs/api/bool.md +74 -0
- package/docs/docs/api/search.md +51 -0
- package/docs/docs/intro.md +47 -0
- package/docs/docs/tutorial-basics/_category_.json +8 -0
- package/docs/docs/tutorial-basics/congratulations.md +23 -0
- package/docs/docs/tutorial-basics/create-a-blog-post.md +34 -0
- package/docs/docs/tutorial-basics/create-a-document.md +57 -0
- package/docs/docs/tutorial-basics/create-a-page.md +43 -0
- package/docs/docs/tutorial-basics/deploy-your-site.md +31 -0
- package/docs/docs/tutorial-basics/markdown-features.mdx +152 -0
- package/docs/docs/tutorial-extras/_category_.json +7 -0
- package/docs/docs/tutorial-extras/img/docsVersionDropdown.png +0 -0
- package/docs/docs/tutorial-extras/img/localeDropdown.png +0 -0
- package/docs/docs/tutorial-extras/manage-docs-versions.md +55 -0
- package/docs/docs/tutorial-extras/translate-your-site.md +88 -0
- package/docs/docusaurus.config.js +120 -0
- package/docs/package.json +44 -0
- package/docs/sidebars.js +31 -0
- package/docs/src/components/HomepageFeatures/index.js +61 -0
- package/docs/src/components/HomepageFeatures/styles.module.css +11 -0
- package/docs/src/css/custom.css +30 -0
- package/docs/src/pages/index.js +43 -0
- package/docs/src/pages/index.module.css +23 -0
- package/docs/src/pages/markdown-page.md +7 -0
- package/docs/static/.nojekyll +0 -0
- package/docs/static/img/docusaurus-social-card.jpg +0 -0
- package/docs/static/img/docusaurus.png +0 -0
- package/docs/static/img/favicon.ico +0 -0
- package/docs/static/img/logo.svg +1 -0
- package/docs/static/img/undraw_docusaurus_mountain.svg +171 -0
- package/docs/static/img/undraw_docusaurus_react.svg +170 -0
- package/docs/static/img/undraw_docusaurus_tree.svg +40 -0
- package/package.json +75 -0
- package/scripts/generate-chain/index.js +111 -0
- package/scripts/generate-lib/index.js +68 -0
- package/scripts/generate-test/index.js +111 -0
- package/scripts/generate-verblet/README.md +17 -0
- package/scripts/generate-verblet/index.js +110 -0
- package/scripts/run.sh +15 -0
- package/scripts/runner/index.js +30 -0
- package/scripts/simple-editor/README.md +34 -0
- package/scripts/simple-editor/index.js +68 -0
- package/scripts/summarize-files/index.js +46 -0
- package/src/chains/dismantle/dismantle.examples.js +0 -0
- package/src/chains/dismantle/index.examples.js +30 -0
- package/src/chains/dismantle/index.js +314 -0
- package/src/chains/dismantle/index.spec.js +33 -0
- package/src/chains/list/index.examples.js +72 -0
- package/src/chains/list/index.js +161 -0
- package/src/chains/list/index.spec.js +68 -0
- package/src/chains/list/schema.json +24 -0
- package/src/chains/questions/index.examples.js +68 -0
- package/src/chains/questions/index.js +136 -0
- package/src/chains/questions/index.spec.js +29 -0
- package/src/chains/scan-js/index.js +119 -0
- package/src/chains/sort/index.examples.js +40 -0
- package/src/chains/sort/index.js +113 -0
- package/src/chains/sort/index.spec.js +115 -0
- package/src/chains/summary-map/README.md +33 -0
- package/src/chains/summary-map/index.examples.js +57 -0
- package/src/chains/summary-map/index.js +208 -0
- package/src/chains/summary-map/index.spec.js +78 -0
- package/src/chains/test/index.js +118 -0
- package/src/chains/test-advice/index.js +36 -0
- package/src/constants/common.js +9 -0
- package/src/constants/messages.js +3 -0
- package/src/constants/openai.js +65 -0
- package/src/index.js +33 -0
- package/src/json-schemas/cars-test.json +11 -0
- package/src/json-schemas/index.js +18 -0
- package/src/json-schemas/intent.json +38 -0
- package/src/json-schemas/schema-dot-org-photograph.json +127 -0
- package/src/json-schemas/schema-dot-org-place.json +56 -0
- package/src/lib/any-signal/index.js +28 -0
- package/src/lib/chatgpt/index.js +143 -0
- package/src/lib/editor/index.js +31 -0
- package/src/lib/parse-js-parts/index.js +333 -0
- package/src/lib/parse-js-parts/index.spec.js +156 -0
- package/src/lib/path-aliases/index.js +39 -0
- package/src/lib/path-aliases/index.spec.js +70 -0
- package/src/lib/pave/index.js +34 -0
- package/src/lib/pave/index.spec.js +73 -0
- package/src/lib/prompt-cache/index.js +46 -0
- package/src/lib/retry/index.js +63 -0
- package/src/lib/retry/index.spec.js +86 -0
- package/src/lib/search-best-first/index.js +66 -0
- package/src/lib/search-js-files/code-features-property-definitions.json +123 -0
- package/src/lib/search-js-files/index.examples.js +22 -0
- package/src/lib/search-js-files/index.js +158 -0
- package/src/lib/search-js-files/index.spec.js +34 -0
- package/src/lib/search-js-files/scan-file.js +253 -0
- package/src/lib/shorten-text/index.js +30 -0
- package/src/lib/shorten-text/index.spec.js +68 -0
- package/src/lib/strip-numeric/index.js +5 -0
- package/src/lib/strip-response/index.js +35 -0
- package/src/lib/timed-abort-controller/index.js +41 -0
- package/src/lib/to-bool/index.js +8 -0
- package/src/lib/to-enum/index.js +14 -0
- package/src/lib/to-number/index.js +12 -0
- package/src/lib/to-number-with-units/index.js +51 -0
- package/src/lib/transcribe/index.js +61 -0
- package/src/prompts/README.md +15 -0
- package/src/prompts/as-enum.js +5 -0
- package/src/prompts/as-json-schema.js +9 -0
- package/src/prompts/as-object-with-schema.js +31 -0
- package/src/prompts/as-schema-org-text.js +17 -0
- package/src/prompts/as-schema-org-type.js +1 -0
- package/src/prompts/blog-post.js +7 -0
- package/src/prompts/code-features.js +28 -0
- package/src/prompts/constants.js +101 -0
- package/src/prompts/features-json-schema.js +27 -0
- package/src/prompts/generate-collection.js +26 -0
- package/src/prompts/generate-list.js +48 -0
- package/src/prompts/generate-questions.js +19 -0
- package/src/prompts/index.js +20 -0
- package/src/prompts/intent.js +66 -0
- package/src/prompts/output-succinct-names.js +3 -0
- package/src/prompts/select-from-threshold.js +18 -0
- package/src/prompts/sort.js +35 -0
- package/src/prompts/style.js +41 -0
- package/src/prompts/summarize.js +13 -0
- package/src/prompts/token-budget.js +3 -0
- package/src/prompts/wrap-list.js +14 -0
- package/src/prompts/wrap-variable.js +36 -0
- package/src/services/llm-model/index.js +114 -0
- package/src/services/llm-model/model.js +21 -0
- package/src/services/redis/index.js +84 -0
- package/src/verblets/auto/index.examples.js +28 -0
- package/src/verblets/auto/index.js +28 -0
- package/src/verblets/auto/index.spec.js +34 -0
- package/src/verblets/bool/index.examples.js +28 -0
- package/src/verblets/bool/index.js +28 -0
- package/src/verblets/bool/index.schema.json +14 -0
- package/src/verblets/bool/index.spec.js +35 -0
- package/src/verblets/enum/index.examples.js +33 -0
- package/src/verblets/enum/index.js +15 -0
- package/src/verblets/enum/index.spec.js +35 -0
- package/src/verblets/intent/index.examples.js +51 -0
- package/src/verblets/intent/index.js +72 -0
- package/src/verblets/intent/index.spec.js +31 -0
- package/src/verblets/number/index.examples.js +33 -0
- package/src/verblets/number/index.js +22 -0
- package/src/verblets/number/index.spec.js +35 -0
- package/src/verblets/number-with-units/index.examples.js +34 -0
- package/src/verblets/number-with-units/index.js +19 -0
- package/src/verblets/number-with-units/index.spec.js +46 -0
- package/src/verblets/schema-org/index.examples.js +56 -0
- package/src/verblets/schema-org/index.js +8 -0
- package/src/verblets/schema-org/index.spec.js +39 -0
- package/src/verblets/to-object/README.md +38 -0
- package/src/verblets/to-object/index.examples.js +29 -0
- package/src/verblets/to-object/index.js +136 -0
- package/src/verblets/to-object/index.spec.js +74 -0
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
import fs from 'node:fs/promises';
|
|
2
|
+
import path from 'node:path';
|
|
3
|
+
import { describe, expect, it } from 'vitest';
|
|
4
|
+
|
|
5
|
+
import { longTestTimeout } from '../../constants/common.js';
|
|
6
|
+
import questions from './index.js';
|
|
7
|
+
|
|
8
|
+
// Create directoryPath (and any missing parents) unless it already exists.
const ensureDirectoryExists = async (directoryPath) => {
  const alreadyExists = await fs.access(directoryPath).then(
    () => true,
    () => false
  );
  if (!alreadyExists) {
    await fs.mkdir(directoryPath, { recursive: true });
  }
};
|
|
15
|
+
|
|
16
|
+
// Read filePath as text; resolve to undefined when the file is missing or
// unreadable (an expected condition for a cold cache, not an error).
const readFileOrUndefined = async (filePath) => {
  try {
    const contents = await fs.readFile(filePath);
    return contents.toString();
  } catch (error) {
    // Best-effort read: absence of the file simply means "no cached value".
    return undefined;
  }
};
|
|
26
|
+
|
|
27
|
+
// Cache location for expensive LLM results: $HOME/.cache/puck.
// NOTE(review): process.env.HOME is undefined on Windows and in some CI
// environments, which would make path.join throw — confirm supported platforms.
const cacheDir = path.join(process.env.HOME, '.cache', 'puck');
// Bump the trailing number to invalidate previously cached results.
const cacheFile = `${cacheDir}/questions-verblet-test-cache-1.json`;
|
|
29
|
+
|
|
30
|
+
// Example-driven cases: each entry supplies inputs for the questions chain and
// the expectations ("want") consumed by the assertions below.
const examples = [
  {
    inputs: {
      text: 'Writing a prompt toolkit for ChatGPT',
      searchBreadth: 0.5,
    },
    // Compared with a strict greater-than below, so more than 10 questions
    // are required for the case to pass.
    want: { minLength: 10 },
  },
];
|
|
39
|
+
|
|
40
|
+
describe('Questions verblet', () => {
  examples.forEach((example) => {
    it(
      example.inputs.text,
      async () => {
        // When set, reuse a cached result so the nondeterministic LLM call
        // only runs once across repeated test runs.
        const canUseCache = process.env.RUN_TESTS_WITH_RANDOMNESS_ONCE;

        const cache = await readFileOrUndefined(cacheFile);

        let result;
        if (canUseCache && cache) {
          result = JSON.parse(cache);
        } else {
          result = await questions(example.inputs.text);
        }

        // Refresh the cache (on a cache hit this rewrites the same payload).
        if (canUseCache) {
          await ensureDirectoryExists(cacheDir);
          await fs.writeFile(cacheFile, JSON.stringify(result));
        }

        if (example.want.minLength) {
          // Strict ">": the result must contain more than minLength questions.
          expect(result.length).gt(example.want.minLength);
        }
      },
      longTestTimeout
    );
  });
});
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
/* eslint-disable no-await-in-loop */
|
|
2
|
+
|
|
3
|
+
import * as R from 'ramda';
|
|
4
|
+
|
|
5
|
+
import chatGPT from '../../lib/chatgpt/index.js';
|
|
6
|
+
import {
|
|
7
|
+
constants as promptConstants,
|
|
8
|
+
generateQuestions as generateQuestionsPrompt,
|
|
9
|
+
} from '../../prompts/index.js';
|
|
10
|
+
import modelService from '../../services/llm-model/index.js';
|
|
11
|
+
import toObject from '../../verblets/to-object/index.js';
|
|
12
|
+
|
|
13
|
+
const { asSplitIntoJSONArray, contentIsChoices, onlyJSON } = promptConstants;
|
|
14
|
+
|
|
15
|
+
// Returns a random subset of a list with length between 1 and the length of
// the list based on an input value between 0 and 1.
//
// Fix: the previous implementation shuffled via a random sort comparator
// (`R.sort(() => Math.random() - 0.5, list)`), which is biased and
// engine-dependent because sort comparators must be consistent. A
// Fisher–Yates shuffle on a copy yields a uniform permutation.
const getRandomSubset = (list, value) => {
  const numItems = Math.round(value * list.length) + 1;
  const listShuffled = [...list];
  for (let i = listShuffled.length - 1; i > 0; i -= 1) {
    const j = Math.floor(Math.random() * (i + 1));
    [listShuffled[i], listShuffled[j]] = [listShuffled[j], listShuffled[i]];
  }
  // slice clamps numItems to the list length, so value === 1 returns all items.
  return listShuffled.slice(0, numItems);
};
|
|
22
|
+
|
|
23
|
+
// Build a prompt asking the model to choose, from `existing`, the one question
// that best serves the original goal question.
const pickInterestingQuestion = (originalQuestion, { existing = [] }) => {
  const choiceLines = [];
  for (const choice of existing) {
    choiceLines.push(` - ${choice}`);
  }
  const existingJoined = choiceLines.join('\n');

  return `Choose one interesting question from the following list of questions. The main goal is to determine "${originalQuestion}".

${contentIsChoices}
\`\`\`
${existingJoined}
\`\`\`
`;
};
|
|
34
|
+
|
|
35
|
+
// Default skip policy: skip any result that has already been collected.
const shouldSkipNull = (result, resultsAll) => resultsAll.includes(result);
|
|
38
|
+
|
|
39
|
+
// Default stop policy: halt once more than 50 results are collected or more
// than 5 generation passes have run. `result` and `resultsNew` are unused here
// but belong to the callback contract shared with custom shouldStop hooks.
// eslint-disable-next-line no-unused-vars
const shouldStopNull = (result, resultsAll, resultsNew, attempts = 0) => {
  if (resultsAll.length > 50) {
    return true;
  }
  return attempts > 5;
};
|
|
42
|
+
|
|
43
|
+
/**
 * Async generator that repeatedly prompts the model for questions about
 * `text`, drilling into previously generated questions between rounds.
 *
 * @param {string} text - Topic or goal question to explore.
 * @param {object} [options]
 * @param {number}   [options.searchBreadth=0.5] - 0..1 fraction of each batch kept.
 * @param {Function} [options.shouldSkip] - async (result, resultsAll) => bool.
 * @param {Function} [options.shouldStop] - async (result, resultsAll, resultsNew, attempts) => bool.
 * @param {object}   [options.model] - LLM model used for token budgeting.
 * @yields {string} Each unique accepted question as soon as it is found.
 */
const generateQuestions = async function* generateQuestionsGenerator(
  text,
  options = {}
) {
  const resultsAll = [];
  const resultsAllMap = {};
  const drilldownResults = [];
  let isDone = false;
  let textSelected = text;

  const {
    searchBreadth = 0.5,
    shouldSkip = shouldSkipNull,
    shouldStop = shouldStopNull,
    model = modelService.getBestAvailableModel(),
  } = options;

  let attempts = 0;
  while (!isDone) {
    // After the first round, drill down: ask the model to pick the most
    // interesting not-yet-explored question and generate from there.
    if (resultsAll.length) {
      const choices = resultsAll.filter((item) => {
        return !drilldownResults.includes(item);
      });
      const pickInterestingQuestionPrompt = pickInterestingQuestion(
        textSelected,
        { existing: choices }
      );
      textSelected = await chatGPT(pickInterestingQuestionPrompt);
      drilldownResults.push(textSelected);
    }

    const promptCreated = generateQuestionsPrompt(textSelected, {
      existing: resultsAll,
    });
    const budget = model.budgetTokens(promptCreated);
    const chatGPTConfig = {
      modelOptions: {
        maxTokens: budget.completion,
        temperature: 1,
      },
    };

    const results = await chatGPT(`${promptCreated}`, chatGPTConfig);
    let resultsParsed;
    try {
      resultsParsed = await toObject(results);
    } catch (error) {
      if (/Unexpected string in JSON/.test(error.message)) {
        // Recoverable: the model returned prose-like output that can be
        // re-split into a JSON array with a follow-up prompt.
        const resultsUpdated = await chatGPT(
          `${asSplitIntoJSONArray}${onlyJSON} \`\`\`${results}\`\`\``,
          chatGPTConfig
        );
        resultsParsed = await toObject(resultsUpdated);
      } else {
        // Fix: previously any other parse error was silently swallowed,
        // leaving resultsParsed undefined and crashing below with a
        // confusing TypeError. Propagate unknown errors instead.
        throw error;
      }
    }
    const resultsNew = getRandomSubset(resultsParsed, searchBreadth);
    if (searchBreadth < 0.5) {
      // Narrow searches also re-seed the drilldown text from the new batch.
      const randomIndex = Math.floor(Math.random() * resultsNew.length);
      textSelected = resultsNew[randomIndex];
    }
    const resultsNewUnique = resultsNew.filter(
      (item) => !(item in resultsAllMap)
    );

    attempts += 1;

    for (const result of resultsNewUnique) {
      if (await shouldStop(result, resultsAll, resultsNew, attempts)) {
        isDone = true;
        break;
      }
      if (!(await shouldSkip(result, resultsAll))) {
        resultsAllMap[result] = true;
        resultsAll.push(result);
        yield result;
      }
    }
  }
};
|
|
122
|
+
|
|
123
|
+
/**
 * Collect every question produced by the generator for `text`, dedupe, and
 * return them sorted alphabetically.
 */
export default async (text, options) => {
  const generator = generateQuestions(text, options);

  const collected = [];
  for await (const question of generator) {
    if (!collected.includes(question)) {
      collected.push(question);
    }
  }

  collected.sort((a, b) => a.localeCompare(b));
  return collected;
};
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
// Testing TBD. Randomness makes it challenging.
//
// The mock-based spec below is kept commented out for reference until a
// deterministic harness exists; only the skipped describe() registers with
// vitest, so this file contributes zero executed assertions.

import { describe } from 'vitest';

/*
import questions from "./index.js";

vi.mock("../../lib/chatgpt/index.js", () => ({
  default: vi.fn().mockImplementation((text) => {
    if (/a prompt toolkit/.test(text)) {
      return "{}";
    }
    return "undefined";
  }),
}));

const examples = [
  {
    name: "Basic usage",
    inputs: {
      text: "Writing a prompt toolkit for ChatGPT",
      searchBreadth: 0.5,
    },
    want: {},
  },
];

*/
describe.skip('Questions verblet', () => {});
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
import fs from 'node:fs/promises';
|
|
2
|
+
import * as R from 'ramda';
|
|
3
|
+
|
|
4
|
+
import sort from '../sort/index.js';
|
|
5
|
+
import chatGPT from '../../lib/chatgpt/index.js';
|
|
6
|
+
import pathAliases from '../../lib/path-aliases/index.js';
|
|
7
|
+
import retry from '../../lib/retry/index.js';
|
|
8
|
+
import search from '../../lib/search-js-files/index.js';
|
|
9
|
+
import stripResponse from '../../lib/strip-response/index.js';
|
|
10
|
+
import codeFeaturesPrompt from '../../prompts/code-features.js';
|
|
11
|
+
import makeJSONSchema from '../../prompts/features-json-schema.js';
|
|
12
|
+
import modelService from '../../services/llm-model/index.js';
|
|
13
|
+
import toObject from '../../verblets/to-object/index.js';
|
|
14
|
+
|
|
15
|
+
// Criteria/property definitions used to score code features, loaded once at
// module load time (top-level await) from the sibling search-js-files lib.
// The URL form resolves the JSON relative to this module, not the CWD.
const codeFeatureDefinitions = JSON.parse(
  await fs.readFile(
    new URL(
      '../../lib/search-js-files/code-features-property-definitions.json',
      import.meta.url
    ),
    'utf-8'
  )
);
|
|
24
|
+
|
|
25
|
+
// Flatten a result object into "key: value" strings, sorted alphabetically.
const organizeResult = (result) => {
  const lines = Object.entries(result).map(
    ([key, value]) => `${key}: ${value}`
  );
  return lines.sort((a, b) => a.localeCompare(b));
};
|
|
31
|
+
|
|
32
|
+
/**
 * Visit one parsed JS node: pick the most relevant feature criteria via the
 * sort chain, ask the model to score the function text against a generated
 * schema, and record the parsed result on a copy of the incoming state.
 *
 * Returns the (new) state object; nodes without a functionName are skipped.
 * NOTE(review): assumes state.pathAliases is populated by the caller (the
 * default export seeds it) — an unseeded state would throw here; confirm.
 */
const visit = async ({
  node,
  state: stateInitial,
  features: featuresInitial = 'maintainability',
}) => {
  if (!node.functionName) {
    // skipping
    return stateInitial;
  }

  // Rank all known criteria by relevance to the requested feature, then keep
  // the top 5 as the scoring dimensions for this node.
  const sortResults = await sort(
    {
      by: `best criteria for looking at "${featuresInitial}" within code`,
      chunkSize: 4,
      extremeK: 4,
    },
    codeFeatureDefinitions.map((d) => d.criteria),
    modelService.getModel('textDavinci003')
  );
  const sortCriteria = sortResults.slice(0, 5);
  const features = codeFeatureDefinitions.filter((def) => {
    return sortCriteria.includes(def.criteria);
  });

  if (!features?.length) {
    throw new Error('Visit [error]: Features list not defined');
  }
  const schema = makeJSONSchema(features);

  // Extract just this function's source text using the node's byte offsets.
  const fileText = await fs.readFile(node.filename, 'utf-8');
  const functionText = fileText.slice(node.start, node.end);

  // Shallow copy: the retry callback below mutates this copy, not the input.
  const state = { ...stateInitial };

  const visitPrompt = codeFeaturesPrompt({
    text: functionText,
    schema,
  });

  // Retry the model call + parse as a unit; each attempt overwrites state[id].
  await retry(async () => {
    const results = await chatGPT(visitPrompt, {
      modelOptions: {
        modelName: 'gpt35Turbo',
      },
    });

    const resultParsed = await toObject(stripResponse(results), schema);

    const id = `${node.filename}:::${node.functionName}`;

    state[id] = resultParsed;
    state.nodesFound = (state.nodesFound ?? 0) + 1;
    state.abbreviations = state.abbreviations ?? {};
    state.abbreviations[id] = state.abbreviations[id] ?? state.nodesFound;

    // Right-align a 50-char tail of the (aliased) id for readable progress logs.
    const idDisplay = (state.pathAliases[id] ?? id).slice(-50).padStart(50);

    console.error(
      `${`${state.nodesFound}`.padEnd(3, ' ')} ${idDisplay}: ${organizeResult(
        resultParsed
      ).join(', ')}`
    );
  });

  return state;
};
|
|
98
|
+
|
|
99
|
+
// node: { filename: './src/index.js' },
// First pass discovers the reachable files; second pass re-walks them with the
// feature-scoring visitor, seeded with path aliases for compact display.
export default async (moduleOptions) => {
  const discovery = await search({ ...moduleOptions });

  const aliases = pathAliases([...discovery.visited]);
  const preState = {
    visited: new Set(),
    pathAliases: aliases,
  };

  const visitWithFeatures = (options) =>
    visit({
      ...options,
      features: moduleOptions.features,
    });

  return search({
    ...moduleOptions,
    state: preState,
    visit: visitWithFeatures,
  });
};
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import { describe, expect, it } from 'vitest';
|
|
2
|
+
|
|
3
|
+
import { longTestTimeout } from '../../constants/common.js';
|
|
4
|
+
import list from '../list/index.js';
|
|
5
|
+
import sort from './index.js';
|
|
6
|
+
|
|
7
|
+
// Example-driven cases for the sort chain; `want.result` is the expected
// outcome of the membership check in the assertion below.
const examples = [
  {
    name: '"The Office" episodes',
    inputs: {
      listText: '"The Office" most famous episodes',
      sortText: 'have scenes that became memes',
    },
    want: { result: true },
  },
];

describe('Sort chain', () => {
  examples.forEach((example) => {
    it(
      example.name,
      async () => {
        const listResults = await list(example.inputs.listText);

        const result = await sort(
          {
            by: example.inputs.sortText,
            iterations: 1,
          },
          listResults
        );

        // Fix: the previous assertion was `expect(result).toStrictEqual(result)`,
        // which compares the value with itself and can never fail. Instead,
        // check that sorting returns an array drawn entirely from the
        // generated list (sort filters/dedupes but never invents items).
        const isSubsetOfInput =
          Array.isArray(result) &&
          result.every((item) => listResults.includes(item));
        expect(isSubsetOfInput).toStrictEqual(example.want.result);
      },
      longTestTimeout
    );
  });
});
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
import * as R from 'ramda';
|
|
2
|
+
|
|
3
|
+
import chatGPT from '../../lib/chatgpt/index.js';
|
|
4
|
+
import toObject from '../../verblets/to-object/index.js';
|
|
5
|
+
import { sort as sortPromptInitial } from '../../prompts/index.js';
|
|
6
|
+
import modelService from '../../services/llm-model/index.js';
|
|
7
|
+
|
|
8
|
+
// redeclared so it's clearer how tests can override the sorter
let sortPrompt = sortPromptInitial;

// How many items are sent to the model in each batch.
export const defaultSortChunkSize = 10;
// How many items are retained at each extreme (top/bottom) per batch.
export const defaultSortExtremeK = 10;
// How many refinement passes over the middle portion are performed.
export const defaultSortIterations = 1;
|
|
14
|
+
|
|
15
|
+
// Keeping this here because it's useful for internal debugging: throws (with
// the offending list as the message) unless the list is in descending
// locale order; lists of length 0 or 1 always pass.
// eslint-disable-next-line no-unused-vars
const assertSorted = (list) => {
  for (let idx = 1; idx < list.length; idx += 1) {
    const previous = list[idx - 1];
    const current = list[idx];
    if (current.localeCompare(previous) > 0) {
      throw new Error(JSON.stringify(list, null, 2));
    }
  }
};
|
|
23
|
+
|
|
24
|
+
// Test hook: replace the real sort prompt with a pass-through that returns
// { options, list }, letting specs (and the chatGPT mock) inspect exactly
// what would have been sent to the model.
export const useTestSortPrompt = () => {
  sortPrompt = (options, list) => ({ options, list });
};
|
|
27
|
+
|
|
28
|
+
// Drop blank/whitespace-only entries and dedupe, preserving first-seen order.
const sanitizeList = (list) => {
  const unique = new Set();
  for (const item of list) {
    if (item.trim() !== '') {
      unique.add(item);
    }
  }
  return [...unique];
};
|
|
31
|
+
|
|
32
|
+
/**
 * Tournament-style LLM sort: each iteration batches the unsorted middle,
 * carries the current best/worst extremes into every batch prompt, and pins
 * the winners/losers to `top`/`bottom`. Items displaced from an extreme are
 * "discarded" back into the middle for the next iteration.
 *
 * @param {object} options - { by, chunkSize?, extremeK?, iterations? }.
 * @param {string[]} listInitial - Items to sort (blanks/dupes removed first).
 * @param {object} [model] - Model used for token budgeting and timeouts.
 * @returns {Promise<string[]>} [...top, ...unresolved middle, ...bottom].
 */
const sort = async (
  options,
  listInitial,
  model = modelService.getBestAvailableModel()
) => {
  const {
    by,
    chunkSize = defaultSortChunkSize,
    extremeK = defaultSortExtremeK,
    iterations = defaultSortIterations,
  } = options;

  const list = sanitizeList(listInitial);
  let i = iterations;
  let top = [];
  let bottom = [];
  let middle = list;

  while (i > 0) {
    // Per-iteration running extremes and the items they displaced.
    let newTop = [];
    let newBottom = [];
    let discardedTop = [];
    let discardedBottom = [];

    for (let j = 0; j < middle.length; j += chunkSize) {
      const batch = middle.slice(j, j + chunkSize);
      // Each batch competes against the current extremes, not just itself.
      const prompt = sortPrompt(
        {
          description: by,
        },
        [...batch, ...newTop, ...newBottom]
      );

      const budget = model.budgetTokens(prompt);

      // eslint-disable-next-line no-await-in-loop
      const result = await chatGPT(prompt, {
        modelOptions: {
          maxTokens: budget.completion,
          requestTimeout: model.requestTimeout * 1.5,
        },
      });

      // eslint-disable-next-line no-await-in-loop
      const batchSorted = await toObject(result);

      const batchTop = batchSorted.slice(0, extremeK);
      const batchBottom = batchSorted.slice(-extremeK);

      // Former extremes that lost their slot get another chance next round.
      discardedTop = [
        ...newTop.filter((x) => !batchTop.includes(x)),
        ...discardedTop,
      ];

      discardedBottom = [
        ...discardedBottom,
        ...newBottom.filter((x) => !batchBottom.includes(x)),
      ];

      newTop = batchTop;
      newBottom = batchBottom;
    }
    // Pin this iteration's extremes; later iterations append inside them.
    top = [...top, ...newTop];
    bottom = [...newBottom, ...bottom];

    // Rebuild the middle from everything not pinned, preserving near-extreme
    // items at the edges so the next iteration sees them first.
    const middleOld = middle.filter((x) => {
      return (
        !newTop.includes(x) &&
        !discardedTop.includes(x) &&
        !discardedBottom.includes(x) &&
        !newBottom.includes(x)
      );
    });
    middle = [...discardedTop, ...middleOld, ...discardedBottom];
    i -= 1;
  }

  const finalList = [...top, ...middle, ...bottom];
  return finalList;
};

export default sort;
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
import { describe, expect, it, vi } from 'vitest';
|
|
2
|
+
import * as R from 'ramda';
|
|
3
|
+
|
|
4
|
+
import sort, { useTestSortPrompt } from './index.js';
|
|
5
|
+
|
|
6
|
+
// Swap in the inspectable test prompt (returns { options, list }) before any
// sorting happens, so the chatGPT mock below can read it structurally.
useTestSortPrompt();

// Shared tuning values for the examples below.
const extremeK = 4;
const chunkSize = 12;
// Reverse-alphabetical comparator — the order the mocked model produces.
const byAB = (a, b) => b.localeCompare(a);

// 50 distinct fixture words — enough to span several chunks per iteration.
const unsortedStrings = [
  'zebra',
  'apple',
  'quail',
  'mango',
  'giraffe',
  'banana',
  'dog',
  'lion',
  'tiger',
  'elephant',
  'kiwi',
  'raspberry',
  'grape',
  'apricot',
  'kangaroo',
  'owl',
  'peacock',
  'xenon',
  'uranium',
  'platinum',
  'walrus',
  'fox',
  'capybara',
  'iguana',
  'jaguar',
  'koi',
  'lobster',
  'moose',
  'nugget',
  'octopus',
  'python',
  'quokka',
  'raccoon',
  'starfish',
  'tortoise',
  'umbrella',
  'vulture',
  'wombat',
  'xerus',
  'yak',
  'zeppelin',
  'ant',
  'beaver',
  'cat',
  'dolphin',
  'echidna',
  'frog',
  'hamster',
  'impala',
  'jellyfish',
];
|
|
64
|
+
|
|
65
|
+
// vitest hoists vi.mock above imports: the chatgpt module is replaced with a
// deterministic fake that "sorts" reverse-alphabetically when asked to sort
// alphabetically, and otherwise returns an empty JSON array.
vi.mock('../../lib/chatgpt/index.js', () => ({
  default: vi.fn().mockImplementation((text) => {
    // `text` is the { options, list } object produced by useTestSortPrompt.
    if (text.options.description === 'alphabetically') {
      const sorted = R.sort((a, b) => b.localeCompare(a), text.list);
      return JSON.stringify(sorted);
    }
    return '[]';
  }),
}));
|
|
74
|
+
|
|
75
|
+
// Each case supplies sort() options plus the expected pinned extremes.
const examples = [
  {
    name: 'Basic usage',
    inputs: {
      options: { by: 'alphabetically', iterations: 3, extremeK, chunkSize },
      list: [...unsortedStrings],
    },
    // Each of the 3 iterations pins extremeK items at each end, so the first
    // and last extremeK * 3 entries should match a full reverse-alpha sort.
    want: {
      highest: R.sort(byAB, [...unsortedStrings]).slice(0, extremeK * 3),
      lowest: R.sort(byAB, [...unsortedStrings]).slice(-(extremeK * 3)),
    },
  },
  {
    name: 'Empty list',
    inputs: {
      options: { by: 'alphabetically', extremeK, chunkSize },
      list: [],
    },
    want: {
      highest: [],
      lowest: [],
    },
  },
];
|
|
99
|
+
|
|
100
|
+
describe('Sort', () => {
  examples.forEach((example) => {
    it(example.name, async () => {
      const iterations = example.inputs.options.iterations ?? 1;
      // Stub model: only budgetTokens is exercised by the mocked pipeline
      // (the mock ignores modelOptions entirely).
      const result = await sort(example.inputs.options, example.inputs.list, {
        budgetTokens: () => ({ completion: 0 }),
      });
      // Only the pinned extremes are guaranteed sorted; the middle is not.
      expect(result.slice(0, extremeK * iterations)).toStrictEqual(
        example.want.highest
      );
      expect(result.slice(-(extremeK * iterations))).toStrictEqual(
        example.want.lowest
      );
    });
  });
});
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
# SummaryMap
|
|
2
|
+
|
|
3
|
+
SummaryMap is a utility class designed to help manage a collection of data elements for AI prompts. It allows you to compress incoming variables based on weights relative to an overall target budget for the variables.
|
|
4
|
+
|
|
5
|
+
## Usage
|
|
6
|
+
|
|
7
|
+
```javascript
|
|
8
|
+
import SummaryMap from '../summary-map/index.js';
|
|
9
|
+
import chatGPT from '../../lib/chatgpt/index.js';
|
|
10
|
+
import pave from '../../lib/pave/index.js';
|
|
11
|
+
import modelService from '../../services/llm-model/index.js';
|
|
12
|
+
|
|
13
|
+
const promptFunction = (data) => {
|
|
14
|
+
return `Please solve a problem for me with the following input data:
|
|
15
|
+
${data.example.text}
|
|
16
|
+
|
|
17
|
+
Reference the following code:
|
|
18
|
+
${data.example.code}`;
|
|
19
|
+
};
|
|
20
|
+
|
|
21
|
+
const variableTokens = 100;
|
|
22
|
+
const promptTokens = modelService.getBestAvailableModel().toTokens(promptFunction).length;
|
|
23
|
+
const solutionTokens = 200;
|
|
24
|
+
const maxTokens = promptTokens + variableTokens + solutionTokens;
|
|
25
|
+
|
|
26
|
+
const summaryMap = new SummaryMap({ targetTokens: variableTokens });
|
|
27
|
+
summaryMap.set('example.text', { value: 'Long text data...', weight: 1, type: 'text' });
|
|
28
|
+
summaryMap.set('example.code', { value: 'Long code data...', weight: 0.5, type: 'code' });
|
|
29
|
+
|
|
30
|
+
const promptInputs = await summaryMap.pavedSummaryResult();
|
|
31
|
+
const prompt = promptFunction(promptInputs);
|
|
32
|
+
const response = await chatGPT(prompt, { modelOptions: { maxTokens }});
|
|
33
|
+
```
|