@digipair/skill-llm 0.90.0 → 0.91.0-0

This diff shows the contents of publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
package/.swcrc ADDED
@@ -0,0 +1,28 @@
+ {
+   "jsc": {
+     "target": "es2017",
+     "parser": {
+       "syntax": "typescript",
+       "decorators": true,
+       "dynamicImport": true
+     },
+     "transform": {
+       "decoratorMetadata": true,
+       "legacyDecorator": true
+     },
+     "keepClassNames": true,
+     "externalHelpers": true,
+     "loose": true
+   },
+   "module": {
+     "type": "es6"
+   },
+   "sourceMaps": true,
+   "exclude": [
+     "jest.config.ts",
+     ".*\\.spec.tsx?$",
+     ".*\\.test.tsx?$",
+     "./src/jest-setup.ts$",
+     "./**/jest-setup.ts$"
+   ]
+ }
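
This .swcrc configures the SWC compiler that the Rollup build further down selects via compiler: 'swc'. As a minimal sketch (not part of the package; it assumes @swc/core is installed, and omits externalHelpers because that option additionally requires @swc/helpers at runtime), the same options can be passed to @swc/core programmatically:

// Minimal sketch: the .swcrc options above applied via @swc/core's transform().
import { transform } from '@swc/core';

const { code, map } = await transform('export class Foo {}', {
  jsc: {
    target: 'es2017',
    parser: { syntax: 'typescript', decorators: true, dynamicImport: true },
    transform: { decoratorMetadata: true, legacyDecorator: true },
    keepClassNames: true,
    loose: true,
  },
  module: { type: 'es6' },
  sourceMaps: true,
});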
package/README.md ADDED
@@ -0,0 +1,7 @@
+ # mylib
+
+ This library was generated with [Nx](https://nx.dev).
+
+ ## Building
+
+ Run `nx build mylib` to build the library.
package/eslint.config.mjs ADDED
@@ -0,0 +1,22 @@
+ import baseConfig from '../../eslint.config.mjs';
+
+ export default [
+   ...baseConfig,
+   {
+     files: ['**/*.json'],
+     rules: {
+       '@nx/dependency-checks': [
+         'error',
+         {
+           ignoredFiles: [
+             '{projectRoot}/eslint.config.{js,cjs,mjs}',
+             '{projectRoot}/rollup.config.{js,ts,mjs,mts,cjs,cts}',
+           ],
+         },
+       ],
+     },
+     languageOptions: {
+       parser: await import('jsonc-eslint-parser'),
+     },
+   },
+ ];
package/package.json CHANGED
@@ -1,12 +1,28 @@
  {
    "name": "@digipair/skill-llm",
-   "version": "0.90.0",
+   "version": "0.91.0-0",
+   "type": "module",
+   "main": "dist/libs/skill-llm/index.cjs.js",
+   "module": "dist/libs/skill-llm/index.esm.js",
+   "types": "dist/libs/skill-llm/index.esm.d.ts",
+   "exports": {
+     "./package.json": "./libs/skill-llm/package.json",
+     ".": {
+       "development": "./dist/libs/skill-llm/src/index.ts",
+       "types": "./dist/libs/skill-llm/index.esm.d.ts",
+       "import": "./dist/libs/skill-llm/index.esm.js",
+       "default": "./dist/libs/skill-llm/index.cjs.js"
+     }
+   },
    "keywords": [
      "digipair",
-     "service",
-     "tool"
+     "tool",
+     "service"
    ],
-   "dependencies": {},
-   "main": "./index.cjs.js",
-   "module": "./index.esm.js"
- }
+   "nx": {
+     "name": "skill-llm"
+   },
+   "dependencies": {
+     "@digipair/engine": "0.91.0-0"
+   }
+ }
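
The packaging change is the core of this release: 0.90.0 shipped bare main/module fields, while 0.91.0-0 declares "type": "module", points main/module/types into dist/libs/skill-llm, adds a conditional exports map, and pins a runtime dependency on @digipair/engine at the same prerelease version. A hypothetical consumer-side sketch (not part of the diff) of how the conditions resolve:

// Hypothetical consumer code. Under the new "exports" map:
//   "import"  condition -> ./dist/libs/skill-llm/index.esm.js
//   "types"   condition -> ./dist/libs/skill-llm/index.esm.d.ts
//   "default" condition -> ./dist/libs/skill-llm/index.cjs.js (e.g. require())
import { basic, invoke, summarization, vision } from '@digipair/skill-llm';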
@@ -0,0 +1,28 @@
+ const { withNx } = require('@nx/rollup/with-nx');
+
+ module.exports = withNx(
+   {
+     main: 'libs/skill-llm/src/index.ts',
+     outputPath: 'dist/libs/skill-llm',
+     tsConfig: 'libs/skill-llm/tsconfig.lib.json',
+     compiler: 'swc',
+     format: ['esm', "cjs"],
+     assets: [
+       {
+         input: 'libs/skill-llm/',
+         glob: 'package.json',
+         output: '.'
+       },
+       {
+         input: 'libs/skill-llm/src/',
+         glob: '*.json',
+         output: '.'
+       }
+     ]
+   },
+   {
+     // Provide additional rollup configuration here. See: https://rollupjs.org/configuration-options
+     // e.g.
+     // output: { sourcemap: true },
+   }
+ );
@@ -0,0 +1 @@
+ declare module 'handlebars/dist/handlebars.min.js';
package/src/lib/skill-llm.spec.ts ADDED
@@ -0,0 +1,7 @@
+ import { skillLlm } from './skill-llm';
+
+ describe('skillLlm', () => {
+   it('should work', () => {
+     expect(skillLlm()).toEqual('skill-llm');
+   });
+ });
package/src/lib/skill-llm.ts ADDED
@@ -0,0 +1,329 @@
+ /* eslint-disable @typescript-eslint/no-unused-vars */
+ import { PinsSettings, executePinsList } from '@digipair/engine';
+ import { HumanMessage } from '@langchain/core/messages';
+ import { PromptTemplate } from '@langchain/core/prompts';
+ import { RunnableSequence } from '@langchain/core/runnables';
+ import { loadSummarizationChain } from 'langchain/chains';
+ import { StructuredOutputParser } from 'langchain/output_parsers';
+ import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
+ import { z } from 'zod';
+
+ class LLMService {
+   private objectToInput(obj: Record<string, any>): Record<string, () => any> {
+     const result: Record<string, () => any> = {};
+
+     for (const key in obj) {
+       if (Object.prototype.hasOwnProperty.call(obj, key)) {
+         result[key] = () => obj[key];
+       }
+     }
+
+     return result;
+   }
+
+   private jsonSchemaToZod(schema: any): any {
+     const zodProps: Record<string, any> = {};
+
+     switch (schema.type) {
+       case 'string':
+         return z.string().optional();
+       case 'number':
+         return z.number().optional();
+       case 'boolean':
+         return z.boolean().optional();
+       case 'object':
+         for (const prop in schema.properties) {
+           zodProps[prop] = this.jsonSchemaToZod(schema.properties[prop]);
+
+           if (schema.properties[prop].description) {
+             zodProps[prop] = zodProps[prop].describe(schema.properties[prop].description);
+           }
+         }
+         return z
+           .object(zodProps)
+           .required(
+             (schema.required ?? []).reduce(
+               (acc: any, reqProp: any) => ({ ...acc, [reqProp]: true }),
+               {},
+             ),
+           )
+           .optional();
+       case 'array':
+         if (schema.items) {
+           return z.array(this.jsonSchemaToZod(schema.items)).optional();
+         }
+         return z.array(z.unknown()).optional();
+       default:
+         throw new Error(`Unsupported JSON Schema type: ${schema.type}`);
+     }
+   }
+
+   async invoke(params: any, _pinsSettingsList: PinsSettings[], context: any) {
+     const { execute, input = {} }: { execute: PinsSettings[]; input: any } = params;
+     const chain = RunnableSequence.from([
+       this.objectToInput(input),
+       ...(await Promise.all(
+         execute.map((pinsSettings: PinsSettings, i: number) =>
+           executePinsList([pinsSettings], context, `${context.__PATH__}.execute[${i}]`),
+         ),
+       )),
+     ] as any);
+
+     let model: string;
+     let service: string;
+     const result = await chain.invoke(
+       {},
+       {
+         callbacks: [
+           {
+             handleChatModelStart: async (
+               { id }: any,
+               _1: any,
+               _2: any,
+               _3: any,
+               extrasParams: any,
+             ) => {
+               model = (extrasParams?.['invocation_params'] as any).model;
+               service = id[2];
+             },
+             handleLLMStart: async ({ id }: any, _1: any, _2: any, _3: any, extrasParams: any) => {
+               model = (extrasParams?.['invocation_params'] as any).model;
+               service = id[2];
+             },
+             handleLLMEnd: async (infos: any) => {
+               const { completionTokens, promptTokens } = infos.llmOutput?.['tokenUsage'] || {
+                 completionTokens: 0,
+                 promptTokens: 0,
+               };
+               const skillLogger = require('@digipair/skill-logger');
+               await skillLogger.addConsumption(
+                 context,
+                 service,
+                 model,
+                 promptTokens,
+                 completionTokens,
+               );
+             },
+           },
+         ],
+       },
+     );
+
+     return result;
+   }
+
+   async reasoningStep(params: any, _pinsSettingsList: PinsSettings[], context: any) {
+     const { attributes } = params;
+     const data: { [key: string]: any } = {};
+     let i = 0;
+
+     for (const attribute of attributes) {
+       data[attribute.name] = async (previous: any) =>
+         await executePinsList(
+           attribute.value,
+           {
+             ...context,
+             previous,
+             parent: { previous: context.previous, parent: context.parent },
+           },
+           `${context.__PATH__}.attributes[${i}]`,
+         );
+       i++;
+     }
+
+     return data;
+   }
+
+   async basic(params: any, _pins: PinsSettings[], context: any) {
+     const { model, prompt, schema } = params;
+     let chain: RunnableSequence<any, any>;
+
+     if (!schema) {
+       const modelInstance = await executePinsList(
+         model ?? context.privates.MODEL_LLM,
+         context,
+         `${context.__PATH__}.model`,
+       );
+
+       chain = RunnableSequence.from([
+         PromptTemplate.fromTemplate(prompt ?? '{prompt}'),
+         modelInstance,
+       ]);
+     } else {
+       const modelInstance = await executePinsList(
+         model ?? context.privates.MODEL_LLM_JSON ?? context.privates.MODEL_LLM,
+         context,
+         `${context.__PATH__}.model`,
+       );
+       const parser = new StructuredOutputParser(this.jsonSchemaToZod(schema) as any);
+
+       chain = RunnableSequence.from([
+         PromptTemplate.fromTemplate(
+           `${prompt ?? '{prompt}'}
+
+ Answer the users question as best as possible.
+ {format_instructions}
+
+ JSON:`,
+           {
+             partialVariables: {
+               format_instructions: parser.getFormatInstructions(),
+             },
+           },
+         ),
+         modelInstance,
+         parser,
+       ]);
+     }
+
+     return chain;
+   }
+
+   async vision(params: any, _pins: PinsSettings[], context: any) {
+     const { model, prompt, schema, image } = params;
+     let chain: RunnableSequence<any, any>;
+
+     if (!schema) {
+       const modelInstance = await executePinsList(
+         model ?? context.privates.MODEL_VISION ?? context.privates.MODEL_LLM,
+         context,
+         `${context.__PATH__}.model`,
+       );
+
+       chain = RunnableSequence.from([
+         PromptTemplate.fromTemplate(prompt ?? '{prompt}'),
+         (text: any) => [
+           new HumanMessage({
+             content: [
+               {
+                 type: 'text',
+                 text: text.value,
+               },
+               {
+                 type: 'image_url',
+                 image_url: {
+                   url: image,
+                 },
+               },
+             ],
+           }),
+         ],
+         modelInstance,
+       ]);
+     } else {
+       const modelInstance = await executePinsList(
+         model ??
+           context.privates.MODEL_VISION_JSON ??
+           context.privates.MODEL_VISION ??
+           context.privates.MODEL_LLM_JSON ??
+           context.privates.MODEL_LLM,
+         context,
+         `${context.__PATH__}.model`,
+       );
+       const parser = new StructuredOutputParser(this.jsonSchemaToZod(schema) as any);
+
+       chain = RunnableSequence.from([
+         PromptTemplate.fromTemplate(
+           `${prompt ?? '{prompt}'}
+
+ Answer the users question as best as possible.
+ {format_instructions}
+
+ JSON:`,
+           {
+             partialVariables: {
+               format_instructions: parser.getFormatInstructions(),
+             },
+           },
+         ),
+         (text: any) => [
+           new HumanMessage({
+             content: [
+               {
+                 type: 'text',
+                 text: text.value,
+               },
+               {
+                 type: 'image_url',
+                 image_url: {
+                   url: image,
+                 },
+               },
+             ],
+           }),
+         ],
+         modelInstance,
+         parser,
+       ]);
+     }
+
+     return chain;
+   }
+
+   async summarization(params: any, _pins: PinsSettings[], context: any) {
+     const {
+       model = context.privates.MODEL_LLM,
+       chunkSize = 1024,
+
+       type = 'map_reduce',
+       verbose = false,
+
+       prompt,
+
+       combineMapPrompt,
+       combinePrompt,
+       returnIntermediateSteps,
+
+       refinePrompt,
+       questionPrompt,
+     } = params;
+
+     const modelInstance = await executePinsList(model, context, `${context.__PATH__}.model`);
+     const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize });
+
+     const summarizationChain = loadSummarizationChain(modelInstance, {
+       type,
+       verbose,
+
+       prompt: !prompt ? undefined : (PromptTemplate.fromTemplate(prompt) as any),
+
+       combineMapPrompt: !combineMapPrompt
+         ? undefined
+         : (PromptTemplate.fromTemplate(combineMapPrompt) as any),
+       combinePrompt: !combinePrompt
+         ? undefined
+         : (PromptTemplate.fromTemplate(combinePrompt) as any),
+       returnIntermediateSteps,
+
+       refinePrompt: !refinePrompt ? undefined : (PromptTemplate.fromTemplate(refinePrompt) as any),
+       questionPrompt: !questionPrompt
+         ? undefined
+         : (PromptTemplate.fromTemplate(questionPrompt) as any),
+     });
+
+     const chain = RunnableSequence.from([
+       {
+         input_documents: async ({ document }: any) =>
+           await textSplitter.createDocuments([document]),
+       },
+       summarizationChain as any,
+     ]);
+
+     return chain;
+   }
+ }
+
+ export const invoke = (params: any, pinsSettingsList: PinsSettings[], context: any) =>
+   new LLMService().invoke(params, pinsSettingsList, context);
+
+ export const reasoningStep = (params: any, pinsSettingsList: PinsSettings[], context: any) =>
+   new LLMService().reasoningStep(params, pinsSettingsList, context);
+
+ export const basic = (params: any, pinsSettingsList: PinsSettings[], context: any) =>
+   new LLMService().basic(params, pinsSettingsList, context);
+
+ export const vision = (params: any, pinsSettingsList: PinsSettings[], context: any) =>
+   new LLMService().vision(params, pinsSettingsList, context);
+
+ export const summarization = (params: any, pinsSettingsList: PinsSettings[], context: any) =>
+   new LLMService().summarization(params, pinsSettingsList, context);
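
For orientation, a minimal usage sketch of the exported basic factory (not from the package: the context shape with privates.MODEL_LLM and __PATH__ is inferred from the source above, and the model pins settings are placeholders):

// Minimal sketch, assuming a PinsSettings list that resolves to a
// LangChain chat model. `basic` returns a RunnableSequence:
// prompt -> model, plus a StructuredOutputParser when a schema is given.
import { basic } from '@digipair/skill-llm';

const context: any = {
  privates: { MODEL_LLM: [] as any[] }, // placeholder pins settings
  __PATH__: 'example',
};

const chain = await basic(
  {
    prompt: 'Extract the city mentioned in: {text}',
    schema: {
      type: 'object',
      properties: { city: { type: 'string', description: 'name of the city' } },
      required: ['city'],
    },
  },
  [],
  context,
);

// jsonSchemaToZod turns the schema above into roughly
// z.object({ city: z.string().optional().describe('name of the city') })
//   .required({ city: true }).optional()
const result = await chain.invoke({ text: 'It rains a lot in Bergen.' });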
package/tsconfig.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "extends": "../../tsconfig.base.json",
+   "files": [],
+   "include": [],
+   "references": [
+     {
+       "path": "../skill-logger"
+     },
+     {
+       "path": "../engine"
+     },
+     {
+       "path": "./tsconfig.lib.json"
+     }
+   ]
+ }
package/tsconfig.lib.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "extends": "../../tsconfig.base.json",
+   "compilerOptions": {
+     "rootDir": "src",
+     "outDir": "dist",
+     "tsBuildInfoFile": "dist/tsconfig.lib.tsbuildinfo",
+     "emitDeclarationOnly": true,
+     "module": "esnext",
+     "moduleResolution": "node",
+     "forceConsistentCasingInFileNames": true,
+     "types": ["node"]
+   },
+   "include": ["src/**/*.ts"],
+   "references": [
+     {
+       "path": "../skill-logger/tsconfig.lib.json"
+     },
+     {
+       "path": "../engine/tsconfig.lib.json"
+     }
+   ]
+ }