@digipair/skill-dsp 0.8.23 → 0.8.24

This diff shows the changes between publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
package/index.cjs.js CHANGED
@@ -23510,14 +23510,14 @@ function indent(str, spaces) {
  var match = parseIdentifier(input, i1, namePart) || namePart && parseAdditionalSymbol(input, i1) || maybeSpace && parseSpaces(input, i1);
  // match is required
  if (!match) {
- return nextMatch = nextMatch1, tokens = tokens1, i = i1, {
+ return tokens = tokens1, nextMatch = nextMatch1, i = i1, {
  v: nextMatch1
  };
  }
  var token = match.token, offset = match.offset;
  i1 += offset;
  if (token === " ") {
- return nextMatch = nextMatch1, tokens = tokens1, i = i1, "continue";
+ return tokens = tokens1, nextMatch = nextMatch1, i = i1, "continue";
  }
  tokens1 = _to_consumable_array$1(tokens1).concat([
  token
@@ -23536,7 +23536,7 @@ function indent(str, spaces) {
  if (contextKeys.some(function(el) {
  return el.startsWith(name);
  })) {
- return nextMatch = nextMatch1, tokens = tokens1, i = i1, "continue";
+ return tokens = tokens1, nextMatch = nextMatch1, i = i1, "continue";
  }
  if (dateTimeIdentifiers.some(function(el) {
  return el === name;
@@ -23555,9 +23555,9 @@ function indent(str, spaces) {
  if (dateTimeIdentifiers.some(function(el) {
  return el.startsWith(name);
  })) {
- return nextMatch = nextMatch1, tokens = tokens1, i = i1, "continue";
+ return tokens = tokens1, nextMatch = nextMatch1, i = i1, "continue";
  }
- return nextMatch = nextMatch1, tokens = tokens1, i = i1, {
+ return tokens = tokens1, nextMatch = nextMatch1, i = i1, {
  v: nextMatch1
  };
  };
@@ -27464,23 +27464,49 @@ const preparePinsSettings = async (settings, context)=>{
  };
 
  let DspService = class DspService {
- async model(params, _pinsSettingsList, context) {
+ async model(params, _pinsSettingsList, _context) {
  const { AI } = await eval(`import('llmclient')`);
- var _context_privates_OLLAMA_SERVER, _ref;
- const { apiKey = 'none', modelName = 'mistral', temperature = 0, keepAlive = 0, baseUrl = (_ref = (_context_privates_OLLAMA_SERVER = context.privates.OLLAMA_SERVER) != null ? _context_privates_OLLAMA_SERVER : process.env['OLLAMA_SERVER']) != null ? _ref : 'http://localhost:11434', debug = false } = params;
- const model = AI('openai', {
+ const { name, options } = params;
+ const modelInstance = AI(name, options);
+ return modelInstance;
+ }
+ async modelOpenAI(params, _pinsSettingsList, context) {
+ const { OpenAI } = await eval(`import('llmclient')`);
+ var _context_privates_OPENAI_API_KEY, _context_privates_OPENAI_SERVER;
+ const { apiKey = (_context_privates_OPENAI_API_KEY = context.privates.OPENAI_API_KEY) != null ? _context_privates_OPENAI_API_KEY : process.env['OPENAI_API_KEY'], apiURL = (_context_privates_OPENAI_SERVER = context.privates.OPENAI_SERVER) != null ? _context_privates_OPENAI_SERVER : process.env['OPENAI_SERVER'], config, options } = params;
+ const modelInstance = OpenAI({
  apiKey,
- apiURL: baseUrl + '/v1',
- config: {
- model: modelName,
- temperature,
- keepAlive
- },
- options: {
- debug
- }
+ apiURL,
+ config,
+ options
+ });
+ return modelInstance;
+ }
+ async modelAzureOpenAi(params, _pinsSettingsList, context) {
+ const { AzureOpenAi } = await eval(`import('llmclient')`);
+ var _context_privates_AZURE_OPENAI_API_KEY, _context_privates_AZURE_OPENAI_API_INSTANCE_NAME, _context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME, _context_privates_AZURE_OPENAI_API_VERSION;
+ const { apiKey = (_context_privates_AZURE_OPENAI_API_KEY = context.privates.AZURE_OPENAI_API_KEY) != null ? _context_privates_AZURE_OPENAI_API_KEY : process.env['AZURE_OPENAI_API_KEY'], resourceName = (_context_privates_AZURE_OPENAI_API_INSTANCE_NAME = context.privates.AZURE_OPENAI_API_INSTANCE_NAME) != null ? _context_privates_AZURE_OPENAI_API_INSTANCE_NAME : process.env['AZURE_OPENAI_API_INSTANCE_NAME'], deploymentName = (_context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME = context.privates.AZURE_OPENAI_API_DEPLOYMENT_NAME) != null ? _context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME : process.env['AZURE_OPENAI_API_DEPLOYMENT_NAME'], version = (_context_privates_AZURE_OPENAI_API_VERSION = context.privates.AZURE_OPENAI_API_VERSION) != null ? _context_privates_AZURE_OPENAI_API_VERSION : process.env['AZURE_OPENAI_API_VERSION'], config, options } = params;
+ const modelInstance = AzureOpenAi({
+ apiKey,
+ resourceName,
+ deploymentName,
+ version,
+ config,
+ options
+ });
+ return modelInstance;
+ }
+ async modelOllama(params, _pinsSettingsList, context) {
+ const { Ollama } = await eval(`import('llmclient')`);
+ const { model, url = context.privates.OLLAMA_SERVER ? context.privates.OLLAMA_SERVER + '/v1' : process.env['OLLAMA_SERVER'] ? process.env['OLLAMA_SERVER'] + '/v1' : 'http://localhost:11434/v1', apiKey, config, options } = params;
+ const modelInstance = Ollama({
+ model,
+ url,
+ apiKey,
+ config,
+ options
  });
- return model;
+ return modelInstance;
  }
  async generate(params, _pinsSettingsList, context) {
  const { Generate } = await eval(`import('llmclient')`);
@@ -27492,7 +27518,13 @@ let DspService = class DspService {
  }
  };
  const model = (params, pinsSettingsList, context)=>new DspService().model(params, pinsSettingsList, context);
+ const modelOpenAI = (params, pinsSettingsList, context)=>new DspService().modelOpenAI(params, pinsSettingsList, context);
+ const modelAzureOpenAi = (params, pinsSettingsList, context)=>new DspService().modelAzureOpenAi(params, pinsSettingsList, context);
+ const modelOllama = (params, pinsSettingsList, context)=>new DspService().modelOllama(params, pinsSettingsList, context);
  const generate = (params, pinsSettingsList, context)=>new DspService().generate(params, pinsSettingsList, context);
 
  exports.generate = generate;
  exports.model = model;
+ exports.modelAzureOpenAi = modelAzureOpenAi;
+ exports.modelOllama = modelOllama;
+ exports.modelOpenAI = modelOpenAI;
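Taken together, the index.cjs.js changes split the old Ollama-backed `model` service into a generic llmclient factory plus three provider-specific ones. A minimal sketch of how the new CJS exports might be called follows; the `context` object shape and the parameter values are assumptions inferred from the diff above, not documented API.

// Hedged sketch: `context.privates` shape and llmclient semantics are
// inferred from the diff above, not from documentation.
const { model, modelOllama } = require('@digipair/skill-dsp');

async function demo() {
  const context = { privates: {} }; // hypothetical minimal Digipair context

  // As of 0.8.24, `model` simply forwards `name` and `options` to
  // llmclient's generic AI(name, options) factory.
  const generic = await model({ name: 'openai', options: { apiKey: 'sk-...' } }, [], context);

  // The previous Ollama-specific behaviour now lives in `modelOllama`;
  // `url` falls back to context.privates.OLLAMA_SERVER + '/v1', then
  // process.env.OLLAMA_SERVER + '/v1', then 'http://localhost:11434/v1'.
  const ollama = await modelOllama({ model: 'mistral' }, [], context);

  return { generic, ollama };
}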
package/index.esm.js CHANGED
@@ -27442,23 +27442,49 @@ const preparePinsSettings = async (settings, context)=>{
  };
 
  let DspService = class DspService {
- async model(params, _pinsSettingsList, context) {
+ async model(params, _pinsSettingsList, _context) {
  const { AI } = await eval(`import('llmclient')`);
- var _context_privates_OLLAMA_SERVER, _ref;
- const { apiKey = 'none', modelName = 'mistral', temperature = 0, keepAlive = 0, baseUrl = (_ref = (_context_privates_OLLAMA_SERVER = context.privates.OLLAMA_SERVER) != null ? _context_privates_OLLAMA_SERVER : process.env['OLLAMA_SERVER']) != null ? _ref : 'http://localhost:11434', debug = false } = params;
- const model = AI('openai', {
+ const { name, options } = params;
+ const modelInstance = AI(name, options);
+ return modelInstance;
+ }
+ async modelOpenAI(params, _pinsSettingsList, context) {
+ const { OpenAI } = await eval(`import('llmclient')`);
+ var _context_privates_OPENAI_API_KEY, _context_privates_OPENAI_SERVER;
+ const { apiKey = (_context_privates_OPENAI_API_KEY = context.privates.OPENAI_API_KEY) != null ? _context_privates_OPENAI_API_KEY : process.env['OPENAI_API_KEY'], apiURL = (_context_privates_OPENAI_SERVER = context.privates.OPENAI_SERVER) != null ? _context_privates_OPENAI_SERVER : process.env['OPENAI_SERVER'], config, options } = params;
+ const modelInstance = OpenAI({
  apiKey,
- apiURL: baseUrl + '/v1',
- config: {
- model: modelName,
- temperature,
- keepAlive
- },
- options: {
- debug
- }
+ apiURL,
+ config,
+ options
+ });
+ return modelInstance;
+ }
+ async modelAzureOpenAi(params, _pinsSettingsList, context) {
+ const { AzureOpenAi } = await eval(`import('llmclient')`);
+ var _context_privates_AZURE_OPENAI_API_KEY, _context_privates_AZURE_OPENAI_API_INSTANCE_NAME, _context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME, _context_privates_AZURE_OPENAI_API_VERSION;
+ const { apiKey = (_context_privates_AZURE_OPENAI_API_KEY = context.privates.AZURE_OPENAI_API_KEY) != null ? _context_privates_AZURE_OPENAI_API_KEY : process.env['AZURE_OPENAI_API_KEY'], resourceName = (_context_privates_AZURE_OPENAI_API_INSTANCE_NAME = context.privates.AZURE_OPENAI_API_INSTANCE_NAME) != null ? _context_privates_AZURE_OPENAI_API_INSTANCE_NAME : process.env['AZURE_OPENAI_API_INSTANCE_NAME'], deploymentName = (_context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME = context.privates.AZURE_OPENAI_API_DEPLOYMENT_NAME) != null ? _context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME : process.env['AZURE_OPENAI_API_DEPLOYMENT_NAME'], version = (_context_privates_AZURE_OPENAI_API_VERSION = context.privates.AZURE_OPENAI_API_VERSION) != null ? _context_privates_AZURE_OPENAI_API_VERSION : process.env['AZURE_OPENAI_API_VERSION'], config, options } = params;
+ const modelInstance = AzureOpenAi({
+ apiKey,
+ resourceName,
+ deploymentName,
+ version,
+ config,
+ options
+ });
+ return modelInstance;
+ }
+ async modelOllama(params, _pinsSettingsList, context) {
+ const { Ollama } = await eval(`import('llmclient')`);
+ const { model, url = context.privates.OLLAMA_SERVER ? context.privates.OLLAMA_SERVER + '/v1' : process.env['OLLAMA_SERVER'] ? process.env['OLLAMA_SERVER'] + '/v1' : 'http://localhost:11434/v1', apiKey, config, options } = params;
+ const modelInstance = Ollama({
+ model,
+ url,
+ apiKey,
+ config,
+ options
  });
- return model;
+ return modelInstance;
  }
  async generate(params, _pinsSettingsList, context) {
  const { Generate } = await eval(`import('llmclient')`);
@@ -27470,6 +27496,9 @@ let DspService = class DspService {
  }
  };
  const model = (params, pinsSettingsList, context)=>new DspService().model(params, pinsSettingsList, context);
+ const modelOpenAI = (params, pinsSettingsList, context)=>new DspService().modelOpenAI(params, pinsSettingsList, context);
+ const modelAzureOpenAi = (params, pinsSettingsList, context)=>new DspService().modelAzureOpenAi(params, pinsSettingsList, context);
+ const modelOllama = (params, pinsSettingsList, context)=>new DspService().modelOllama(params, pinsSettingsList, context);
  const generate = (params, pinsSettingsList, context)=>new DspService().generate(params, pinsSettingsList, context);
 
- export { generate, model };
+ export { generate, model, modelAzureOpenAi, modelOllama, modelOpenAI };
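The ESM build exposes the same four factories. A sketch of the Azure variant follows, under the same caveats: parameter values are illustrative, and the fallback order is taken from the destructuring defaults visible in the diff.

// Hedged sketch: values are illustrative; fields omitted from params resolve
// from context.privates.*, then from the matching AZURE_OPENAI_API_* env vars.
import { modelAzureOpenAi } from '@digipair/skill-dsp';

const context = { privates: { AZURE_OPENAI_API_KEY: '...' } }; // hypothetical

const azure = await modelAzureOpenAi(
  { resourceName: 'my-resource', deploymentName: 'gpt-4o', version: '2024-02-01' },
  [],
  context
);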
@@ -1,3 +1,6 @@
  import { PinsSettings } from '@digipair/engine';
  export declare const model: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
+ export declare const modelOpenAI: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
+ export declare const modelAzureOpenAi: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
+ export declare const modelOllama: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
  export declare const generate: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@digipair/skill-dsp",
- "version": "0.8.23",
+ "version": "0.8.24",
  "dependencies": {},
  "main": "./index.cjs.js",
  "module": "./index.esm.js"
package/schema.json CHANGED
@@ -11,10 +11,10 @@
  "/model": {
  "post": {
  "tags": ["service"],
- "summary": "Modèle DSP",
+ "summary": "Modèle Générique",
  "parameters": [
  {
- "name": "modelName",
+ "name": "name",
  "summary": "Nom du model",
  "required": false,
  "description": "Nom du model LLM à utiliser pour le résumé système",
@@ -23,22 +23,175 @@
  }
  },
  {
- "name": "temperature",
- "summary": "Temperature",
+ "name": "options",
+ "summary": "Options",
  "required": false,
- "description": "Temperature du model LLM",
+ "description": "Options du modèle LLM",
  "schema": {
- "type": "number"
+ "type": "object"
+ }
+ }
+ ],
+ "x-events": []
+ }
+ },
+ "/modelOpenAi": {
+ "post": {
+ "tags": ["service"],
+ "summary": "Modèle Générique",
+ "parameters": [
+ {
+ "name": "apiKey",
+ "summary": "Api Key",
+ "required": false,
+ "description": "Api Key OpenAI",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "apiURL",
+ "summary": "Adresse du serveur",
+ "required": false,
+ "description": "Adresse du serveur OpenAI",
+ "schema": {
+ "type": "object"
+ }
+ },
+ {
+ "name": "config",
+ "summary": "Configuration",
+ "required": false,
+ "description": "Configuration du modèle OpenAI",
+ "schema": {
+ "type": "object"
+ }
+ },
+ {
+ "name": "options",
+ "summary": "Options",
+ "required": false,
+ "description": "Options du modèle OpenAI",
+ "schema": {
+ "type": "object"
+ }
+ }
+ ],
+ "x-events": []
+ }
+ },
+ "/modelAzureOpenAi": {
+ "post": {
+ "tags": ["service"],
+ "summary": "Modèle Générique",
+ "parameters": [
+ {
+ "name": "apiKey",
+ "summary": "Api Key",
+ "required": false,
+ "description": "Api Key Azure OpenAI",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "resourceName",
+ "summary": "Nom de la ressource",
+ "required": false,
+ "description": "Nom de la ressource Azure OpenAI",
+ "schema": {
+ "type": "string"
  }
  },
  {
- "name": "baseUrl",
- "summary": "Url du serveur LLM",
+ "name": "deploymentName",
+ "summary": "Nom du déploiement",
  "required": false,
- "description": "Url du serveur LLM compatible Openai",
+ "description": "Nom du déploiement Azure OpenAI",
  "schema": {
  "type": "string"
  }
+ },
+ {
+ "name": "version",
+ "summary": "Version",
+ "required": false,
+ "description": "Version d'API OpenAI",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "config",
+ "summary": "Configuration",
+ "required": false,
+ "description": "Configuration du modèle OpenAI",
+ "schema": {
+ "type": "object"
+ }
+ },
+ {
+ "name": "options",
+ "summary": "Options",
+ "required": false,
+ "description": "Options du modèle OpenAI",
+ "schema": {
+ "type": "object"
+ }
+ }
+ ],
+ "x-events": []
+ }
+ },
+ "/modelOllama": {
+ "post": {
+ "tags": ["service"],
+ "summary": "Modèle Générique",
+ "parameters": [
+ {
+ "name": "model",
+ "summary": "Modèle",
+ "required": false,
+ "description": "Nom du modèle Ollama à utiliser pour la génération",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "url",
+ "summary": "Adresse du serveur",
+ "required": false,
+ "description": "Addresse du serveur Ollama à utiliser pour la génération",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "apiKey",
+ "summary": "Api Key",
+ "required": false,
+ "description": "Api Key Ollama",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "config",
+ "summary": "Configuration",
+ "required": false,
+ "description": "Configuration du modèle Ollama",
+ "schema": {
+ "type": "object"
+ }
+ },
+ {
+ "name": "options",
+ "summary": "Options",
+ "required": false,
+ "description": "Options du modèle Ollama",
+ "schema": {
+ "type": "object"
+ }
  }
  ],
  "x-events": []
@@ -47,13 +200,13 @@
  "/generate": {
  "post": {
  "tags": ["service"],
- "summary": "generate",
+ "summary": "Génération DSP",
  "parameters": [
  {
  "name": "model",
- "summary": "Modèle DSP",
+ "summary": "Modèle",
  "required": false,
- "description": "Modèle DSP à utiliser pour la génération",
+ "description": "Modèle LLM à utiliser pour la génération",
  "schema": {
  "type": "string"
  }
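For reference, a parameters object matching the new /modelOllama schema entry above might look like the sketch below; the field names come from the schema, while the values and the contents of config/options are invented for illustration.

// Hedged example: field names from the schema above; values are illustrative.
const modelOllamaParams = {
  model: 'mistral',                  // "Nom du modèle Ollama à utiliser"
  url: 'http://localhost:11434/v1',  // "Adresse du serveur Ollama"
  apiKey: 'none',                    // "Api Key Ollama"
  config: { temperature: 0 },        // free-form object per the schema
  options: { debug: false }          // free-form object per the schema
};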