@digipair/skill-dsp 0.8.22 → 0.8.24
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.cjs.js +51 -20
- package/index.esm.js +49 -21
- package/libs/skill-dsp/src/lib/skill-dsp.d.ts +3 -0
- package/package.json +1 -1
- package/schema.json +165 -12
package/index.cjs.js
CHANGED
@@ -23510,14 +23510,14 @@ function indent(str, spaces) {
         var match = parseIdentifier(input, i1, namePart) || namePart && parseAdditionalSymbol(input, i1) || maybeSpace && parseSpaces(input, i1);
         // match is required
         if (!match) {
-            return
+            return tokens = tokens1, nextMatch = nextMatch1, i = i1, {
                 v: nextMatch1
             };
         }
         var token = match.token, offset = match.offset;
         i1 += offset;
         if (token === " ") {
-            return
+            return tokens = tokens1, nextMatch = nextMatch1, i = i1, "continue";
         }
         tokens1 = _to_consumable_array$1(tokens1).concat([
             token
@@ -23536,7 +23536,7 @@ function indent(str, spaces) {
         if (contextKeys.some(function(el) {
             return el.startsWith(name);
         })) {
-            return
+            return tokens = tokens1, nextMatch = nextMatch1, i = i1, "continue";
         }
         if (dateTimeIdentifiers.some(function(el) {
             return el === name;
@@ -23555,9 +23555,9 @@ function indent(str, spaces) {
         if (dateTimeIdentifiers.some(function(el) {
             return el.startsWith(name);
         })) {
-            return
+            return tokens = tokens1, nextMatch = nextMatch1, i = i1, "continue";
         }
-        return
+        return tokens = tokens1, nextMatch = nextMatch1, i = i1, {
             v: nextMatch1
         };
     };
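A note on the hunks above: the bundle is produced by a transpiler that hoists loop bodies into helper closures, which is why the changed lines read as comma expressions. The helper writes the loop state (`tokens`, `nextMatch`, `i`) back to the outer scope, then returns a sentinel: the string "continue" stands for a `continue` in the real loop, and a boxed object `{ v: ... }` stands for a `return v` from the enclosing function. (The bare `return` shown on the 0.8.22 side may simply be truncated by the diff viewer, as other removed lines clearly are.) A minimal sketch of the pattern, with hypothetical names, not code from this package:

// Hypothetical illustration of the transpiled-loop pattern (not package code).
function firstEven(items) {
    var i;
    var _loop = function(i1) {
        if (items[i1] % 2 !== 0) {
            // write loop state back, then signal `continue` to the outer loop
            return i = i1, "continue";
        }
        // write state back, then signal `return items[i1]` from firstEven
        return i = i1, { v: items[i1] };
    };
    for (i = 0; i < items.length; i++) {
        var ret = _loop(i);
        if (ret === "continue") continue;
        if (typeof ret === "object") return ret.v;
    }
}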
@@ -27463,29 +27463,54 @@ const preparePinsSettings = async (settings, context)=>{
     });
 };
 
-var _process_env_OLLAMA_SERVER;
-const OLLAMA_SERVER = (_process_env_OLLAMA_SERVER = process.env['OLLAMA_SERVER']) != null ? _process_env_OLLAMA_SERVER : 'http://localhost:11434';
 let DspService = class DspService {
     async model(params, _pinsSettingsList, _context) {
         const { AI } = await eval(`import('llmclient')`);
-        const {
-        const
+        const { name, options } = params;
+        const modelInstance = AI(name, options);
+        return modelInstance;
+    }
+    async modelOpenAI(params, _pinsSettingsList, context) {
+        const { OpenAI } = await eval(`import('llmclient')`);
+        var _context_privates_OPENAI_API_KEY, _context_privates_OPENAI_SERVER;
+        const { apiKey = (_context_privates_OPENAI_API_KEY = context.privates.OPENAI_API_KEY) != null ? _context_privates_OPENAI_API_KEY : process.env['OPENAI_API_KEY'], apiURL = (_context_privates_OPENAI_SERVER = context.privates.OPENAI_SERVER) != null ? _context_privates_OPENAI_SERVER : process.env['OPENAI_SERVER'], config, options } = params;
+        const modelInstance = OpenAI({
             apiKey,
-        apiURL
-        config
-
-
-
-
-
-
-
+            apiURL,
+            config,
+            options
+        });
+        return modelInstance;
+    }
+    async modelAzureOpenAi(params, _pinsSettingsList, context) {
+        const { AzureOpenAi } = await eval(`import('llmclient')`);
+        var _context_privates_AZURE_OPENAI_API_KEY, _context_privates_AZURE_OPENAI_API_INSTANCE_NAME, _context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME, _context_privates_AZURE_OPENAI_API_VERSION;
+        const { apiKey = (_context_privates_AZURE_OPENAI_API_KEY = context.privates.AZURE_OPENAI_API_KEY) != null ? _context_privates_AZURE_OPENAI_API_KEY : process.env['AZURE_OPENAI_API_KEY'], resourceName = (_context_privates_AZURE_OPENAI_API_INSTANCE_NAME = context.privates.AZURE_OPENAI_API_INSTANCE_NAME) != null ? _context_privates_AZURE_OPENAI_API_INSTANCE_NAME : process.env['AZURE_OPENAI_API_INSTANCE_NAME'], deploymentName = (_context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME = context.privates.AZURE_OPENAI_API_DEPLOYMENT_NAME) != null ? _context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME : process.env['AZURE_OPENAI_API_DEPLOYMENT_NAME'], version = (_context_privates_AZURE_OPENAI_API_VERSION = context.privates.AZURE_OPENAI_API_VERSION) != null ? _context_privates_AZURE_OPENAI_API_VERSION : process.env['AZURE_OPENAI_API_VERSION'], config, options } = params;
+        const modelInstance = AzureOpenAi({
+            apiKey,
+            resourceName,
+            deploymentName,
+            version,
+            config,
+            options
+        });
+        return modelInstance;
+    }
+    async modelOllama(params, _pinsSettingsList, context) {
+        const { Ollama } = await eval(`import('llmclient')`);
+        const { model, url = context.privates.OLLAMA_SERVER ? context.privates.OLLAMA_SERVER + '/v1' : process.env['OLLAMA_SERVER'] ? process.env['OLLAMA_SERVER'] + '/v1' : 'http://localhost:11434/v1', apiKey, config, options } = params;
+        const modelInstance = Ollama({
+            model,
+            url,
+            apiKey,
+            config,
+            options
         });
-        return
+        return modelInstance;
     }
     async generate(params, _pinsSettingsList, context) {
         const { Generate } = await eval(`import('llmclient')`);
-        const { model = context.privates.
+        const { model = context.privates.MODEL_DSP, signature, input } = params;
         const modelInstance = await executePinsList(model, context);
         const gen = new Generate(modelInstance, signature);
         const result = await gen.forward(input);
@@ -27493,7 +27518,13 @@ let DspService = class DspService {
     }
 };
 const model = (params, pinsSettingsList, context)=>new DspService().model(params, pinsSettingsList, context);
+const modelOpenAI = (params, pinsSettingsList, context)=>new DspService().modelOpenAI(params, pinsSettingsList, context);
+const modelAzureOpenAi = (params, pinsSettingsList, context)=>new DspService().modelAzureOpenAi(params, pinsSettingsList, context);
+const modelOllama = (params, pinsSettingsList, context)=>new DspService().modelOllama(params, pinsSettingsList, context);
 const generate = (params, pinsSettingsList, context)=>new DspService().generate(params, pinsSettingsList, context);
 
 exports.generate = generate;
 exports.model = model;
+exports.modelAzureOpenAi = modelAzureOpenAi;
+exports.modelOllama = modelOllama;
+exports.modelOpenAI = modelOpenAI;
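For orientation, a sketch of how the widened CommonJS surface might be called. The parameter names (`model`, `apiKey`, `options`, ...) and the `context.privates` / environment-variable fallbacks come straight from the diff above; the runner function and the concrete model name are illustrative only:

// Illustrative only: exercises the new 0.8.24 exports under Node/CommonJS.
const { modelOllama, modelOpenAI } = require('@digipair/skill-dsp');

async function demo() {
    // context.privates.OLLAMA_SERVER wins over process.env.OLLAMA_SERVER,
    // and '/v1' is appended, per the destructuring defaults above.
    const context = { privates: { OLLAMA_SERVER: 'http://localhost:11434' } };
    const ollama = await modelOllama({ model: 'mistral' }, [], context);

    // With no apiKey in params or context.privates, the code falls back
    // to process.env.OPENAI_API_KEY.
    const openai = await modelOpenAI({}, [], { privates: {} });
    return { ollama, openai };
}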
package/index.esm.js
CHANGED
@@ -23488,14 +23488,14 @@ function indent(str, spaces) {
         var match = parseIdentifier(input, i1, namePart) || namePart && parseAdditionalSymbol(input, i1) || maybeSpace && parseSpaces(input, i1);
         // match is required
         if (!match) {
-            return
+            return nextMatch = nextMatch1, i = i1, tokens = tokens1, {
                 v: nextMatch1
             };
         }
         var token = match.token, offset = match.offset;
         i1 += offset;
         if (token === " ") {
-            return
+            return nextMatch = nextMatch1, i = i1, tokens = tokens1, "continue";
         }
         tokens1 = _to_consumable_array$1(tokens1).concat([
             token
@@ -23514,7 +23514,7 @@ function indent(str, spaces) {
         if (contextKeys.some(function(el) {
             return el.startsWith(name);
         })) {
-            return
+            return nextMatch = nextMatch1, i = i1, tokens = tokens1, "continue";
         }
         if (dateTimeIdentifiers.some(function(el) {
             return el === name;
@@ -23533,9 +23533,9 @@ function indent(str, spaces) {
         if (dateTimeIdentifiers.some(function(el) {
             return el.startsWith(name);
         })) {
-            return
+            return nextMatch = nextMatch1, i = i1, tokens = tokens1, "continue";
         }
-        return
+        return nextMatch = nextMatch1, i = i1, tokens = tokens1, {
             v: nextMatch1
         };
     };
@@ -27441,29 +27441,54 @@ const preparePinsSettings = async (settings, context)=>{
     });
 };
 
-var _process_env_OLLAMA_SERVER;
-const OLLAMA_SERVER = (_process_env_OLLAMA_SERVER = process.env['OLLAMA_SERVER']) != null ? _process_env_OLLAMA_SERVER : 'http://localhost:11434';
 let DspService = class DspService {
     async model(params, _pinsSettingsList, _context) {
         const { AI } = await eval(`import('llmclient')`);
-        const {
-        const
+        const { name, options } = params;
+        const modelInstance = AI(name, options);
+        return modelInstance;
+    }
+    async modelOpenAI(params, _pinsSettingsList, context) {
+        const { OpenAI } = await eval(`import('llmclient')`);
+        var _context_privates_OPENAI_API_KEY, _context_privates_OPENAI_SERVER;
+        const { apiKey = (_context_privates_OPENAI_API_KEY = context.privates.OPENAI_API_KEY) != null ? _context_privates_OPENAI_API_KEY : process.env['OPENAI_API_KEY'], apiURL = (_context_privates_OPENAI_SERVER = context.privates.OPENAI_SERVER) != null ? _context_privates_OPENAI_SERVER : process.env['OPENAI_SERVER'], config, options } = params;
+        const modelInstance = OpenAI({
             apiKey,
-        apiURL
-        config
-
-
-
-
-
-
-
+            apiURL,
+            config,
+            options
+        });
+        return modelInstance;
+    }
+    async modelAzureOpenAi(params, _pinsSettingsList, context) {
+        const { AzureOpenAi } = await eval(`import('llmclient')`);
+        var _context_privates_AZURE_OPENAI_API_KEY, _context_privates_AZURE_OPENAI_API_INSTANCE_NAME, _context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME, _context_privates_AZURE_OPENAI_API_VERSION;
+        const { apiKey = (_context_privates_AZURE_OPENAI_API_KEY = context.privates.AZURE_OPENAI_API_KEY) != null ? _context_privates_AZURE_OPENAI_API_KEY : process.env['AZURE_OPENAI_API_KEY'], resourceName = (_context_privates_AZURE_OPENAI_API_INSTANCE_NAME = context.privates.AZURE_OPENAI_API_INSTANCE_NAME) != null ? _context_privates_AZURE_OPENAI_API_INSTANCE_NAME : process.env['AZURE_OPENAI_API_INSTANCE_NAME'], deploymentName = (_context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME = context.privates.AZURE_OPENAI_API_DEPLOYMENT_NAME) != null ? _context_privates_AZURE_OPENAI_API_DEPLOYMENT_NAME : process.env['AZURE_OPENAI_API_DEPLOYMENT_NAME'], version = (_context_privates_AZURE_OPENAI_API_VERSION = context.privates.AZURE_OPENAI_API_VERSION) != null ? _context_privates_AZURE_OPENAI_API_VERSION : process.env['AZURE_OPENAI_API_VERSION'], config, options } = params;
+        const modelInstance = AzureOpenAi({
+            apiKey,
+            resourceName,
+            deploymentName,
+            version,
+            config,
+            options
+        });
+        return modelInstance;
+    }
+    async modelOllama(params, _pinsSettingsList, context) {
+        const { Ollama } = await eval(`import('llmclient')`);
+        const { model, url = context.privates.OLLAMA_SERVER ? context.privates.OLLAMA_SERVER + '/v1' : process.env['OLLAMA_SERVER'] ? process.env['OLLAMA_SERVER'] + '/v1' : 'http://localhost:11434/v1', apiKey, config, options } = params;
+        const modelInstance = Ollama({
+            model,
+            url,
+            apiKey,
+            config,
+            options
         });
-        return
+        return modelInstance;
     }
     async generate(params, _pinsSettingsList, context) {
         const { Generate } = await eval(`import('llmclient')`);
-        const { model = context.privates.
+        const { model = context.privates.MODEL_DSP, signature, input } = params;
         const modelInstance = await executePinsList(model, context);
         const gen = new Generate(modelInstance, signature);
         const result = await gen.forward(input);
@@ -27471,6 +27496,9 @@ let DspService = class DspService {
     }
 };
 const model = (params, pinsSettingsList, context)=>new DspService().model(params, pinsSettingsList, context);
+const modelOpenAI = (params, pinsSettingsList, context)=>new DspService().modelOpenAI(params, pinsSettingsList, context);
+const modelAzureOpenAi = (params, pinsSettingsList, context)=>new DspService().modelAzureOpenAi(params, pinsSettingsList, context);
+const modelOllama = (params, pinsSettingsList, context)=>new DspService().modelOllama(params, pinsSettingsList, context);
 const generate = (params, pinsSettingsList, context)=>new DspService().generate(params, pinsSettingsList, context);
 
-export { generate, model };
+export { generate, model, modelAzureOpenAi, modelOllama, modelOpenAI };
package/libs/skill-dsp/src/lib/skill-dsp.d.ts
CHANGED
@@ -1,3 +1,6 @@
 import { PinsSettings } from '@digipair/engine';
 export declare const model: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
+export declare const modelOpenAI: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
+export declare const modelAzureOpenAi: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
+export declare const modelOllama: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
 export declare const generate: (params: any, pinsSettingsList: PinsSettings[], context: any) => Promise<any>;
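The ESM build exposes the same surface; a hypothetical consumer of the typings above would import:

// Hypothetical consumer (ESM build) of the declarations above.
import { model, modelOpenAI, modelAzureOpenAi, modelOllama, generate } from '@digipair/skill-dsp';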
package/package.json
CHANGED
package/schema.json
CHANGED
@@ -11,10 +11,10 @@
     "/model": {
       "post": {
         "tags": ["service"],
-        "summary": "Modèle
+        "summary": "Modèle Générique",
         "parameters": [
           {
-            "name": "
+            "name": "name",
             "summary": "Nom du model",
             "required": false,
             "description": "Nom du model LLM à utiliser pour le résumé système",
@@ -23,22 +23,175 @@
             }
           },
           {
-            "name": "
-            "summary": "
+            "name": "options",
+            "summary": "Options",
             "required": false,
-            "description": "
+            "description": "Options du modèle LLM",
             "schema": {
-              "type": "
+              "type": "object"
+            }
+          }
+        ],
+        "x-events": []
+      }
+    },
+    "/modelOpenAi": {
+      "post": {
+        "tags": ["service"],
+        "summary": "Modèle Générique",
+        "parameters": [
+          {
+            "name": "apiKey",
+            "summary": "Api Key",
+            "required": false,
+            "description": "Api Key OpenAI",
+            "schema": {
+              "type": "string"
+            }
+          },
+          {
+            "name": "apiURL",
+            "summary": "Adresse du serveur",
+            "required": false,
+            "description": "Adresse du serveur OpenAI",
+            "schema": {
+              "type": "object"
+            }
+          },
+          {
+            "name": "config",
+            "summary": "Configuration",
+            "required": false,
+            "description": "Configuration du modèle OpenAI",
+            "schema": {
+              "type": "object"
+            }
+          },
+          {
+            "name": "options",
+            "summary": "Options",
+            "required": false,
+            "description": "Options du modèle OpenAI",
+            "schema": {
+              "type": "object"
+            }
+          }
+        ],
+        "x-events": []
+      }
+    },
+    "/modelAzureOpenAi": {
+      "post": {
+        "tags": ["service"],
+        "summary": "Modèle Générique",
+        "parameters": [
+          {
+            "name": "apiKey",
+            "summary": "Api Key",
+            "required": false,
+            "description": "Api Key Azure OpenAI",
+            "schema": {
+              "type": "string"
+            }
+          },
+          {
+            "name": "resourceName",
+            "summary": "Nom de la ressource",
+            "required": false,
+            "description": "Nom de la ressource Azure OpenAI",
+            "schema": {
+              "type": "string"
             }
           },
           {
-            "name": "
-            "summary": "
+            "name": "deploymentName",
+            "summary": "Nom du déploiement",
             "required": false,
-            "description": "
+            "description": "Nom du déploiement Azure OpenAI",
             "schema": {
               "type": "string"
             }
+          },
+          {
+            "name": "version",
+            "summary": "Version",
+            "required": false,
+            "description": "Version d'API OpenAI",
+            "schema": {
+              "type": "string"
+            }
+          },
+          {
+            "name": "config",
+            "summary": "Configuration",
+            "required": false,
+            "description": "Configuration du modèle OpenAI",
+            "schema": {
+              "type": "object"
+            }
+          },
+          {
+            "name": "options",
+            "summary": "Options",
+            "required": false,
+            "description": "Options du modèle OpenAI",
+            "schema": {
+              "type": "object"
+            }
+          }
+        ],
+        "x-events": []
+      }
+    },
+    "/modelOllama": {
+      "post": {
+        "tags": ["service"],
+        "summary": "Modèle Générique",
+        "parameters": [
+          {
+            "name": "model",
+            "summary": "Modèle",
+            "required": false,
+            "description": "Nom du modèle Ollama à utiliser pour la génération",
+            "schema": {
+              "type": "string"
+            }
+          },
+          {
+            "name": "url",
+            "summary": "Adresse du serveur",
+            "required": false,
+            "description": "Addresse du serveur Ollama à utiliser pour la génération",
+            "schema": {
+              "type": "string"
+            }
+          },
+          {
+            "name": "apiKey",
+            "summary": "Api Key",
+            "required": false,
+            "description": "Api Key Ollama",
+            "schema": {
+              "type": "string"
+            }
+          },
+          {
+            "name": "config",
+            "summary": "Configuration",
+            "required": false,
+            "description": "Configuration du modèle Ollama",
+            "schema": {
+              "type": "object"
+            }
+          },
+          {
+            "name": "options",
+            "summary": "Options",
+            "required": false,
+            "description": "Options du modèle Ollama",
+            "schema": {
+              "type": "object"
+            }
           }
         ],
         "x-events": []
@@ -47,13 +200,13 @@
     "/generate": {
       "post": {
         "tags": ["service"],
-        "summary": "
+        "summary": "Génération DSP",
         "parameters": [
           {
             "name": "model",
-            "summary": "Modèle
+            "summary": "Modèle",
             "required": false,
-            "description": "Modèle
+            "description": "Modèle LLM à utiliser pour la génération",
             "schema": {
               "type": "string"
             }