@saltcorn/large-language-model 0.1.0 → 0.2.1

This diff shows the changes between package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/generate.js CHANGED
@@ -1,4 +1,4 @@
-const axios = require("axios");
+const fetch = require("node-fetch");
 const util = require("util");
 const exec = util.promisify(require("child_process").exec);
 const db = require("@saltcorn/data/db");
@@ -21,10 +21,10 @@ const getCompletion = async (config, opts) => {
           bearer: config.bearer,
           model: config.model,
         },
-
         opts
       );
     case "Local llama.cpp":
+      //TODO only check if unsafe plugins not allowed
      const isRoot = db.getTenantSchema() === db.connectObj.default_schema;
      if (!isRoot)
        throw new Error(
@@ -52,12 +52,10 @@ const getCompletionOpenAICompatible = async (
 ) => {
   const headers = {
     "Content-Type": "application/json",
+    Accept: "application/json",
   };
   if (bearer) headers.Authorization = "Bearer " + bearer;
-  const client = axios.create({
-    headers,
-  });
-  const params = {
+  const body = {
     //prompt: "How are you?",
     model,
     messages: [
@@ -69,9 +67,15 @@ const getCompletionOpenAICompatible = async (
     ],
     temperature: temperature || 0.7,
   };
+  const rawResponse = await fetch(chatCompleteEndpoint, {
+    method: "POST",
+    headers,
+    body: JSON.stringify(body),
+  });
+  const results = await rawResponse.json();
+  console.log(results);
 
-  const results = await client.post(chatCompleteEndpoint, params);
-  return results?.data?.choices?.[0]?.message?.content;
+  return results?.choices?.[0]?.message?.content;
 };
 
 module.exports = { getCompletion };
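
Note: the rewritten getCompletionOpenAICompatible posts the request directly with node-fetch and reads the completion from the parsed JSON body. A minimal standalone sketch of the same call pattern follows; the endpoint URL, model name, and OPENAI_API_KEY environment variable are illustrative assumptions, not values taken from the package:

    const fetch = require("node-fetch");

    async function chatComplete(prompt) {
      const headers = {
        "Content-Type": "application/json",
        Accept: "application/json",
        // Assumed: bearer token supplied via environment variable
        Authorization: "Bearer " + process.env.OPENAI_API_KEY,
      };
      const rawResponse = await fetch(
        "https://api.openai.com/v1/chat/completions",
        {
          method: "POST",
          headers,
          body: JSON.stringify({
            model: "gpt-3.5-turbo",
            messages: [{ role: "user", content: prompt }],
            temperature: 0.7,
          }),
        }
      );
      const results = await rawResponse.json();
      // OpenAI-compatible servers nest the reply under choices[0].message.content
      return results?.choices?.[0]?.message?.content;
    }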
package/index.js CHANGED
@@ -98,8 +98,10 @@ const functions = (config) => ({
     arguments: [{ name: "prompt", type: "String" }],
   },
 });
+
 module.exports = {
   sc_plugin_api_version: 1,
   configuration_workflow,
   functions,
+  modelpatterns: require("./model.js"),
 };
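
Note: the new modelpatterns key exposes the model pattern defined in model.js (added below) through the plugin's exported surface. A quick sketch for inspecting it, assuming the package is installed locally:

    const plugin = require("@saltcorn/large-language-model");
    console.log(Object.keys(plugin));
    // -> ["sc_plugin_api_version", "configuration_workflow", "functions", "modelpatterns"]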
package/model.js ADDED
@@ -0,0 +1,149 @@
1
+ const { div } = require("@saltcorn/markup/tags");
2
+
3
+ const Workflow = require("@saltcorn/data/models/workflow");
4
+ const Table = require("@saltcorn/data/models/table");
5
+ const Form = require("@saltcorn/data/models/form");
6
+
7
+ const util = require("util");
8
+ const path = require("path");
9
+ const os = require("os");
10
+ const fs = require("fs");
11
+
12
+ const _ = require("underscore");
13
+
14
+ const { getCompletion } = require("./generate");
15
+
16
+ const configuration_workflow = (config) => (req) =>
17
+ new Workflow({
18
+ steps: [
19
+ {
20
+ name: "Predictors",
21
+ form: async (context) => {
22
+ const table = await Table.findOne(
23
+ context.table_id
24
+ ? { id: context.table_id }
25
+ : { name: context.exttable_name }
26
+ );
27
+ //console.log(context);
28
+ const int_field_options = table.fields.filter(
29
+ (f) => f.type?.name === "Integer"
30
+ );
31
+ let models = [];
32
+ if (config.backend === "Local llama.cpp") {
33
+ models = fs.readdirSync(path.join(config.llama_dir, "models"));
34
+ } else if (config.backend === "OpenAI") {
35
+ models = [
36
+ "gpt-3.5-turbo",
37
+ "gpt-3.5-turbo-16k",
38
+ "gpt-4",
39
+ "gpt-4-32k",
40
+ ];
41
+ }
42
+ return new Form({
43
+ fields: [
44
+ {
45
+ label: "Prompt template",
46
+ name: "prompt_template",
47
+ type: "String",
48
+ fieldview: "textarea",
49
+ sublabel: div(
50
+ "Use handlebars to access fields. Example: <code>My name is {{name}}. How are you?</code>. Variables in scope: " +
51
+ table.fields.map((f) => `<code>${f.name}</code>`).join(", ")
52
+ ),
53
+ },
54
+ ...(config.backend === "Local llama.cpp"
55
+ ? [
56
+ {
57
+ label: "Num. tokens field",
58
+ name: "ntokens_field",
59
+ type: "String",
60
+ attributes: {
61
+ options: int_field_options.map((f) => f.name),
62
+ },
63
+ sublabel:
64
+ "Override number of tokens set in instance parameters with value in this field, if chosen",
65
+ },
66
+ ]
67
+ : []),
68
+ {
69
+ label: "Model",
70
+ name: "model",
71
+ type: "String",
72
+ required: true,
73
+ attributes: { options: models },
74
+ },
75
+ ],
76
+ });
77
+ },
78
+ },
79
+ ],
80
+ });
81
+
82
+ const modelpatterns = (config) => ({
83
+ LargeLanguageModel: {
84
+ prediction_outputs: ({ configuration }) => [
85
+ { name: "output", type: "String" },
86
+ { name: "prompt", type: "String" },
87
+ ],
88
+ configuration_workflow: configuration_workflow(config),
89
+ hyperparameter_fields: ({ table, configuration }) => [
90
+ ...(config.backend === "Local llama.cpp"
91
+ ? [
92
+ {
93
+ name: "ntokens",
94
+ label: "Num tokens",
95
+ type: "Integer",
96
+ attributes: { min: 1 },
97
+ required: true,
98
+ default: 128,
99
+ sublabel: "Can be overridden by number of tokens field, if set",
100
+ },
101
+ {
102
+ name: "repeat_penalty",
103
+ label: "Repeat penalty",
104
+ type: "Float",
105
+ attributes: { min: 0 },
106
+ default: 1.1,
107
+ },
108
+ ]
109
+ : []),
110
+ {
111
+ name: "temp",
112
+ label: "Temperature",
113
+ type: "Float",
114
+ attributes: { min: 0 },
115
+ default: 0.8,
116
+ },
117
+ ],
118
+ predict: async ({
119
+ id, //instance id
120
+ model: {
121
+ configuration: { prompt_template, ntokens_field, model },
122
+ table_id,
123
+ },
124
+ hyperparameters,
125
+ fit_object,
126
+ rows,
127
+ }) => {
128
+ const results = [];
129
+ const template = _.template(prompt_template || "", {
130
+ evaluate: /\{\{#(.+?)\}\}/g,
131
+ interpolate: /\{\{([^#].+?)\}\}/g,
132
+ });
133
+ const mdlConfig = { ...config };
134
+ if (hyperparameters.temp) mdlConfig.temperature = hyperparameters.temp;
135
+ const opts = { ...hyperparameters };
136
+ if (model) opts.model = model;
137
+ for (const row of rows) {
138
+ const prompt = template(row);
139
+
+        const output = await getCompletion(mdlConfig, { ...opts, prompt });
+
+        results.push({ output, prompt });
+      }
+      return results;
+    },
+  },
+});
+
+module.exports = modelpatterns;
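
Note: predict renders each row's prompt with underscore's _.template, with the delimiters overridden so that {{field}} interpolates row values and {{#...}} blocks are evaluated, matching the handlebars-style syntax promised in the "Prompt template" sublabel. A standalone sketch of that templating setup:

    const _ = require("underscore");

    // Same delimiter overrides as in predict above
    const template = _.template("My name is {{name}}. How are you?", {
      evaluate: /\{\{#(.+?)\}\}/g,
      interpolate: /\{\{([^#].+?)\}\}/g,
    });

    console.log(template({ name: "Tom" }));
    // -> "My name is Tom. How are you?"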
package/package.json CHANGED
@@ -1,11 +1,11 @@
 {
   "name": "@saltcorn/large-language-model",
-  "version": "0.1.0",
+  "version": "0.2.1",
   "description": "Large language models and functionality for Saltcorn",
   "main": "index.js",
   "dependencies": {
     "@saltcorn/data": "^0.9.0",
-    "axios": "0.16.2",
+    "node-fetch": "2.6.9",
     "underscore": "1.13.6"
   },
   "author": "Tom Nielsen",