@saltcorn/copilot 0.6.0 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/page-gen-action.js +38 -3
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@saltcorn/copilot",
3
- "version": "0.6.0",
3
+ "version": "0.6.1",
4
4
  "description": "AI assistant for building Saltcorn applications",
5
5
  "main": "index.js",
6
6
  "dependencies": {
@@ -27,12 +27,20 @@ module.exports = {
27
27
  fieldview: "textarea",
28
28
  required: true,
29
29
  },
30
+ {
31
+ name: "image_prompt",
32
+ label: "Prompt image files",
33
+ sublabel:
34
+ "Optional. An expression, based on the context, for file path or array of file paths for prompting",
35
+ class: "validate-expression",
36
+ type: "String",
37
+ },
30
38
  {
31
39
  name: "answer_field",
32
40
  label: "Answer variable",
33
41
  sublabel: "Optional. Set the generated HTML to this context variable",
42
+ class: "validate-identifier",
34
43
  type: "String",
35
- required: true,
36
44
  },
37
45
  // ...override_fields,
38
46
  {
@@ -85,6 +93,7 @@ module.exports = {
85
93
  prompt_formula,
86
94
  prompt_template,
87
95
  answer_field,
96
+ image_prompt,
88
97
  chat_history_field,
89
98
  model,
90
99
  },
@@ -96,7 +105,7 @@ module.exports = {
96
105
  prompt_formula,
97
106
  row,
98
107
  user,
99
- "llm_generate prompt formula"
108
+ "copilot_generate_page prompt formula"
100
109
  );
101
110
  else prompt = row[prompt_field];
102
111
  const opts = {};
@@ -113,9 +122,34 @@ module.exports = {
113
122
  },
114
123
  });
115
124
  const { llm_generate } = getState().functions;
125
+ let chat;
126
+ if (image_prompt) {
127
+ const from_ctx = eval_expression(
128
+ image_prompt,
129
+ row,
130
+ user,
131
+ "copilot_generate_page image prompt"
132
+ );
116
133
 
134
+ chat = [];
135
+ for (const image of Array.isArray(from_ctx) ? from_ctx : [from_ctx]) {
136
+ const file = await File.findOne({ name: image });
137
+ const imageurl = await file.get_contents("base64");
138
+
139
+ chat.push({
140
+ role: "user",
141
+ content: [
142
+ {
143
+ type: "image",
144
+ image: `data:${file.mimetype};base64,${imageurl}`,
145
+ },
146
+ ],
147
+ });
148
+ }
149
+ }
117
150
  const initial_ans = await llm_generate.run(prompt, {
118
151
  tools,
152
+ chat,
119
153
  systemPrompt,
120
154
  });
121
155
  const initial_info = initial_ans.tool_calls[0].input;
@@ -139,6 +173,7 @@ module.exports = {
139
173
  Just generate HTML code, do not wrap in markdown code tags`,
140
174
  {
141
175
  debugResult: true,
176
+ chat,
142
177
  response_format: full.response_schema
143
178
  ? {
144
179
  type: "json_schema",
@@ -170,7 +205,7 @@ module.exports = {
170
205
  min_role: 100,
171
206
  layout: { html_file: file.path_to_serve },
172
207
  });
173
- getState().refresh_pages()
208
+ getState().refresh_pages();
174
209
  }
175
210
  const upd = answer_field ? { [answer_field]: page_html } : {};
176
211
  if (mode === "workflow") return upd;