@aj-archipelago/cortex 1.3.21 → 1.3.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/README.md +64 -0
  2. package/config.js +26 -1
  3. package/helper-apps/cortex-realtime-voice-server/src/cortex/memory.ts +2 -2
  4. package/helper-apps/cortex-realtime-voice-server/src/realtime/client.ts +9 -4
  5. package/helper-apps/cortex-realtime-voice-server/src/realtime/realtimeTypes.ts +1 -0
  6. package/lib/util.js +5 -25
  7. package/package.json +5 -2
  8. package/pathways/system/entity/memory/shared/sys_memory_helpers.js +228 -0
  9. package/pathways/system/entity/memory/sys_memory_format.js +30 -0
  10. package/pathways/system/entity/memory/sys_memory_manager.js +85 -27
  11. package/pathways/system/entity/memory/sys_memory_process.js +154 -0
  12. package/pathways/system/entity/memory/sys_memory_required.js +4 -2
  13. package/pathways/system/entity/memory/sys_memory_topic.js +22 -0
  14. package/pathways/system/entity/memory/sys_memory_update.js +50 -150
  15. package/pathways/system/entity/memory/sys_read_memory.js +67 -69
  16. package/pathways/system/entity/memory/sys_save_memory.js +1 -1
  17. package/pathways/system/entity/memory/sys_search_memory.js +1 -1
  18. package/pathways/system/entity/sys_entity_start.js +9 -6
  19. package/pathways/system/entity/sys_generator_image.js +5 -41
  20. package/pathways/system/entity/sys_generator_memory.js +3 -1
  21. package/pathways/system/entity/sys_generator_reasoning.js +1 -1
  22. package/pathways/system/entity/sys_router_tool.js +3 -4
  23. package/pathways/system/rest_streaming/sys_claude_35_sonnet.js +1 -1
  24. package/pathways/system/rest_streaming/sys_claude_3_haiku.js +1 -1
  25. package/pathways/system/rest_streaming/sys_google_gemini_chat.js +1 -1
  26. package/pathways/system/rest_streaming/sys_ollama_chat.js +21 -0
  27. package/pathways/system/rest_streaming/sys_ollama_completion.js +14 -0
  28. package/pathways/system/rest_streaming/sys_openai_chat_o1.js +1 -1
  29. package/pathways/system/rest_streaming/sys_openai_chat_o3_mini.js +1 -1
  30. package/pathways/transcribe_gemini.js +525 -0
  31. package/server/modelExecutor.js +8 -0
  32. package/server/pathwayResolver.js +13 -8
  33. package/server/plugins/claude3VertexPlugin.js +150 -18
  34. package/server/plugins/gemini15ChatPlugin.js +90 -1
  35. package/server/plugins/gemini15VisionPlugin.js +16 -3
  36. package/server/plugins/modelPlugin.js +12 -9
  37. package/server/plugins/ollamaChatPlugin.js +158 -0
  38. package/server/plugins/ollamaCompletionPlugin.js +147 -0
  39. package/server/rest.js +70 -8
  40. package/tests/claude3VertexToolConversion.test.js +411 -0
  41. package/tests/memoryfunction.test.js +560 -46
  42. package/tests/multimodal_conversion.test.js +169 -0
  43. package/tests/openai_api.test.js +332 -0
  44. package/tests/transcribe_gemini.test.js +217 -0
@@ -15,6 +15,21 @@ async function convertContentItem(item, maxImageSize, plugin) {
         case "text":
             return item.text ? { type: "text", text: item.text } : null;
 
+        case "tool_use":
+            return {
+                type: "tool_use",
+                id: item.id,
+                name: item.name,
+                input: typeof item.input === 'string' ? { query: item.input } : item.input
+            };
+
+        case "tool_result":
+            return {
+                type: "tool_result",
+                tool_use_id: item.tool_use_id,
+                content: item.content
+            };
+
         case "image_url":
             imageUrl = item.url || item.image_url?.url || item.image_url;
 
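Note: a minimal sketch (hypothetical item values) of the shapes the two new cases handle; a string `input` is wrapped as `{ query: ... }`, and `tool_result` items are forwarded with the same `tool_use_id`.

// Hypothetical tool_use item arriving from an OpenAI-style message
const toolUseItem = { type: "tool_use", id: "toolu_01", name: "search", input: "cortex docs" };
// convertContentItem(...) → { type: "tool_use", id: "toolu_01", name: "search", input: { query: "cortex docs" } }

// Hypothetical tool_result item; passed through unchanged
const toolResultItem = { type: "tool_result", tool_use_id: "toolu_01", content: "3 results" };
// convertContentItem(...) → { type: "tool_result", tool_use_id: "toolu_01", content: "3 results" }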
@@ -126,9 +141,42 @@ class Claude3VertexPlugin extends OpenAIVisionPlugin {
 
         // Filter out system messages and empty messages
         let modifiedMessages = messagesCopy
-            .filter(message => message.role !== "system" && message.content)
-            .map(message => ({ ...message }));
-
+            .filter(message => message.role !== "system")
+            .map(message => {
+                // Handle OpenAI tool calls format conversion to Claude format
+                if (message.tool_calls) {
+                    return {
+                        role: message.role,
+                        content: message.tool_calls.map(toolCall => ({
+                            type: "tool_use",
+                            id: toolCall.id,
+                            name: toolCall.function.name,
+                            input: JSON.parse(toolCall.function.arguments)
+                        }))
+                    };
+                }
+
+                // Handle OpenAI tool response format conversion to Claude format
+                if (message.role === "tool") {
+                    return {
+                        role: "user",
+                        content: [{
+                            type: "tool_result",
+                            tool_use_id: message.tool_call_id,
+                            content: message.content
+                        }]
+                    };
+                }
+
+                return { ...message };
+            })
+            .filter(message => {
+                // Filter out messages with empty content
+                if (!message.content) return false;
+                if (Array.isArray(message.content) && message.content.length === 0) return false;
+                return true;
+            });
+
         // Combine consecutive messages from the same author
         const combinedMessages = modifiedMessages.reduce((acc, message) => {
             if (acc.length === 0 || message.role !== acc[acc.length - 1].role) {
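Note: a minimal sketch (hypothetical messages) of the round trip this map step performs: an OpenAI assistant message carrying `tool_calls` becomes a Claude `tool_use` content array, and the `role: "tool"` reply becomes a `tool_result` block on a user message.

const openAIMessages = [
    { role: "assistant", tool_calls: [{ id: "call_1", type: "function",
        function: { name: "get_weather", arguments: '{"city":"Doha"}' } }] }, // hypothetical tool
    { role: "tool", tool_call_id: "call_1", content: "31C and clear" }
];
// after the map step:
// { role: "assistant", content: [{ type: "tool_use", id: "call_1", name: "get_weather", input: { city: "Doha" } }] }
// { role: "user", content: [{ type: "tool_result", tool_use_id: "call_1", content: "31C and clear" }] }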
@@ -191,10 +239,68 @@ class Claude3VertexPlugin extends OpenAIVisionPlugin {
             prompt,
             cortexRequest
         );
+
         const { system, modifiedMessages } =
             await this.convertMessagesToClaudeVertex(requestParameters.messages);
         requestParameters.system = system;
         requestParameters.messages = modifiedMessages;
+
+        // Convert OpenAI tools format to Claude format if present
+        if (parameters.tools) {
+            requestParameters.tools = parameters.tools.map(tool => {
+                if (tool.type === 'function') {
+                    return {
+                        name: tool.function.name,
+                        description: tool.function.description,
+                        input_schema: {
+                            type: "object",
+                            properties: tool.function.parameters.properties,
+                            required: tool.function.parameters.required || []
+                        }
+                    };
+                }
+                return tool;
+            });
+        }
+
+        // If there are function calls in messages, generate tools block
+        if (modifiedMessages?.some(msg =>
+            Array.isArray(msg.content) && msg.content.some(item => item.type === 'tool_use')
+        )) {
+            const toolsMap = new Map();
+
+            // Collect all unique tool uses from messages
+            modifiedMessages.forEach(msg => {
+                if (Array.isArray(msg.content)) {
+                    msg.content.forEach(item => {
+                        if (item.type === 'tool_use') {
+                            toolsMap.set(item.name, {
+                                name: item.name,
+                                description: `Tool for ${item.name}`,
+                                input_schema: {
+                                    type: "object",
+                                    properties: item.input ? Object.keys(item.input).reduce((acc, key) => {
+                                        acc[key] = {
+                                            type: typeof item.input[key] === 'string' ? 'string' : 'object',
+                                            description: `Parameter ${key} for ${item.name}`
+                                        };
+                                        return acc;
+                                    }, {}) : {},
+                                    required: item.input ? Object.keys(item.input) : []
+                                }
+                            });
+                        }
+                    });
+                }
+            });
+
+            if (requestParameters.tools) {
+                requestParameters.tools.push(...Array.from(toolsMap.values()));
+            } else {
+                requestParameters.tools = Array.from(toolsMap.values());
+            }
+        }
+
         requestParameters.max_tokens = this.getModelMaxReturnTokens();
         requestParameters.anthropic_version = "vertex-2023-10-16";
         return requestParameters;
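Note: a minimal sketch (hypothetical tool definition) of the `tools` conversion above: an OpenAI function tool becomes a Claude tool with an `input_schema` built from the same JSON Schema fields.

const openAITool = {
    type: "function",
    function: {
        name: "get_weather", // hypothetical function
        description: "Get current weather for a city",
        parameters: { type: "object", properties: { city: { type: "string" } }, required: ["city"] }
    }
};
// converts to:
// { name: "get_weather", description: "Get current weather for a city",
//   input_schema: { type: "object", properties: { city: { type: "string" } }, required: ["city"] } }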
@@ -274,7 +380,7 @@ class Claude3VertexPlugin extends OpenAIVisionPlugin {
         cortexRequest.params = {}; // query params
         cortexRequest.stream = stream;
         cortexRequest.urlSuffix = cortexRequest.stream
-            ? ":streamRawPredict"
+            ? ":streamRawPredict?alt=sse"
             : ":rawPredict";
 
         const gcpAuthTokenHelper = this.config.get("gcpAuthTokenHelper");
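Note: appending `?alt=sse` asks the Vertex AI streaming endpoint to return server-sent events rather than a JSON array of chunks, which matches the SSE parsing done in `processStreamEvent` below.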
@@ -286,33 +392,59 @@ class Claude3VertexPlugin extends OpenAIVisionPlugin {
 
     processStreamEvent(event, requestProgress) {
         const eventData = JSON.parse(event.data);
+        const baseOpenAIResponse = {
+            id: eventData.message?.id || `chatcmpl-${Date.now()}`,
+            object: "chat.completion.chunk",
+            created: Math.floor(Date.now() / 1000),
+            model: this.modelName,
+            choices: [{
+                index: 0,
+                delta: {},
+                finish_reason: null
+            }]
+        };
+
         switch (eventData.type) {
             case "message_start":
-                requestProgress.data = JSON.stringify(eventData.message);
-                break;
-            case "content_block_start":
-                break;
-            case "ping":
+                // Initial message with role
+                baseOpenAIResponse.choices[0].delta = {
+                    role: "assistant",
+                    content: ""
+                };
+                requestProgress.data = JSON.stringify(baseOpenAIResponse);
                 break;
+
             case "content_block_delta":
                 if (eventData.delta.type === "text_delta") {
-                    requestProgress.data = JSON.stringify(eventData.delta.text);
+                    baseOpenAIResponse.choices[0].delta = {
+                        content: eventData.delta.text
+                    };
+                    requestProgress.data = JSON.stringify(baseOpenAIResponse);
                 }
                 break;
-            case "content_block_stop":
-                break;
-            case "message_delta":
-                break;
+
             case "message_stop":
-                requestProgress.data = "[DONE]";
+                baseOpenAIResponse.choices[0].delta = {};
+                baseOpenAIResponse.choices[0].finish_reason = "stop";
+                requestProgress.data = JSON.stringify(baseOpenAIResponse);
                 requestProgress.progress = 1;
                 break;
+
             case "error":
-                requestProgress.data = `\n\n*** ${
-                    eventData.error.message || eventData.error
-                } ***`;
+                baseOpenAIResponse.choices[0].delta = {
+                    content: `\n\n*** ${eventData.error.message || eventData.error} ***`
+                };
+                baseOpenAIResponse.choices[0].finish_reason = "error";
+                requestProgress.data = JSON.stringify(baseOpenAIResponse);
                 requestProgress.progress = 1;
                 break;
+
+            // Ignore other event types as they don't map to OpenAI format
+            case "content_block_start":
+            case "content_block_stop":
+            case "message_delta":
+            case "ping":
+                break;
         }
 
         return requestProgress;
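Note: a minimal sketch (hypothetical event) of the Anthropic-to-OpenAI mapping above: each Claude SSE event is rewrapped as a `chat.completion.chunk`, so downstream consumers can treat Claude streams like OpenAI streams.

const event = { data: JSON.stringify({ type: "content_block_delta", delta: { type: "text_delta", text: "Hello" } }) };
// processStreamEvent(event, {}) sets requestProgress.data to roughly:
// {"id":"chatcmpl-...","object":"chat.completion.chunk","created":...,"model":"...",
//  "choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}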
@@ -56,7 +56,11 @@ class Gemini15ChatPlugin extends ModelPlugin {
         const { role, author, content } = message;
 
         if (role === 'system') {
-            systemParts.push({ text: content });
+            if (Array.isArray(content)) {
+                content.forEach(item => systemParts.push({ text: item }));
+            } else {
+                systemParts.push({ text: content });
+            }
             return;
         }
 
@@ -169,6 +173,91 @@ class Gemini15ChatPlugin extends ModelPlugin {
         return this.executeRequest(cortexRequest);
     }
 
+    processStreamEvent(event, requestProgress) {
+        const eventData = JSON.parse(event.data);
+
+        // Initialize requestProgress if needed
+        requestProgress = requestProgress || {};
+        requestProgress.data = requestProgress.data || null;
+
+        // Create a helper function to generate message chunks
+        const createChunk = (delta) => ({
+            id: eventData.responseId || `chatcmpl-${Date.now()}`,
+            object: "chat.completion.chunk",
+            created: Math.floor(Date.now() / 1000),
+            model: this.modelName,
+            choices: [{
+                index: 0,
+                delta,
+                finish_reason: null
+            }]
+        });
+
+        // Handle content chunks - do this first before handling any finish conditions
+        if (eventData.candidates?.[0]?.content?.parts?.[0]?.text) {
+            if (!requestProgress.started) {
+                // First chunk - send role
+                requestProgress.data = JSON.stringify(createChunk({ role: "assistant" }));
+                requestProgress.started = true;
+
+                // Immediately follow up with the first content chunk
+                requestProgress.data = JSON.stringify(createChunk({
+                    content: eventData.candidates[0].content.parts[0].text
+                }));
+            } else {
+                // Send content chunk
+                requestProgress.data = JSON.stringify(createChunk({
+                    content: eventData.candidates[0].content.parts[0].text
+                }));
+            }
+
+            // If this message also has STOP, mark it for completion but don't overwrite the content
+            if (eventData.candidates[0].finishReason === "STOP") {
+                requestProgress.progress = 1;
+            }
+        } else if (eventData.candidates?.[0]?.finishReason === "STOP") {
+            // Only send DONE if there was no content in this message
+            requestProgress.data = '[DONE]';
+            requestProgress.progress = 1;
+        }
+
+        // Handle safety blocks
+        if (eventData.candidates?.[0]?.safetyRatings?.some(rating => rating.blocked)) {
+            requestProgress.data = JSON.stringify({
+                id: eventData.responseId || `chatcmpl-${Date.now()}`,
+                object: "chat.completion.chunk",
+                created: Math.floor(Date.now() / 1000),
+                model: this.modelName,
+                choices: [{
+                    index: 0,
+                    delta: { content: "\n\n*** Response blocked due to safety ratings ***" },
+                    finish_reason: "content_filter"
+                }]
+            });
+            requestProgress.progress = 1;
+            return requestProgress;
+        }
+
+        // Handle prompt feedback blocks
+        if (eventData.promptFeedback?.blockReason) {
+            requestProgress.data = JSON.stringify({
+                id: eventData.responseId || `chatcmpl-${Date.now()}`,
+                object: "chat.completion.chunk",
+                created: Math.floor(Date.now() / 1000),
+                model: this.modelName,
+                choices: [{
+                    index: 0,
+                    delta: { content: `\n\n*** Response blocked: ${eventData.promptFeedback.blockReason} ***` },
+                    finish_reason: "content_filter"
+                }]
+            });
+            requestProgress.progress = 1;
+            return requestProgress;
+        }
+
+        return requestProgress;
+    }
+
     // Override the logging function to display the messages and responses
     logRequestData(data, responseData, prompt) {
         const messages = data && data.contents;
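Note: the same OpenAI chunk shape is now produced for Gemini streams. One subtlety visible above: on the first event, `requestProgress.data` is assigned the role chunk and then immediately reassigned the content chunk, so only the content chunk is actually surfaced for that event. A minimal sketch (hypothetical event):

const event = { data: JSON.stringify({
    responseId: "resp_1", // hypothetical id
    candidates: [{ content: { parts: [{ text: "Hello" }] } }]
}) };
// processStreamEvent(event, {}) leaves requestProgress.data as roughly:
// {"id":"resp_1","object":"chat.completion.chunk","created":...,"model":"...",
//  "choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}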
@@ -24,19 +24,24 @@ class Gemini15VisionPlugin extends Gemini15ChatPlugin {
         const { role, author, content } = message;
 
         if (role === 'system') {
-            systemParts.push({ text: content });
+            if (Array.isArray(content)) {
+                content.forEach(item => systemParts.push({ text: item }));
+            } else {
+                systemParts.push({ text: content });
+            }
             return;
         }
 
         // Convert content to Gemini format, trying to maintain compatibility
         const convertPartToGemini = (inputPart) => {
             try {
+                // First try to parse as JSON if it's a string
                 const part = typeof inputPart === 'string' ? JSON.parse(inputPart) : inputPart;
                 const {type, text, image_url, gcs} = part;
                 let fileUrl = gcs || image_url?.url;
 
                 if (typeof part === 'string') {
-                    return { text: text };
+                    return { text: inputPart };
                 } else if (type === 'text') {
                     return { text: text };
                 } else if (type === 'image_url') {
@@ -66,11 +71,19 @@ class Gemini15VisionPlugin extends Gemini15ChatPlugin {
                         data: base64Data
                     }
                 };
+            } else if (fileUrl.includes('youtube.com/') || fileUrl.includes('youtu.be/')) {
+                return {
+                    fileData: {
+                        mimeType: 'video/youtube',
+                        fileUri: fileUrl
+                    }
+                };
             }
             return null;
         }
     } catch (e) {
-        // this space intentionally left blank
+        // If JSON parsing fails or any other error, treat as plain text
+        return inputPart ? { text: inputPart } : null;
     }
     return inputPart ? { text: inputPart } : null;
 };
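Note: a minimal sketch (hypothetical URL) of the new YouTube branch: links that previously fell through to `null` are now forwarded to Gemini as a `fileData` reference.

const part = { type: "image_url", image_url: { url: "https://youtu.be/VIDEO_ID" } }; // hypothetical URL
// convertPartToGemini(part) →
// { fileData: { mimeType: "video/youtube", fileUri: "https://youtu.be/VIDEO_ID" } }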
@@ -210,7 +210,7 @@ class ModelPlugin {
 
     // First run handlebars compile on the pathway messages
     const compiledMessages = modelPrompt.messages.map((message) => {
-        if (message.content) {
+        if (message.content && typeof message.content === 'string') {
             const compileText = HandleBars.compile(message.content);
             return {
                 ...message,
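Note: multimodal messages can carry array content, and `HandleBars.compile` expects a string, so the added `typeof` guard skips template compilation for non-string content and lets those messages pass through untouched.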
@@ -381,14 +381,17 @@ class ModelPlugin {
 
         // finish reason can be in different places in the message
         const finishReason = parsedMessage?.choices?.[0]?.finish_reason || parsedMessage?.candidates?.[0]?.finishReason;
-        if (finishReason?.toLowerCase() === 'stop') {
-            requestProgress.progress = 1;
-        } else {
-            if (finishReason?.toLowerCase() === 'safety') {
-                const safetyRatings = JSON.stringify(parsedMessage?.candidates?.[0]?.safetyRatings) || '';
-                logger.warn(`Request ${this.requestId} was blocked by the safety filter. ${safetyRatings}`);
-                requestProgress.data = `\n\nResponse blocked by safety filter: ${safetyRatings}`;
-                requestProgress.progress = 1;
+        if (finishReason) {
+            switch (finishReason.toLowerCase()) {
+                case 'safety':
+                    const safetyRatings = JSON.stringify(parsedMessage?.candidates?.[0]?.safetyRatings) || '';
+                    logger.warn(`Request ${this.requestId} was blocked by the safety filter. ${safetyRatings}`);
+                    requestProgress.data = `\n\nResponse blocked by safety filter: ${safetyRatings}`;
+                    requestProgress.progress = 1;
+                    break;
+                default:
+                    requestProgress.progress = 1;
+                    break;
             }
         }
     }
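Note: previously only a `stop` finish reason marked the stream complete; the switch now treats any finish reason other than `safety` (for example `length` or `MAX_TOKENS`) as terminal too, while `safety` still logs the ratings and surfaces the block message.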
@@ -0,0 +1,158 @@
+import ModelPlugin from './modelPlugin.js';
+import logger from '../../lib/logger.js';
+import { Transform } from 'stream';
+
+class OllamaChatPlugin extends ModelPlugin {
+
+    getRequestParameters(text, parameters, prompt) {
+        const { modelPromptMessages } = this.getCompiledPrompt(text, parameters, prompt);
+        return {
+            data: {
+                model: parameters.ollamaModel,
+                messages: modelPromptMessages,
+                stream: parameters.stream
+            },
+            params: {}
+        };
+    }
+
+    logRequestData(data, responseData, prompt) {
+        const { stream, messages, model } = data;
+
+        if (messages && messages.length > 0) {
+            logger.info(`[ollama chat request sent to model ${model} containing ${messages.length} messages]`);
+            let totalLength = 0;
+            let totalUnits;
+            messages.forEach((message, index) => {
+                const content = message.content;
+                const { length, units } = this.getLength(content);
+                const preview = this.shortenContent(content);
+
+                logger.verbose(
+                    `message ${index + 1}: role: ${message.role}, ${units}: ${length}, content: "${preview}"`
+                );
+                totalLength += length;
+                totalUnits = units;
+            });
+            logger.info(`[chat request contained ${totalLength} ${totalUnits}]`);
+        }
+
+        if (stream) {
+            logger.info(`[response received as an SSE stream]`);
+        } else if (responseData) {
+            const responseText = this.parseResponse(responseData);
+            const { length, units } = this.getLength(responseText);
+            logger.info(`[response received containing ${length} ${units}]`);
+            logger.verbose(`${this.shortenContent(responseText)}`);
+        }
+
+        prompt &&
+            prompt.debugInfo &&
+            (prompt.debugInfo += `\n${JSON.stringify(data)}`);
+    }
+
+    parseResponse(data) {
+        // If data is not a string (e.g. streaming), return as is
+        if (typeof data !== 'string') {
+            return data;
+        }
+
+        // Split into lines and filter empty ones
+        const lines = data.split('\n').filter(line => line.trim());
+
+        let fullResponse = '';
+
+        for (const line of lines) {
+            try {
+                const jsonObj = JSON.parse(line);
+
+                if (jsonObj.message && jsonObj.message.content) {
+                    // Unescape special sequences
+                    const content = jsonObj.message.content
+                        .replace(/\\n/g, '\n')
+                        .replace(/\\"/g, '"')
+                        .replace(/\\\\/g, '\\')
+                        .replace(/\\u003c/g, '<')
+                        .replace(/\\u003e/g, '>');
+
+                    fullResponse += content;
+                }
+            } catch (err) {
+                // If we can't parse the line as JSON, just skip it
+                continue;
+            }
+        }
+
+        return fullResponse;
+    }
+
+    processStreamEvent(event, requestProgress) {
+        try {
+            const data = JSON.parse(event.data);
+
+            // Handle the streaming response
+            if (data.message?.content) {
+                // Unescape special sequences in the content
+                const content = data.message.content
+                    .replace(/\\n/g, '\n')
+                    .replace(/\\"/g, '"')
+                    .replace(/\\\\/g, '\\')
+                    .replace(/\\u003c/g, '<')
+                    .replace(/\\u003e/g, '>');
+
+                requestProgress.data = JSON.stringify(content);
+            }
+
+            // Check if this is the final message
+            if (data.done) {
+                requestProgress.data = '[DONE]';
+                requestProgress.progress = 1;
+            }
+
+            return requestProgress;
+        } catch (err) {
+            // If we can't parse the event data, return the progress as is
+            return requestProgress;
+        }
+    }
+
+    async execute(text, parameters, prompt, cortexRequest) {
+        const requestParameters = this.getRequestParameters(text, parameters, prompt);
+        cortexRequest.data = { ...(cortexRequest.data || {}), ...requestParameters.data };
+        cortexRequest.params = { ...(cortexRequest.params || {}), ...requestParameters.params };
+
+        // For Ollama streaming, transform NDJSON to SSE format
+        if (parameters.stream) {
+            const response = await this.executeRequest(cortexRequest);
+
+            // Create a transform stream that converts NDJSON to SSE format
+            const transformer = new Transform({
+                decodeStrings: false, // Keep as string
+                transform(chunk, encoding, callback) {
+                    try {
+                        const lines = chunk.toString().split('\n');
+                        for (const line of lines) {
+                            if (line.trim()) {
+                                // Format as SSE data
+                                this.push(`data: ${line}\n\n`);
+                            }
+                        }
+                        callback();
+                    } catch (err) {
+                        callback(err);
+                    }
+                }
+            });
+
+            // Pipe the response through our transformer
+            response.pipe(transformer);
+
+            // Return the transformed stream
+            return transformer;
+        }
+
+        return this.executeRequest(cortexRequest);
+    }
+}
+
+export default OllamaChatPlugin;
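Note: a minimal runnable sketch (assumed NDJSON payload) of the NDJSON-to-SSE transform used in `execute` above: each non-empty line of Ollama's chat stream is reframed as one SSE `data:` event.

import { Transform } from 'stream';

const ndjsonToSse = new Transform({
    decodeStrings: false,
    transform(chunk, encoding, callback) {
        // One SSE event per non-empty NDJSON line
        for (const line of chunk.toString().split('\n')) {
            if (line.trim()) this.push(`data: ${line}\n\n`);
        }
        callback();
    }
});

ndjsonToSse.on('data', sse => console.log(sse));
ndjsonToSse.write('{"message":{"role":"assistant","content":"Hi"},"done":false}\n');
// prints: data: {"message":{"role":"assistant","content":"Hi"},"done":false}\n\n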