@llumiverse/core 0.10.0 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/README.md +2 -7
  2. package/lib/cjs/CompletionStream.js +1 -7
  3. package/lib/cjs/CompletionStream.js.map +1 -1
  4. package/lib/cjs/Driver.js +16 -2
  5. package/lib/cjs/Driver.js.map +1 -1
  6. package/lib/cjs/formatters/claude.js +5 -7
  7. package/lib/cjs/formatters/claude.js.map +1 -1
  8. package/lib/cjs/formatters/commons.js +8 -0
  9. package/lib/cjs/formatters/commons.js.map +1 -0
  10. package/lib/cjs/formatters/generic.js +52 -20
  11. package/lib/cjs/formatters/generic.js.map +1 -1
  12. package/lib/cjs/formatters/index.js +18 -27
  13. package/lib/cjs/formatters/index.js.map +1 -1
  14. package/lib/cjs/formatters/llama2.js +3 -3
  15. package/lib/cjs/formatters/llama2.js.map +1 -1
  16. package/lib/cjs/formatters/openai.js +9 -4
  17. package/lib/cjs/formatters/openai.js.map +1 -1
  18. package/lib/cjs/index.js +0 -1
  19. package/lib/cjs/index.js.map +1 -1
  20. package/lib/cjs/types.js +2 -8
  21. package/lib/cjs/types.js.map +1 -1
  22. package/lib/esm/CompletionStream.js +1 -7
  23. package/lib/esm/CompletionStream.js.map +1 -1
  24. package/lib/esm/Driver.js +17 -3
  25. package/lib/esm/Driver.js.map +1 -1
  26. package/lib/esm/formatters/claude.js +3 -5
  27. package/lib/esm/formatters/claude.js.map +1 -1
  28. package/lib/esm/formatters/commons.js +4 -0
  29. package/lib/esm/formatters/commons.js.map +1 -0
  30. package/lib/esm/formatters/generic.js +50 -18
  31. package/lib/esm/formatters/generic.js.map +1 -1
  32. package/lib/esm/formatters/index.js +5 -26
  33. package/lib/esm/formatters/index.js.map +1 -1
  34. package/lib/esm/formatters/llama2.js +1 -1
  35. package/lib/esm/formatters/llama2.js.map +1 -1
  36. package/lib/esm/formatters/openai.js +7 -2
  37. package/lib/esm/formatters/openai.js.map +1 -1
  38. package/lib/esm/index.js +0 -1
  39. package/lib/esm/index.js.map +1 -1
  40. package/lib/esm/types.js +1 -7
  41. package/lib/esm/types.js.map +1 -1
  42. package/lib/types/CompletionStream.d.ts +1 -2
  43. package/lib/types/CompletionStream.d.ts.map +1 -1
  44. package/lib/types/Driver.d.ts +8 -2
  45. package/lib/types/Driver.d.ts.map +1 -1
  46. package/lib/types/formatters/claude.d.ts +1 -1
  47. package/lib/types/formatters/claude.d.ts.map +1 -1
  48. package/lib/types/formatters/commons.d.ts +3 -0
  49. package/lib/types/formatters/commons.d.ts.map +1 -0
  50. package/lib/types/formatters/generic.d.ts +9 -4
  51. package/lib/types/formatters/generic.d.ts.map +1 -1
  52. package/lib/types/formatters/index.d.ts +7 -3
  53. package/lib/types/formatters/index.d.ts.map +1 -1
  54. package/lib/types/formatters/llama2.d.ts +1 -1
  55. package/lib/types/formatters/llama2.d.ts.map +1 -1
  56. package/lib/types/formatters/openai.d.ts +10 -2
  57. package/lib/types/formatters/openai.d.ts.map +1 -1
  58. package/lib/types/index.d.ts +0 -1
  59. package/lib/types/index.d.ts.map +1 -1
  60. package/lib/types/json.d.ts +8 -8
  61. package/lib/types/json.d.ts.map +1 -1
  62. package/lib/types/types.d.ts +55 -14
  63. package/lib/types/types.d.ts.map +1 -1
  64. package/package.json +15 -5
  65. package/src/CompletionStream.ts +1 -8
  66. package/src/Driver.ts +17 -8
  67. package/src/formatters/claude.ts +3 -6
  68. package/src/formatters/commons.ts +5 -0
  69. package/src/formatters/generic.ts +59 -27
  70. package/src/formatters/index.ts +7 -30
  71. package/src/formatters/llama2.ts +1 -1
  72. package/src/formatters/openai.ts +14 -6
  73. package/src/index.ts +0 -1
  74. package/src/json.ts +7 -7
  75. package/src/types.ts +64 -13
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@llumiverse/core",
-    "version": "0.10.0",
+    "version": "0.12.0",
     "type": "module",
     "description": "Provide an universal API to LLMs. Support for existing LLMs can be added by writing a driver.",
     "files": [
@@ -32,6 +32,9 @@
         "*": {
             "async": [
                 "./lib/types/async.d.ts"
+            ],
+            "formatters": [
+                "./lib/types/formatters/index.d.ts"
             ]
         }
     },
@@ -47,6 +50,12 @@
             "default": "./lib/esm/async.js",
             "import": "./lib/esm/async.js",
             "require": "./lib/cjs/async.js"
+        },
+        "./formatters": {
+            "types": "./lib/types/formatters/index.d.ts",
+            "default": "./lib/esm/formatters/index.js",
+            "import": "./lib/esm/formatters/index.js",
+            "require": "./lib/cjs/formatters/index.js"
         }
     },
     "scripts": {
@@ -66,8 +75,8 @@
         "@types/json-schema": "^7.0.15",
         "api-fetch-client": "^0.8.6",
         "ts-dual-module": "^0.6.2",
-        "typescript": "^5.3.3",
-        "vitest": "^1.2.2"
+        "typescript": "^5.4.2",
+        "vitest": "^1.4.0"
     },
     "dependencies": {
         "json-schema": "^0.4.0"
@@ -75,7 +84,8 @@
     "ts_dual_module": {
         "outDir": "lib",
         "exports": {
-            "async": "async.js"
+            "async": "async.js",
+            "formatters": "formatters/index.js"
         }
     }
-}
+}
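
Note: the new "./formatters" entry makes the formatters a public subpath of the package. A minimal consumer-side sketch (the import names are taken from the formatter diffs below):

    // With the new exports map, formatters are imported from the subpath
    // rather than from the package root (see the package/src/index.ts change below).
    import { formatLlama2Prompt, formatOpenAILikePrompt } from "@llumiverse/core/formatters";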
package/src/CompletionStream.ts CHANGED
@@ -60,7 +60,6 @@ export class DefaultCompletionStream<PromptT = any> implements CompletionStream<
 export class FallbackCompletionStream<PromptT = any> implements CompletionStream<PromptT> {
 
     prompt: PromptT;
-    chunks: string[];
     completion: ExecutionResponse<PromptT> | undefined;
 
     constructor(public driver: AbstractDriver<DriverOptions, PromptT>,
@@ -68,22 +67,16 @@ export class FallbackCompletionStream<PromptT = any> implements CompletionStream
         public options: ExecutionOptions) {
         this.driver = driver;
         this.prompt = this.driver.createPrompt(segments, options);
-        this.chunks = [];
     }
 
     async *[Symbol.asyncIterator]() {
         // reset state
         this.completion = undefined;
-        if (this.chunks.length > 0) {
-            this.chunks = [];
-        }
         this.driver.logger.debug(
             `[${this.driver.provider}] Streaming is not supported, falling back to blocking execution`
         );
         const completion = await this.driver._execute(this.prompt, this.options);
-
-        const content = completion.result === 'string' ? completion.result : JSON.stringify(completion.result);
-        this.chunks.push(content);
+        const content = typeof completion.result === 'string' ? completion.result : JSON.stringify(completion.result);
         yield content;
 
         this.completion = completion;
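
Note: besides dropping the unused chunks buffer, this hunk fixes a real bug. The old `completion.result === 'string'` compared the result value to the literal string 'string', so it was only true when the result happened to be that exact word, and ordinary string results were re-serialized with extra quotes. A minimal sketch of the corrected check:

    // Sketch of the fixed logic: a typeof type guard, not a value comparison.
    function toChunk(result: unknown): string {
        return typeof result === "string" ? result : JSON.stringify(result);
    }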
package/src/Driver.ts CHANGED
@@ -5,7 +5,7 @@
  */
 
 import { DefaultCompletionStream, FallbackCompletionStream } from "./CompletionStream.js";
-import { PromptFormatters } from "./formatters/index.js";
+import { formatLlama2Prompt, formatTextPrompt } from "./formatters/index.js";
 import {
     AIModel,
     Completion,
@@ -18,7 +18,6 @@ import {
     ExecutionResponse,
     Logger,
     ModelSearchPayload,
-    PromptFormats,
     PromptOptions,
     PromptSegment,
     TrainingJob,
@@ -54,7 +53,7 @@ export function createLogger(logger: Logger | "console" | undefined) {
 
 function applyExecutionDefaults(options: ExecutionOptions): ExecutionOptions {
     return {
-        max_tokens: 1024,
+        max_tokens: 2048,
         temperature: 0.7,
         ...options
     }
@@ -106,7 +105,6 @@ export abstract class AbstractDriver<OptionsT extends DriverOptions = DriverOpti
     logger: Logger;
 
     abstract provider: string; // the provider name
-    abstract defaultFormat: PromptFormats;
 
     constructor(opts: OptionsT) {
         this.options = opts;
@@ -181,11 +179,22 @@ export abstract class AbstractDriver<OptionsT extends DriverOptions = DriverOpti
         }
     }
 
+    /**
+     * Override this method to provide a custom prompt formatter
+     * @param segments
+     * @param options
+     * @returns
+     */
+    protected formatPrompt(segments: PromptSegment[], opts: PromptOptions): PromptT {
+        if (/\bllama2?\b/i.test(opts.model)) {
+            return formatLlama2Prompt(segments, opts.resultSchema) as PromptT;
+        } else {
+            return formatTextPrompt(segments, opts.resultSchema) as PromptT;
+        }
+    }
+
     public createPrompt(segments: PromptSegment[], opts: PromptOptions): PromptT {
-        return PromptFormatters[opts.format || this.defaultFormat](
-            segments,
-            opts.resultSchema
-        );
+        return opts.format ? opts.format(segments, opts.resultSchema) : this.formatPrompt(segments, opts);
     }
 
     /**
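
Note: the enum-driven formatter lookup is gone; a driver now overrides formatPrompt, and callers may pass a formatter function as options.format. A hedged sketch of a driver subclass under the new API (the class name is made up, formatClaudePrompt is the renamed Claude formatter shown below, and the other abstract members are elided):

    // Hypothetical driver: instead of declaring `defaultFormat`, override formatPrompt().
    class MyClaudeDriver extends AbstractDriver<DriverOptions, ClaudeMessagesPrompt> {
        provider = "my-provider";
        protected formatPrompt(segments: PromptSegment[], opts: PromptOptions): ClaudeMessagesPrompt {
            return formatClaudePrompt(segments, opts.resultSchema);
        }
        // ...remaining abstract members elided
    }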
package/src/formatters/claude.ts CHANGED
@@ -1,5 +1,6 @@
 import { JSONSchema4 } from "json-schema";
 import { PromptRole, PromptSegment } from "../index.js";
+import { getJSONSafetyNotice } from "./commons.js";
 
 export interface ClaudeMessage {
     role: 'user' | 'assistant',
@@ -19,7 +20,7 @@ export interface ClaudeMessagesPrompt {
  * A formatter user by Bedrock to format prompts for claude related models
  */
 
-export function claudeMessages(segments: PromptSegment[], schema?: JSONSchema4): ClaudeMessagesPrompt {
+export function formatClaudePrompt(segments: PromptSegment[], schema?: JSONSchema4): ClaudeMessagesPrompt {
     const system: string[] = [];
     const safety: string[] = [];
     const messages: ClaudeMessage[] = [];
@@ -35,13 +36,9 @@ export function claudeMessages(segments: PromptSegment[], schema?: JSONSchema4):
     }
 
     if (schema) {
-        safety.push(`You must answer using the following JSONSchema:
-        ---
-        ${JSON.stringify(schema)}
-        ---`);
+        safety.push(getJSONSafetyNotice(schema));
     }
 
-
     // messages must contains at least 1 item. If the prompt doesn;t contains a user message (but only system messages)
     // we need to put the system messages in the messages array
 
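
Note: the schema notice is now delegated to getJSONSafetyNotice, added below in commons.ts. A small hypothetical usage of the renamed formatter:

    // Hypothetical call: when a schema is given, a safety notice built by
    // getJSONSafetyNotice() is appended to the prompt's safety messages.
    const prompt = formatClaudePrompt(
        [{ role: PromptRole.user, content: "List three colors" }],
        { type: "object", properties: { colors: { type: "array" } } }
    );
    // `prompt` has the ClaudeMessagesPrompt shape declared above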
package/src/formatters/commons.ts ADDED
@@ -0,0 +1,5 @@
+import { JSONSchema4 } from "json-schema";
+
+export function getJSONSafetyNotice(schema: JSONSchema4) {
+    return "The answer must be a JSON object using the following JSON Schema:\n" + JSON.stringify(schema);
+}
package/src/formatters/generic.ts CHANGED
@@ -1,34 +1,66 @@
 import { JSONSchema4 } from "json-schema";
-import { PromptRole, PromptSegment } from "../index.js";
+import { PromptRole, PromptSegment } from "../types.js";
+import { getJSONSafetyNotice } from "./commons.js";
 
-export function genericColonSeparator(
-    messages: PromptSegment[],
-    schema?: JSONSchema4,
-    labels: {
-        user: string;
-        assistant: string;
-        system: string;
-    } = { user: "User", assistant: "Assistant", system: "System" }
-) {
-    const promptMessages = [];
-    for (const m of messages) {
-        if (m.role === PromptRole.user) {
-            promptMessages.push(`${labels?.user}: ${m.content.trim()}`);
-        }
-        if (m.role === PromptRole.assistant) {
-            promptMessages.push(`${labels.assistant}: ${m.content.trim()}`);
+interface Labels {
+    user: string,
+    system: string,
+    assistant: string,
+    safety: string,
+    instruction: string
+}
+
+export function createTextPromptFormatter(labels: Labels = {
+    user: "USER",
+    system: "CONTEXT",
+    assistant: "ASSISTANT",
+    safety: "IMPORTANT",
+    instruction: "INSTRUCTION"
+}) {
+    return function genericTextPrompt(segments: PromptSegment[], schema?: JSONSchema4): string {
+        const isChat = segments.find(m => m.role === PromptRole.assistant);
+        const context: string[] = [];
+        const content: string[] = [];
+        const safety: string[] = [];
+        for (const segment of segments) {
+            switch (segment.role) {
+                case PromptRole.user:
+                    if (isChat) {
+                        content.push(`${labels.user}: ${segment.content}`);
+                    } else {
+                        content.push(segment.content);
+                    }
+                    break;
+                case PromptRole.assistant:
+                    content.push(`${labels.assistant}: ${segment.content}`);
+                    break;
+                case PromptRole.system:
+                    context.push(segment.content);
+                    break;
+                case PromptRole.safety:
+                    safety.push(segment.content);
+                    break;
+            }
         }
-        if (m.role === PromptRole.system) {
-            promptMessages.push(`${labels.system}: ${m.content.trim()}`);
+
+        if (schema) {
+            safety.push(getJSONSafetyNotice(schema));
         }
-    }
 
-    if (schema) {
-        promptMessages.push(`${labels.system}: You must answer using the following JSONSchema:
-        ---
-        ${JSON.stringify(schema)}
-        ---`);
+        const out = [];
+        if (context.length > 0) {
+            out.push(`${labels.system}: ${context.join('\n')}`);
+        }
+        if (content.length > 0) {
+            const prefix = context.length > 0 && !isChat ? `${labels.instruction}: ` : '';
+            out.push(prefix + content.join('\n'));
+        }
+        if (safety.length > 0) {
+            out.push(`${labels.safety}: ${safety.join('\n')}`);
+        }
+        return out.join('\n');
     }
-
-    return promptMessages.join("\n\n");
 }
+
+const formatTextPrompt = createTextPromptFormatter();
+export { formatTextPrompt };
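
Note: createTextPromptFormatter turns the old one-off function into a factory over a label set, and formatTextPrompt is the default instance. Tracing the code above, a non-chat prompt (no assistant segment) renders like this:

    const text = formatTextPrompt([
        { role: PromptRole.system, content: "You are terse." },
        { role: PromptRole.user, content: "Say hi." },
    ]);
    // => "CONTEXT: You are terse.\nINSTRUCTION: Say hi."
    // No "USER: " prefix appears because, without an assistant segment, the
    // prompt is not treated as a chat; createTextPromptFormatter({ ... })
    // builds a variant with different labels.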
package/src/formatters/index.ts CHANGED
@@ -1,33 +1,10 @@
 import { JSONSchema4 } from "json-schema";
-import { genericColonSeparator } from "./generic.js";
-import { llama2 } from "./llama2.js";
-import { openAI } from "./openai.js";
-import {
-    PromptFormats,
-    PromptSegment
-} from "../types.js";
-import { claudeMessages } from "./claude.js";
+import { PromptSegment } from "../types.js";
 
-export function inferFormatterFromModelName(modelName: string): PromptFormats {
-    const name = modelName.toLowerCase();
-    if (name.includes("llama")) {
-        return PromptFormats.llama2;
-    } else if (name.includes("gpt")) {
-        return PromptFormats.openai;
-    } else if (name.includes("claude")) {
-        return PromptFormats.claude;
-    } else {
-        return PromptFormats.genericTextLLM;
-    }
-}
-
-export const PromptFormatters: Record<
-    PromptFormats,
-    (messages: PromptSegment[], schema?: JSONSchema4) => any
-> = {
-    openai: openAI,
-    llama2: llama2,
-    claude: claudeMessages,
-    genericTextLLM: genericColonSeparator,
-};
+export type PromptFormatter<T = any> = (messages: PromptSegment[], schema?: JSONSchema4) => T;
 
+export * from "./commons.js"
+export * from "./generic.js";
+export * from "./llama2.js";
+export * from "./claude.js";
+export * from "./openai.js";
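
Note: the PromptFormats enum and the PromptFormatters registry are replaced by the PromptFormatter function type, so any (segments, schema) => T function qualifies. A hypothetical custom formatter to illustrate the contract:

    // Made-up formatter satisfying PromptFormatter<string>; it can be passed
    // directly as the `format` option (see PromptOptions in types.ts below).
    const xmlFormatter: PromptFormatter<string> = (messages, schema) =>
        messages.map(m => `<${m.role}>${m.content}</${m.role}>`).join("\n") +
        (schema ? "\n" + getJSONSafetyNotice(schema) : "");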
package/src/formatters/llama2.ts CHANGED
@@ -1,7 +1,7 @@
 import { JSONSchema4 } from "json-schema";
 import { PromptRole, PromptSegment } from "../index.js";
 
-export function llama2(messages: PromptSegment[], schema?: JSONSchema4) {
+export function formatLlama2Prompt(messages: PromptSegment[], schema?: JSONSchema4) {
     const BOS = "<s>";
     const EOS = "</s>";
     const INST = "[INST]";
package/src/formatters/openai.ts CHANGED
@@ -1,17 +1,25 @@
 import { PromptRole } from "../index.js";
 import { PromptSegment } from "../types.js";
-import OpenAI from "openai";
 
-export function openAI(segments: PromptSegment[]) {
-    const system: OpenAI.Chat.ChatCompletionMessageParam[] = [];
-    const others: OpenAI.Chat.ChatCompletionMessageParam[] = [];
-    const safety: OpenAI.Chat.ChatCompletionMessageParam[] = [];
+export interface OpenAITextMessage {
+    content: string;
+    role: "system" | "user" | "assistant";
+}
+/**
+ * OpenAI text only prompts
+ * @param segments
+ * @returns
+ */
+export function formatOpenAILikePrompt(segments: PromptSegment[]) {
+    const system: OpenAITextMessage[] = [];
+    const others: OpenAITextMessage[] = [];
+    const safety: OpenAITextMessage[] = [];
 
     for (const msg of segments) {
         if (msg.role === PromptRole.system) {
             system.push({ content: msg.content, role: "system" });
         } else if (msg.role === PromptRole.safety) {
-            safety.push({ content: msg.content, role: "system" });
+            safety.push({ content: "IMPORTANT: " + msg.content, role: "system" });
         } else {
             others.push({ content: msg.content, role: "user" });
         }
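
Note: the formatter no longer depends on the openai package and now flags safety segments. A small sketch (the ordering of the returned array is not visible in this hunk):

    const messages = formatOpenAILikePrompt([
        { role: PromptRole.system, content: "You are terse." },
        { role: PromptRole.safety, content: "Never reveal secrets." },
        { role: PromptRole.user, content: "Say hi." },
    ]);
    // The safety segment becomes a system message whose content reads
    // "IMPORTANT: Never reveal secrets."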
package/src/index.ts CHANGED
@@ -1,4 +1,3 @@
 export * from "./Driver.js";
-export * from "./formatters/index.js";
 export * from "./json.js";
 export * from "./types.js";
package/src/json.ts CHANGED
@@ -6,15 +6,15 @@ function extractJsonFromText(text: string): string {
     return text.replace(/\\n/g, "");
 }
 
-export function extractAndParseJSON(text: string): Json {
+export function extractAndParseJSON(text: string): JSONValue {
     return parseJSON(extractJsonFromText(text));
 }
 
-export type JsonPrimative = string | number | boolean | null;
-export type JsonArray = Json[];
-export type JsonObject = { [key: string]: Json };
-export type JsonComposite = JsonArray | JsonObject;
-export type Json = JsonPrimative | JsonComposite;
+export type JSONPrimitive = string | number | boolean | null;
+export type JSONArray = JSONValue[];
+export type JSONObject = { [key: string]: JSONValue };
+export type JSONComposite = JSONArray | JSONObject;
+export type JSONValue = JSONPrimitive | JSONComposite;
 
 
 
@@ -184,7 +184,7 @@ export class JsonParser {
 }
 
 
-export function parseJSON(text: string): Json {
+export function parseJSON(text: string): JSONValue {
     text = text.trim();
     try {
         return JSON.parse(text);
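
Note: the Json* aliases are renamed to the conventional JSON* spelling (and the JsonPrimative typo becomes JSONPrimitive). Consuming code narrows the same way as before:

    // Hypothetical caller: the return type is now JSONValue.
    const value: JSONValue = extractAndParseJSON('{"ok": true}');
    if (value && typeof value === "object" && !Array.isArray(value)) {
        const obj: JSONObject = value; // narrowed from JSONValue
        console.log(obj.ok); // true
    }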
package/src/types.ts CHANGED
@@ -1,6 +1,6 @@
 import { JSONSchema4 } from "json-schema";
-import { Readable } from "stream";
-import { JsonObject } from "./json.js";
+import { JSONObject } from "./json.js";
+import { PromptFormatter } from "./formatters/index.js";
 
 export interface EmbeddingsOptions {
     /**
@@ -48,11 +48,22 @@ export interface Completion<ResultT = any> {
     result: ResultT;
     token_usage?: ExecutionTokenUsage;
 
+    /**
+     * The finish reason as reported by the model: stop | length or other model specific values
+     */
+    finish_reason?: "stop" | "length" | string;
+
     /**
      * Set only if a result validation error occured, otherwise if the result is valid the error field is undefined
      * This can only be set if the resultSchema is set and the reuslt could not be parsed as a json or if the result does not match the schema
      */
     error?: ResultValidationError;
+
+    /**
+     * The original response. Only included if the option include_original_response is set to true and the request is made using execute. Not supported when streaming.
+     */
+    original_response?: Record<string, any>;
+
 }
 
 export interface ExecutionResponse<PromptT = any> extends Completion {
@@ -81,12 +92,57 @@ export interface DriverOptions {
 
 export interface PromptOptions {
     model: string;
-    format?: PromptFormats;
+    /**
+     * A custom formatter to use for format the final model prompt from the input prompt segments.
+     * If no one is specified the driver will choose a formatter compatible with the target model
+     */
+    format?: PromptFormatter;
     resultSchema?: JSONSchema4;
 }
 export interface ExecutionOptions extends PromptOptions {
     temperature?: number;
     max_tokens?: number;
+    stop_sequence?: string | string[];
+
+    /**
+     * restricts the selection of tokens to the “k” most likely options, based on their probabilities
+     * Lower values make the model more deterministic, more focused. Examples:
+     * - 10 - result will be highly controlled anc contextually relevant
+     * - 50 - result will be more creative but maintaining a balance between control and creativity
+     * - 100 - will lead to more creative and less predictable outputs
+     * It will be ignored on OpenAI since it does not support it
+     */
+    top_k?: number;
+
+    /**
+     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+     * Either use temperature or top_p, not both
+     */
+    top_p?: number;
+
+    /**
+     * Only supported for OpenAI. Look at OpenAI documentation for more detailsx
+     */
+    top_logprobs?: number;
+
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+     * Ignored for models which doesn;t support it
+     */
+    presence_penalty?: number;
+
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+     * Ignored for models which doesn;t support it
+     */
+    frequency_penalty?: number;
+
+    /**
+     * If set to true the original response from the target LLM will be included in the response under the original_response field.
+     * This is useful for debugging and for some advanced use cases.
+     * It is ignored on streaming requests
+     */
+    include_original_response?: boolean;
 }
 
 // ============== Prompts ===============
@@ -171,13 +227,6 @@ export enum ModelType {
 // ============== Built-in formats and drivers =====================
 //TODO
 
-export enum PromptFormats {
-    openai = "openai",
-    llama2 = "llama2",
-    claude = "claude",
-    genericTextLLM = "genericTextLLM",
-}
-
 export enum BuiltinProviders {
     openai = 'openai',
     huggingface_ie = 'huggingface_ie',
@@ -186,6 +235,7 @@ export enum BuiltinProviders {
     vertexai = 'vertexai',
     togetherai = 'togetherai',
     mistralai = 'mistralai',
+    groq = 'groq',
     //virtual = 'virtual',
     //cohere = 'cohere',
 }
@@ -193,21 +243,22 @@
 
 // ============== training =====================
 
 
+
 export interface DataSource {
     name: string;
-    getStream(): Readable;
+    getStream(): ReadableStream<Uint8Array | string>;
     getURL(): Promise<string>;
 }
 
 export interface TrainingOptions {
     name: string; // the new model name
     model: string; // the model to train
-    params?: JsonObject; // the training parameters
+    params?: JSONObject; // the training parameters
 }
 
 export interface TrainingPromptOptions {
     segments: PromptSegment[];
-    completion: string | JsonObject;
+    completion: string | JSONObject;
     model: string; // the model to train
     schema?: JSONSchema4; // the resuilt schema f any
 }
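
Note: taken together, ExecutionOptions now carries the common sampling knobs plus a debugging switch, and Completion gains finish_reason and original_response. A hedged example of a request using the new fields (the driver instance, the execute call shape, and the model name are assumptions, not part of this diff):

    const response = await driver.execute(segments, {
        model: "some-model",
        max_tokens: 2048,                 // matches the new default in Driver.ts
        top_p: 0.9,                       // use either temperature or top_p, not both
        stop_sequence: ["\n\n"],
        include_original_response: true,  // populates original_response (non-streaming only)
    });
    console.log(response.finish_reason, response.original_response);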