@llumiverse/core 0.17.0 → 0.19.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registries.
Files changed (112)
  1. package/README.md +1 -1
  2. package/lib/cjs/CompletionStream.js.map +1 -1
  3. package/lib/cjs/Driver.js +6 -6
  4. package/lib/cjs/Driver.js.map +1 -1
  5. package/lib/cjs/async.js +3 -3
  6. package/lib/cjs/async.js.map +1 -1
  7. package/lib/cjs/formatters/generic.js +6 -6
  8. package/lib/cjs/formatters/generic.js.map +1 -1
  9. package/lib/cjs/formatters/index.js.map +1 -1
  10. package/lib/cjs/formatters/nova.js +11 -11
  11. package/lib/cjs/formatters/nova.js.map +1 -1
  12. package/lib/cjs/formatters/openai.js +11 -11
  13. package/lib/cjs/formatters/openai.js.map +1 -1
  14. package/lib/cjs/index.js +1 -2
  15. package/lib/cjs/index.js.map +1 -1
  16. package/lib/cjs/json.js +1 -1
  17. package/lib/cjs/json.js.map +1 -1
  18. package/lib/cjs/resolver.js +2 -2
  19. package/lib/esm/CompletionStream.js.map +1 -1
  20. package/lib/esm/Driver.js +3 -3
  21. package/lib/esm/Driver.js.map +1 -1
  22. package/lib/esm/async.js +3 -3
  23. package/lib/esm/async.js.map +1 -1
  24. package/lib/esm/formatters/generic.js +1 -1
  25. package/lib/esm/formatters/generic.js.map +1 -1
  26. package/lib/esm/formatters/index.js.map +1 -1
  27. package/lib/esm/formatters/nova.js +5 -5
  28. package/lib/esm/formatters/nova.js.map +1 -1
  29. package/lib/esm/formatters/openai.js +4 -4
  30. package/lib/esm/formatters/openai.js.map +1 -1
  31. package/lib/esm/index.js +1 -2
  32. package/lib/esm/index.js.map +1 -1
  33. package/lib/esm/json.js +1 -1
  34. package/lib/esm/json.js.map +1 -1
  35. package/lib/esm/resolver.js +2 -2
  36. package/lib/types/CompletionStream.d.ts +1 -1
  37. package/lib/types/CompletionStream.d.ts.map +1 -1
  38. package/lib/types/Driver.d.ts +3 -3
  39. package/lib/types/Driver.d.ts.map +1 -1
  40. package/lib/types/async.d.ts +3 -3
  41. package/lib/types/async.d.ts.map +1 -1
  42. package/lib/types/formatters/commons.d.ts +1 -1
  43. package/lib/types/formatters/commons.d.ts.map +1 -1
  44. package/lib/types/formatters/generic.d.ts +2 -2
  45. package/lib/types/formatters/generic.d.ts.map +1 -1
  46. package/lib/types/formatters/index.d.ts +0 -3
  47. package/lib/types/formatters/index.d.ts.map +1 -1
  48. package/lib/types/formatters/nova.d.ts +2 -2
  49. package/lib/types/formatters/nova.d.ts.map +1 -1
  50. package/lib/types/formatters/openai.d.ts +2 -2
  51. package/lib/types/formatters/openai.d.ts.map +1 -1
  52. package/lib/types/index.d.ts +1 -2
  53. package/lib/types/index.d.ts.map +1 -1
  54. package/lib/types/json.d.ts +1 -7
  55. package/lib/types/json.d.ts.map +1 -1
  56. package/lib/types/validation.d.ts +1 -1
  57. package/lib/types/validation.d.ts.map +1 -1
  58. package/package.json +4 -3
  59. package/src/CompletionStream.ts +5 -5
  60. package/src/Driver.ts +5 -5
  61. package/src/async.ts +6 -9
  62. package/src/formatters/commons.ts +1 -1
  63. package/src/formatters/generic.ts +2 -2
  64. package/src/formatters/index.ts +0 -5
  65. package/src/formatters/nova.ts +6 -6
  66. package/src/formatters/openai.ts +5 -5
  67. package/src/index.ts +1 -2
  68. package/src/json.ts +2 -10
  69. package/src/resolver.ts +2 -2
  70. package/src/validation.ts +3 -3
  71. package/lib/cjs/options/bedrock.js +0 -343
  72. package/lib/cjs/options/bedrock.js.map +0 -1
  73. package/lib/cjs/options/groq.js +0 -37
  74. package/lib/cjs/options/groq.js.map +0 -1
  75. package/lib/cjs/options/openai.js +0 -123
  76. package/lib/cjs/options/openai.js.map +0 -1
  77. package/lib/cjs/options/vertexai.js +0 -257
  78. package/lib/cjs/options/vertexai.js.map +0 -1
  79. package/lib/cjs/options.js +0 -54
  80. package/lib/cjs/options.js.map +0 -1
  81. package/lib/cjs/types.js +0 -80
  82. package/lib/cjs/types.js.map +0 -1
  83. package/lib/esm/options/bedrock.js +0 -340
  84. package/lib/esm/options/bedrock.js.map +0 -1
  85. package/lib/esm/options/groq.js +0 -34
  86. package/lib/esm/options/groq.js.map +0 -1
  87. package/lib/esm/options/openai.js +0 -120
  88. package/lib/esm/options/openai.js.map +0 -1
  89. package/lib/esm/options/vertexai.js +0 -253
  90. package/lib/esm/options/vertexai.js.map +0 -1
  91. package/lib/esm/options.js +0 -50
  92. package/lib/esm/options.js.map +0 -1
  93. package/lib/esm/types.js +0 -77
  94. package/lib/esm/types.js.map +0 -1
  95. package/lib/types/options/bedrock.d.ts +0 -32
  96. package/lib/types/options/bedrock.d.ts.map +0 -1
  97. package/lib/types/options/groq.d.ts +0 -12
  98. package/lib/types/options/groq.d.ts.map +0 -1
  99. package/lib/types/options/openai.d.ts +0 -21
  100. package/lib/types/options/openai.d.ts.map +0 -1
  101. package/lib/types/options/vertexai.d.ts +0 -52
  102. package/lib/types/options/vertexai.d.ts.map +0 -1
  103. package/lib/types/options.d.ts +0 -14
  104. package/lib/types/options.d.ts.map +0 -1
  105. package/lib/types/types.d.ts +0 -323
  106. package/lib/types/types.d.ts.map +0 -1
  107. package/src/options/bedrock.ts +0 -388
  108. package/src/options/groq.ts +0 -47
  109. package/src/options/openai.ts +0 -148
  110. package/src/options/vertexai.ts +0 -312
  111. package/src/options.ts +0 -62
  112. package/src/types.ts +0 -405
package/src/types.ts DELETED
@@ -1,405 +0,0 @@
- import { PromptFormatter } from './formatters/index.js';
- import { JSONObject } from './json.js';
- import { TextFallbackOptions } from './options.js';
- import { BedrockOptions } from './options/bedrock.js';
- import { OpenAiOptions } from './options/openai.js';
- import { VertexAIOptions } from './options/vertexai.js';
-
- export interface EmbeddingsOptions {
-     /**
-      * The text to generate the embeddings for. One of text or image is required.
-      */
-     text?: string;
-     /**
-      * The image to generate embeddings for
-      */
-     image?: string
-     /**
-      * The model to use to generate the embeddings. Optional.
-      */
-     model?: string;
-
- }
-
- export interface EmbeddingsResult {
-     /**
-      * The embedding vectors corresponding to the words in the input text.
-      */
-     values: number[];
-     /**
-      * The model used to hgenerate the embeddings.
-      */
-     model: string;
-     /**
-      * Number of tokens of the input text.
-      */
-     token_count?: number;
-
- }
-
- export interface ResultValidationError {
-     code: 'validation_error' | 'json_error' | 'content_policy_violation';
-     message: string;
-     data?: string;
- }
-
- //ResultT should be either JSONObject or string
- //Internal structure used in driver implementation.
- export interface CompletionChunkObject<ResultT = any> {
-     result: ResultT;
-     token_usage?: ExecutionTokenUsage;
-     finish_reason?: "stop" | "length" | string;
- }
-
- //Internal structure used in driver implementation.
- export type CompletionChunk = CompletionChunkObject | string;
-
- export interface ToolDefinition {
-     name: string,
-     description?: string,
-     input_schema: {
-         type: 'object';
-         properties?: unknown | null | undefined;
-         [k: string]: unknown;
-     },
- }
- /**
-  * A tool use instance represents a call to a tool.
-  * The id property is used to identify the tool call.
-  */
- export interface ToolUse<ParamsT = JSONObject> {
-     id: string,
-     tool_name: string,
-     tool_input: ParamsT | null
- }
-
- //ResultT should be either JSONObject or string
- export interface Completion<ResultT = any> {
-     // the driver impl must return the result and optionally the token_usage. the execution time is computed by the extended abstract driver
-     result: ResultT;
-     token_usage?: ExecutionTokenUsage;
-     /**
-      * Contains the tools from which the model awaits information.
-      */
-     tool_use?: ToolUse[];
-     /**
-      * The finish reason as reported by the model: stop | length or other model specific values
-      */
-     finish_reason?: "stop" | "length" | "tool_use" | string;
-
-     /**
-      * Set only if a result validation error occured, otherwise if the result is valid the error field is undefined
-      * This can only be set if the result_schema is set and the reuslt could not be parsed as a json or if the result does not match the schema
-      */
-     error?: ResultValidationError;
-
-     /**
-      * The original response. Only included if the option include_original_response is set to true and the request is made using execute. Not supported when streaming.
-      */
-     original_response?: Record<string, any>;
-
-     /**
-      * The conversation context. This is an opaque structure that can be passed to the next request to restore the context.
-      */
-     conversation?: unknown;
- }
-
- export interface ImageGeneration {
-
-     images?: string[];
-
- }
-
- export interface ExecutionResponse<PromptT = any> extends Completion {
-     prompt: PromptT;
-     /**
-      * The time it took to execute the request in seconds
-      */
-     execution_time?: number;
-     /**
-      * The number of chunks for streamed executions
-      */
-     chunks?: number;
- }
-
-
- export interface CompletionStream<PromptT = any> extends AsyncIterable<string> {
-     completion: ExecutionResponse<PromptT> | undefined;
- }
-
- export interface Logger {
-     debug: (...obj: any[]) => void;
-     info: (...obj: any[]) => void;
-     warn: (...obj: any[]) => void;
-     error: (...obj: any[]) => void;
- }
-
- export interface DriverOptions {
-     logger?: Logger | "console";
- }
-
- export type JSONSchema4TypeName =
-     | "string" //
-     | "number"
-     | "integer"
-     | "boolean"
-     | "object"
-     | "array"
-     | "null"
-     | "any";
-
- export interface JSONSchema {
-     type?: JSONSchema4TypeName | JSONSchema4TypeName[];
-     description?: string;
-     properties?: Record<string, JSONSchema>;
-     required?: string[];
-     [k: string]: any;
- }
-
- //Options are split into PromptOptions, ModelOptions and ExecutionOptions.
- //ExecutionOptions are most often used within llumiverse as they are the most complete.
- //The base types are useful for external code that needs to interact with llumiverse.
- export interface PromptOptions {
-     model: string;
-     /**
-      * A custom formatter to use for format the final model prompt from the input prompt segments.
-      * If no one is specified the driver will choose a formatter compatible with the target model
-      */
-     format?: PromptFormatter;
-     result_schema?: JSONSchema;
- }
-
- export interface StatelessExecutionOptions extends PromptOptions {
-     /**
-      * If set to true the original response from the target LLM will be included in the response under the original_response field.
-      * This is useful for debugging and for some advanced use cases.
-      * It is ignored on streaming requests
-      */
-     include_original_response?: boolean;
-     model_options?: ModelOptions;
-     output_modality: Modalities;
- }
-
- export interface ExecutionOptions extends StatelessExecutionOptions {
-     /**
-      * Available tools for the request
-      */
-     tools?: ToolDefinition[];
-     /**
-      * This is an opaque structure that provides a conversation context
-      * Each driver implementation will return a conversation property in the execution response
-      * that can be passed here to restore the context when a new prompt is sent to the model.
-      */
-     conversation?: unknown | null;
- }
-
- //Common names to share between different models
- export enum SharedOptions {
-     //Text
-     max_tokens = "max_tokens",
-     temperature = "temperature",
-     top_p = "top_p",
-     top_k = "top_k",
-     presence_penalty = "presence_penalty",
-     frequency_penalty = "frequency_penalty",
-     stop_sequence = "stop_sequence",
-
-     //Image
-     seed = "seed",
-     number_of_images = "number_of_images",
- }
-
- export enum OptionType {
-     numeric = "numeric",
-     enum = "enum",
-     boolean = "boolean",
-     string_list = "string_list"
- }
-
- // ============== Model Options ===============
-
- export type ModelOptions = TextFallbackOptions | VertexAIOptions | BedrockOptions | OpenAiOptions;
-
- // ============== Option Info ===============
-
- export interface ModelOptionsInfo {
-     options: ModelOptionInfoItem[];
-     _option_id: string; //Should follow same ids as ModelOptions
- }
-
- export type ModelOptionInfoItem = NumericOptionInfo | EnumOptionInfo | BooleanOptionInfo | StringListOptionInfo;
- interface OptionInfoPrototype {
-     type: OptionType;
-     name: string;
-     description?: string;
-
-     //If this is true, whether other options apply is dependent on this option
-     //Therefore, if this option is changed, the set of available options should be refreshed.
-     refresh?: boolean;
- }
-
- export interface NumericOptionInfo extends OptionInfoPrototype {
-     type: OptionType.numeric;
-     value?: number;
-     min?: number;
-     max?: number;
-     step?: number;
-     integer?: boolean;
-     default?: number;
- }
-
- export interface EnumOptionInfo extends OptionInfoPrototype {
-     type: OptionType.enum;
-     value?: string;
-     enum: Record<string, string>;
-     default?: string;
- }
-
- export interface BooleanOptionInfo extends OptionInfoPrototype {
-     type: OptionType.boolean;
-     value?: boolean;
-     default?: boolean;
- }
-
- export interface StringListOptionInfo extends OptionInfoPrototype {
-     type: OptionType.string_list;
-     value?: string[];
-     default?: string[];
- }
-
- // ============== Prompts ===============
- export enum PromptRole {
-     safety = "safety",
-     system = "system",
-     user = "user",
-     assistant = "assistant",
-     negative = "negative",
-     mask = "mask",
-     /**
-      * Used to send the response of a tool
-      */
-     tool = "tool"
- }
-
- export interface PromptSegment {
-     role: PromptRole;
-     content: string;
-     /**
-      * The tool use id if the segment is a tool response
-      */
-     tool_use_id?: string;
-     files?: DataSource[]
- }
-
- export interface ExecutionTokenUsage {
-     prompt?: number;
-     result?: number;
-     total?: number;
- }
-
- export enum Modalities {
-     text = "text",
-     image = "image",
- }
-
-
- // ============== AI MODEL ==============
-
- export interface AIModel<ProviderKeys = string> {
-     id: string; //id of the model known by the provider
-     name: string; //human readable name
-     provider: ProviderKeys; //provider name
-     description?: string;
-     version?: string; //if any version is specified
-     type?: ModelType; //type of the model
-     tags?: string[]; //tags for searching
-     owner?: string; //owner of the model
-     status?: AIModelStatus; //status of the model
-     can_stream?: boolean; //if the model's reponse can be streamed
-     is_custom?: boolean; //if the model is a custom model (a trained model)
-     is_multimodal?: boolean //if the model support files and images
-     input_modalities?: string[]; //if the model support files and images
-     environment?: string; //the environment name
- }
-
- export enum AIModelStatus {
-     Available = "available",
-     Pending = "pending",
-     Stopped = "stopped",
-     Unavailable = "unavailable",
-     Unknown = "unknown"
- }
-
- /**
-  * payload to list available models for an enviroment
-  * @param environmentId id of the environment
-  * @param query text to search for in model name/description
-  * @param type type of the model
-  * @param tags tags for searching
-  */
- export interface ModelSearchPayload {
-     text: string;
-     type?: ModelType;
-     tags?: string[];
-     owner?: string;
- }
-
-
- export enum ModelType {
-     Classifier = "classifier",
-     Regressor = "regressor",
-     Clustering = "clustering",
-     AnomalyDetection = "anomaly-detection",
-     TimeSeries = "time-series",
-     Text = "text",
-     Image = "image",
-     Audio = "audio",
-     Video = "video",
-     Embedding = "embedding",
-     Chat = "chat",
-     Code = "code",
-     NLP = "nlp",
-     MultiModal = "multi-modal",
-     Test = "test",
-     Other = "other",
-     Unknown = "unknown"
- }
-
-
- // ============== training =====================
-
-
-
- export interface DataSource {
-     name: string;
-     mime_type?: string;
-     getStream(): Promise<ReadableStream<Uint8Array | string>>;
-     getURL(): Promise<string>;
- }
-
- export interface TrainingOptions {
-     name: string; // the new model name
-     model: string; // the model to train
-     params?: JSONObject; // the training parameters
- }
-
- export interface TrainingPromptOptions {
-     segments: PromptSegment[];
-     completion: string | JSONObject;
-     model: string; // the model to train
-     schema?: JSONSchema; // the resuilt schema f any
- }
-
- export enum TrainingJobStatus {
-     running = "running",
-     succeeded = "succeeded",
-     failed = "failed",
-     cancelled = "cancelled",
- }
-
- export interface TrainingJob {
-     id: string; // id of the training job
-     status: TrainingJobStatus; // status of the training job - depends on the implementation
-     details?: string;
-     model?: string; // the name of the fine tuned model which is created
- }
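
For context, the deleted package/src/types.ts held the package's core request/response type definitions (prompt segments, execution options, completions, model metadata). The sketch below is illustrative only and is not taken from the package: it shows how downstream code typically referenced a few of these types against 0.17.0, assuming the root entry point re-exported them; the model id and the handleCompletion helper are hypothetical.

import { Completion, ExecutionOptions, Modalities, PromptRole, PromptSegment } from "@llumiverse/core";

// Hypothetical consumer-side usage of the (now removed) 0.17.0 type definitions.
const segments: PromptSegment[] = [
    { role: PromptRole.system, content: "You are a helpful assistant." },
    { role: PromptRole.user, content: "Summarize the release notes." },
];

const options: ExecutionOptions = {
    model: "example-model-id",        // placeholder, not a real model id
    output_modality: Modalities.text, // required by StatelessExecutionOptions
    result_schema: { type: "object", properties: { summary: { type: "string" } } },
};

// Driver results follow the Completion shape (ExecutionResponse extends Completion in the deleted file).
function handleCompletion(completion: Completion<string>): string {
    if (completion.error) {
        throw new Error(`${completion.error.code}: ${completion.error.message}`);
    }
    return completion.result;
}

Code written this way against 0.17.0 is what the removal of types.ts (and the options/ modules) in this release affects; the diff alone does not indicate where, or whether, these declarations are exposed in 0.19.0.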