@llumiverse/core 0.17.0 → 0.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140):
  1. package/README.md +1 -1
  2. package/lib/cjs/CompletionStream.js.map +1 -1
  3. package/lib/cjs/Driver.js +6 -6
  4. package/lib/cjs/Driver.js.map +1 -1
  5. package/lib/cjs/async.js +3 -3
  6. package/lib/cjs/async.js.map +1 -1
  7. package/lib/cjs/capability/bedrock.js +183 -0
  8. package/lib/cjs/capability/bedrock.js.map +1 -0
  9. package/lib/cjs/capability/openai.js +122 -0
  10. package/lib/cjs/capability/openai.js.map +1 -0
  11. package/lib/cjs/capability/vertexai.js +86 -0
  12. package/lib/cjs/capability/vertexai.js.map +1 -0
  13. package/lib/cjs/capability.js +52 -0
  14. package/lib/cjs/capability.js.map +1 -0
  15. package/lib/cjs/formatters/generic.js +6 -6
  16. package/lib/cjs/formatters/generic.js.map +1 -1
  17. package/lib/cjs/formatters/index.js.map +1 -1
  18. package/lib/cjs/formatters/nova.js +11 -11
  19. package/lib/cjs/formatters/nova.js.map +1 -1
  20. package/lib/cjs/formatters/openai.js +25 -11
  21. package/lib/cjs/formatters/openai.js.map +1 -1
  22. package/lib/cjs/index.js +2 -1
  23. package/lib/cjs/index.js.map +1 -1
  24. package/lib/cjs/json.js +1 -1
  25. package/lib/cjs/json.js.map +1 -1
  26. package/lib/cjs/options.js +8 -43
  27. package/lib/cjs/options.js.map +1 -1
  28. package/lib/cjs/resolver.js +2 -2
  29. package/lib/esm/CompletionStream.js.map +1 -1
  30. package/lib/esm/Driver.js +3 -3
  31. package/lib/esm/Driver.js.map +1 -1
  32. package/lib/esm/async.js +3 -3
  33. package/lib/esm/async.js.map +1 -1
  34. package/lib/esm/capability/bedrock.js +180 -0
  35. package/lib/esm/capability/bedrock.js.map +1 -0
  36. package/lib/esm/capability/openai.js +119 -0
  37. package/lib/esm/capability/openai.js.map +1 -0
  38. package/lib/esm/capability/vertexai.js +83 -0
  39. package/lib/esm/capability/vertexai.js.map +1 -0
  40. package/lib/esm/capability.js +47 -0
  41. package/lib/esm/capability.js.map +1 -0
  42. package/lib/esm/formatters/generic.js +1 -1
  43. package/lib/esm/formatters/generic.js.map +1 -1
  44. package/lib/esm/formatters/index.js.map +1 -1
  45. package/lib/esm/formatters/nova.js +5 -5
  46. package/lib/esm/formatters/nova.js.map +1 -1
  47. package/lib/esm/formatters/openai.js +17 -4
  48. package/lib/esm/formatters/openai.js.map +1 -1
  49. package/lib/esm/index.js +2 -1
  50. package/lib/esm/index.js.map +1 -1
  51. package/lib/esm/json.js +1 -1
  52. package/lib/esm/json.js.map +1 -1
  53. package/lib/esm/options.js +3 -37
  54. package/lib/esm/options.js.map +1 -1
  55. package/lib/esm/resolver.js +2 -2
  56. package/lib/types/CompletionStream.d.ts +1 -1
  57. package/lib/types/CompletionStream.d.ts.map +1 -1
  58. package/lib/types/Driver.d.ts +3 -3
  59. package/lib/types/Driver.d.ts.map +1 -1
  60. package/lib/types/async.d.ts +2 -2
  61. package/lib/types/async.d.ts.map +1 -1
  62. package/lib/types/capability/bedrock.d.ts +7 -0
  63. package/lib/types/capability/bedrock.d.ts.map +1 -0
  64. package/lib/types/capability/openai.d.ts +11 -0
  65. package/lib/types/capability/openai.d.ts.map +1 -0
  66. package/lib/types/capability/vertexai.d.ts +11 -0
  67. package/lib/types/capability/vertexai.d.ts.map +1 -0
  68. package/lib/types/capability.d.ts +5 -0
  69. package/lib/types/capability.d.ts.map +1 -0
  70. package/lib/types/formatters/commons.d.ts +1 -1
  71. package/lib/types/formatters/commons.d.ts.map +1 -1
  72. package/lib/types/formatters/generic.d.ts +2 -2
  73. package/lib/types/formatters/generic.d.ts.map +1 -1
  74. package/lib/types/formatters/index.d.ts +0 -3
  75. package/lib/types/formatters/index.d.ts.map +1 -1
  76. package/lib/types/formatters/nova.d.ts +2 -2
  77. package/lib/types/formatters/nova.d.ts.map +1 -1
  78. package/lib/types/formatters/openai.d.ts +3 -2
  79. package/lib/types/formatters/openai.d.ts.map +1 -1
  80. package/lib/types/index.d.ts +2 -1
  81. package/lib/types/index.d.ts.map +1 -1
  82. package/lib/types/json.d.ts +1 -7
  83. package/lib/types/json.d.ts.map +1 -1
  84. package/lib/types/options.d.ts +2 -13
  85. package/lib/types/options.d.ts.map +1 -1
  86. package/lib/types/validation.d.ts +1 -1
  87. package/lib/types/validation.d.ts.map +1 -1
  88. package/package.json +3 -2
  89. package/src/CompletionStream.ts +5 -5
  90. package/src/Driver.ts +5 -5
  91. package/src/async.ts +5 -8
  92. package/src/capability/bedrock.ts +187 -0
  93. package/src/capability/openai.ts +124 -0
  94. package/src/capability/vertexai.ts +88 -0
  95. package/src/capability.ts +49 -0
  96. package/src/formatters/commons.ts +1 -1
  97. package/src/formatters/generic.ts +2 -2
  98. package/src/formatters/index.ts +0 -5
  99. package/src/formatters/nova.ts +6 -6
  100. package/src/formatters/openai.ts +19 -5
  101. package/src/index.ts +3 -2
  102. package/src/json.ts +2 -10
  103. package/src/options.ts +12 -50
  104. package/src/resolver.ts +2 -2
  105. package/src/validation.ts +3 -3
  106. package/lib/cjs/options/bedrock.js +0 -343
  107. package/lib/cjs/options/bedrock.js.map +0 -1
  108. package/lib/cjs/options/groq.js +0 -37
  109. package/lib/cjs/options/groq.js.map +0 -1
  110. package/lib/cjs/options/openai.js +0 -123
  111. package/lib/cjs/options/openai.js.map +0 -1
  112. package/lib/cjs/options/vertexai.js +0 -257
  113. package/lib/cjs/options/vertexai.js.map +0 -1
  114. package/lib/cjs/types.js +0 -80
  115. package/lib/cjs/types.js.map +0 -1
  116. package/lib/esm/options/bedrock.js +0 -340
  117. package/lib/esm/options/bedrock.js.map +0 -1
  118. package/lib/esm/options/groq.js +0 -34
  119. package/lib/esm/options/groq.js.map +0 -1
  120. package/lib/esm/options/openai.js +0 -120
  121. package/lib/esm/options/openai.js.map +0 -1
  122. package/lib/esm/options/vertexai.js +0 -253
  123. package/lib/esm/options/vertexai.js.map +0 -1
  124. package/lib/esm/types.js +0 -77
  125. package/lib/esm/types.js.map +0 -1
  126. package/lib/types/options/bedrock.d.ts +0 -32
  127. package/lib/types/options/bedrock.d.ts.map +0 -1
  128. package/lib/types/options/groq.d.ts +0 -12
  129. package/lib/types/options/groq.d.ts.map +0 -1
  130. package/lib/types/options/openai.d.ts +0 -21
  131. package/lib/types/options/openai.d.ts.map +0 -1
  132. package/lib/types/options/vertexai.d.ts +0 -52
  133. package/lib/types/options/vertexai.d.ts.map +0 -1
  134. package/lib/types/types.d.ts +0 -323
  135. package/lib/types/types.d.ts.map +0 -1
  136. package/src/options/bedrock.ts +0 -388
  137. package/src/options/groq.ts +0 -47
  138. package/src/options/openai.ts +0 -148
  139. package/src/options/vertexai.ts +0 -312
  140. package/src/types.ts +0 -405
@@ -0,0 +1,124 @@
1
+ import { ModelModalities } from "@llumiverse/common";
2
+
3
+ // Record of OpenAI model capabilities keyed by model ID (lowercased)
4
+ const RECORD_MODEL_CAPABILITIES: Record<string, { input: ModelModalities; output: ModelModalities; tool_support?: boolean }> = {
5
+ "chatgpt-4o-latest": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
6
+ "gpt-3.5-turbo": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
7
+ "gpt-3.5-turbo-0125": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
8
+ "gpt-3.5-turbo-1106": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
9
+ "gpt-3.5-turbo-16k": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
10
+ "gpt-3.5-turbo-instruct": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
11
+ "gpt-3.5-turbo-instruct-0914": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
12
+ "gpt-4": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
13
+ "gpt-4-0125-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
14
+ "gpt-4-0613": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
15
+ "gpt-4-1106-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
16
+ "gpt-4-turbo": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
17
+ "gpt-4-turbo-2024-04-09": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
18
+ "gpt-4-turbo-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
19
+ "gpt-4.1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
20
+ "gpt-4.1-2025-04-14": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
21
+ "gpt-4.1-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
22
+ "gpt-4.1-mini-2025-04-14": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
23
+ "gpt-4.1-nano": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
24
+ "gpt-4.1-nano-2025-04-14": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
25
+ "gpt-4.5-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
26
+ "gpt-4.5-preview-2025-02-27": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
27
+ "gpt-4o": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
28
+ "gpt-4o-2024-05-13": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
29
+ "gpt-4o-2024-08-06": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
30
+ "gpt-4o-2024-11-20": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
31
+ "gpt-4o-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
32
+ "gpt-4o-mini-2024-07-18": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
33
+ "gpt-4o-mini-search-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
34
+ "gpt-4o-mini-search-preview-2025-03-11": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
35
+ "gpt-4o-search-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
36
+ "gpt-4o-search-preview-2025-03-11": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
37
+ "gpt-image-1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: false, image: true, video: false, audio: false, embed: false }, tool_support: false },
38
+ "o1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
39
+ "o1-2024-12-17": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
40
+ "o1-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
41
+ "o1-mini-2024-09-12": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
42
+ "o1-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
43
+ "o1-preview-2024-09-12": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
44
+ "o1-pro": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
45
+ "o1-pro-2025-03-19": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
46
+ "o3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
47
+ "o3-2025-04-16": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
48
+ "o3-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
49
+ "o3-mini-2025-01-31": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
50
+ "o4-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
51
+ "o4-mini-2025-04-16": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true }
52
+ };
53
+
54
+ // Populate RECORD_FAMILY_CAPABILITIES as a const record (lowest common denominator for each family)
55
+ const RECORD_FAMILY_CAPABILITIES: Record<string, { input: ModelModalities; output: ModelModalities; tool_support?: boolean }> = {
56
+ "gpt-3.5-turbo": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
57
+ "gpt-4": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
58
+ "gpt-4.1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
59
+ "gpt-4.5": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
60
+ "gpt-4o": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
61
+ "o1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
62
+ "o1-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
63
+ "o1-pro": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
64
+ "o3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
65
+ "o3-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
66
+ "o4-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true }
67
+ };
68
+
69
// Fallback pattern lists for inferring modalities and tool support when a
// model appears in neither capability record. Each list holds substrings
// that are matched (via modelMatches) against the lowercased model name.
const IMAGE_INPUT_MODELS = ["image"];
const VIDEO_INPUT_MODELS = ["video"];
const AUDIO_INPUT_MODELS = ["audio"];
const TEXT_INPUT_MODELS = ["text"];
const IMAGE_OUTPUT_MODELS = ["image"];
const VIDEO_OUTPUT_MODELS = ["video"];
const AUDIO_OUTPUT_MODELS = ["audio"];
const TEXT_OUTPUT_MODELS = ["text"];
const EMBEDDING_OUTPUT_MODELS = ["embed"];
// Name fragments that indicate tool (function-calling) support.
const TOOL_SUPPORT_MODELS = ["tool", "o1", "o3", "o4", "gpt-4.1", "gpt-4o"];
80
+
81
+ function modelMatches(modelName: string, patterns: string[]): boolean {
82
+ return patterns.some(pattern => modelName.includes(pattern));
83
+ }
84
+
85
+ function normalizeOpenAIModelName(modelName: string): string {
86
+ return modelName.toLowerCase();
87
+ }
88
+
89
+ /**
90
+ * Get the full ModelCapabilities for an OpenAI model.
91
+ * Checks RECORD_MODEL_CAPABILITIES first, then falls back to pattern-based inference.
92
+ */
93
+ export function getModelCapabilitiesOpenAI(model: string): { input: ModelModalities; output: ModelModalities; tool_support?: boolean } {
94
+ let normalized = normalizeOpenAIModelName(model);
95
+ const record = RECORD_MODEL_CAPABILITIES[normalized];
96
+ if (record) return record;
97
+ let bestFamilyKey = undefined;
98
+ let bestFamilyLength = 0;
99
+ for (const key of Object.keys(RECORD_FAMILY_CAPABILITIES)) {
100
+ if (normalized.startsWith(key) && key.length > bestFamilyLength) {
101
+ bestFamilyKey = key;
102
+ bestFamilyLength = key.length;
103
+ }
104
+ }
105
+ if (bestFamilyKey) {
106
+ return RECORD_FAMILY_CAPABILITIES[bestFamilyKey];
107
+ }
108
+ const input: ModelModalities = {
109
+ text: modelMatches(normalized, TEXT_INPUT_MODELS) || undefined,
110
+ image: modelMatches(normalized, IMAGE_INPUT_MODELS) || undefined,
111
+ video: modelMatches(normalized, VIDEO_INPUT_MODELS) || undefined,
112
+ audio: modelMatches(normalized, AUDIO_INPUT_MODELS) || undefined,
113
+ embed: false
114
+ };
115
+ const output: ModelModalities = {
116
+ text: modelMatches(normalized, TEXT_OUTPUT_MODELS) || undefined,
117
+ image: modelMatches(normalized, IMAGE_OUTPUT_MODELS) || undefined,
118
+ video: modelMatches(normalized, VIDEO_OUTPUT_MODELS) || undefined,
119
+ audio: modelMatches(normalized, AUDIO_OUTPUT_MODELS) || undefined,
120
+ embed: modelMatches(normalized, EMBEDDING_OUTPUT_MODELS) || undefined
121
+ };
122
+ const tool_support = modelMatches(normalized, TOOL_SUPPORT_MODELS) || undefined;
123
+ return { input, output, tool_support };
124
+ }
@@ -0,0 +1,88 @@
1
+ import { ModelModalities } from "@llumiverse/common";
2
+
3
+ // Record of Vertex AI model capabilities keyed by model ID (last path segment, lowercased)
4
+ const RECORD_MODEL_CAPABILITIES: Record<string, { input: ModelModalities; output: ModelModalities; tool_support?: boolean }> = {
5
+ "imagen-3.0-generate-002": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: false, image: true, video: false, audio: false, embed: false }, tool_support: false },
6
+ "imagen-3.0-capability-001": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: false, image: true, video: false, audio: false, embed: false }, tool_support: false },
7
+ "gemini-1.5-flash-002": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
8
+ "gemini-1.5-pro-002": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
9
+ "gemini-2.0-flash-001": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
10
+ "gemini-2.0-flash-lite-001": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
11
+ "gemini-2.5-flash-preview-04-17": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
12
+ "gemini-2.5-pro-preview-05-06": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
13
+ "claude-3-opus": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
14
+ "claude-3-haiku": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
15
+ "claude-3-5-sonnet": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
16
+ "claude-3-5-haiku": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
17
+ "claude-3-5-sonnet-v2": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
18
+ "claude-3-7-sonnet": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true }
19
+ };
20
+
21
+ // Populate RECORD_FAMILY_CAPABILITIES as a const record (lowest common denominator for each family)
22
+ const RECORD_FAMILY_CAPABILITIES: Record<string, { input: ModelModalities; output: ModelModalities; tool_support?: boolean }> = {
23
+ "gemini-1.5": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
24
+ "gemini-2.0": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
25
+ "gemini-2.5": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
26
+ "imagen-3.0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: false, image: true, video: false, audio: false, embed: false }, tool_support: false },
27
+ "claude-3-5": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
28
+ "claude-3-7": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
29
+ "claude-3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true }
30
+ };
31
+
32
// Fallback pattern lists for inferring modalities and tool support when a
// model appears in neither capability record. Each list holds substrings
// that are matched (via modelMatches) against the normalized model name.
const IMAGE_INPUT_MODELS = ["image"];
const VIDEO_INPUT_MODELS = ["video"];
const AUDIO_INPUT_MODELS = ["audio"];
const TEXT_INPUT_MODELS = ["text"];
const IMAGE_OUTPUT_MODELS = ["image"];
const VIDEO_OUTPUT_MODELS = ["video"];
const AUDIO_OUTPUT_MODELS = ["audio"];
const TEXT_OUTPUT_MODELS = ["text"];
const EMBEDDING_OUTPUT_MODELS = ["embed"];
// Name fragments that indicate tool (function-calling) support.
const TOOL_SUPPORT_MODELS = ["tool", "sonnet", "opus", "gemini", "claude-3-5", "claude-3-7"];
43
+
44
+ function modelMatches(modelName: string, patterns: string[]): boolean {
45
+ return patterns.some(pattern => modelName.includes(pattern));
46
+ }
47
+
48
+ function normalizeVertexAIModelName(modelName: string): string {
49
+ const segments = modelName.toLowerCase().split("/");
50
+ return segments[segments.length - 1];
51
+ }
52
+
53
+ /**
54
+ * Get the full ModelCapabilities for a Vertex AI model.
55
+ * Checks RECORD_MODEL_CAPABILITIES first, then falls back to pattern-based inference.
56
+ */
57
+ export function getModelCapabilitiesVertexAI(model: string): { input: ModelModalities; output: ModelModalities; tool_support?: boolean } {
58
+ let normalized = normalizeVertexAIModelName(model);
59
+ const record = RECORD_MODEL_CAPABILITIES[normalized];
60
+ if (record) return record;
61
+ let bestFamilyKey = undefined;
62
+ let bestFamilyLength = 0;
63
+ for (const key of Object.keys(RECORD_FAMILY_CAPABILITIES)) {
64
+ if (normalized.startsWith(key) && key.length > bestFamilyLength) {
65
+ bestFamilyKey = key;
66
+ bestFamilyLength = key.length;
67
+ }
68
+ }
69
+ if (bestFamilyKey) {
70
+ return RECORD_FAMILY_CAPABILITIES[bestFamilyKey];
71
+ }
72
+ const input: ModelModalities = {
73
+ text: modelMatches(normalized, TEXT_INPUT_MODELS) || undefined,
74
+ image: modelMatches(normalized, IMAGE_INPUT_MODELS) || undefined,
75
+ video: modelMatches(normalized, VIDEO_INPUT_MODELS) || undefined,
76
+ audio: modelMatches(normalized, AUDIO_INPUT_MODELS) || undefined,
77
+ embed: false
78
+ };
79
+ const output: ModelModalities = {
80
+ text: modelMatches(normalized, TEXT_OUTPUT_MODELS) || undefined,
81
+ image: modelMatches(normalized, IMAGE_OUTPUT_MODELS) || undefined,
82
+ video: modelMatches(normalized, VIDEO_OUTPUT_MODELS) || undefined,
83
+ audio: modelMatches(normalized, AUDIO_OUTPUT_MODELS) || undefined,
84
+ embed: modelMatches(normalized, EMBEDDING_OUTPUT_MODELS) || undefined
85
+ };
86
+ const tool_support = modelMatches(normalized, TOOL_SUPPORT_MODELS) || undefined;
87
+ return { input, output, tool_support };
88
+ }
@@ -0,0 +1,49 @@
1
+ import { getModelCapabilitiesBedrock } from "./capability/bedrock.js";
2
+ import { getModelCapabilitiesOpenAI } from "./capability/openai.js";
3
+ import { getModelCapabilitiesVertexAI } from "./capability/vertexai.js";
4
+ import { ModelCapabilities, ModelModalities } from "@llumiverse/common";
5
+
6
+ export function getModelCapabilities(model: string, provider?: string): ModelCapabilities {
7
+ const capabilities = _getModelCapabilities(model, provider);
8
+ // Globally disable audio and video for all models, as we don't support them yet
9
+ // We also do not support tool use while streaming
10
+ // TODO: Remove this when we add support.
11
+ capabilities.input.audio = false;
12
+ capabilities.output.audio = false;
13
+ capabilities.output.video = false;
14
+ capabilities.tool_support_streaming = false;
15
+ return capabilities;
16
+ }
17
+
18
+ function _getModelCapabilities(model: string, provider?: string): ModelCapabilities {
19
+ switch (provider?.toLowerCase()) {
20
+ case "vertexai":
21
+ return getModelCapabilitiesVertexAI(model);
22
+ case "openai":
23
+ return getModelCapabilitiesOpenAI(model);
24
+ case "bedrock":
25
+ return getModelCapabilitiesBedrock(model);
26
+ default:
27
+ // Guess the provider based on the model name
28
+ if (model.startsWith("gpt")) {
29
+ return getModelCapabilitiesOpenAI(model);
30
+ } else if (model.startsWith("publishers/")) {
31
+ return getModelCapabilitiesVertexAI(model);
32
+ } else if (model.startsWith("arn:aws")) {
33
+ return getModelCapabilitiesBedrock(model);
34
+ }
35
+ // Fallback to a generic model with no capabilities
36
+ return { input: {}, output: {} } satisfies ModelCapabilities;
37
+ }
38
+ }
39
+
40
+ export function supportsToolUse(model: string, provider?: string, streaming: boolean = false): boolean {
41
+ const capabilities = getModelCapabilities(model, provider);
42
+ return streaming ? !!capabilities.tool_support_streaming : !!capabilities.tool_support;
43
+ }
44
+
45
+ export function modelModalitiesToArray(modalities: ModelModalities): string[] {
46
+ return Object.entries(modalities)
47
+ .filter(([_, isSupported]) => isSupported)
48
+ .map(([modality]) => modality);
49
+ }
@@ -1,4 +1,4 @@
1
- import { JSONSchema } from "../types.js";
1
+ import { JSONSchema } from "@llumiverse/common";
2
2
 
3
3
  export function getJSONSafetyNotice(schema: JSONSchema) {
4
4
  return "The answer must be a JSON object using the following JSON Schema:\n" + JSON.stringify(schema, undefined, 2);
@@ -1,5 +1,5 @@
1
- import { JSONSchema } from "../types.js";
2
- import { PromptRole, PromptSegment } from "../types.js";
1
+ import { JSONSchema } from "@llumiverse/common";
2
+ import { PromptRole, PromptSegment } from "@llumiverse/common";
3
3
  import { getJSONSafetyNotice } from "./commons.js";
4
4
 
5
5
  interface Labels {
@@ -1,8 +1,3 @@
1
- import { JSONSchema } from "../types.js";
2
- import { PromptSegment } from "../types.js";
3
-
4
- export type PromptFormatter<T = any> = (messages: PromptSegment[], schema?: JSONSchema) => T;
5
-
6
1
  export * from "./commons.js";
7
2
  export * from "./generic.js";
8
3
  export * from "./openai.js";
@@ -1,6 +1,6 @@
1
- import { JSONSchema } from "../types.js";
2
- import { PromptRole, PromptSegment, readStreamAsBase64 } from "../index.js";
3
- //import { readStreamAsBase64 } from "../stream.js";
1
+ import { JSONSchema } from "@llumiverse/common";
2
+ import { PromptRole, PromptSegment } from "@llumiverse/common";
3
+ import { readStreamAsBase64 } from "../stream.js";
4
4
  import { getJSONSafetyNotice } from "./commons.js";
5
5
 
6
6
  export interface NovaMessage {
@@ -105,7 +105,7 @@ export async function formatNovaPrompt(segments: PromptSegment[], schema?: JSONS
105
105
  safety.push("IMPORTANT: " + getJSONSafetyNotice(schema));
106
106
  }
107
107
 
108
- // messages must contains at least 1 item. If the prompt doesn;t contains a user message (but only system messages)
108
+ // messages must contains at least 1 item. If the prompt does not contains a user message (but only system messages)
109
109
  // we need to put the system messages in the messages array
110
110
 
111
111
  let systemMessage = system.join('\n').trim();
@@ -119,7 +119,7 @@ export async function formatNovaPrompt(segments: PromptSegment[], schema?: JSONS
119
119
  systemMessage = systemMessage + '\n\nIMPORTANT: ' + safety.join('\n');
120
120
  }
121
121
 
122
- /*start Nova's message to amke sure it answers properly in JSON
122
+ /*start Nova's message to make sure it answers properly in JSON
123
123
  if enabled, this requires to add the { to Nova's response*/
124
124
 
125
125
  if (schema) {
@@ -131,7 +131,7 @@ export async function formatNovaPrompt(segments: PromptSegment[], schema?: JSONS
131
131
  });
132
132
  }
133
133
 
134
- // put system mesages first and safety last
134
+ // put system messages first and safety last
135
135
  return {
136
136
  system: systemMessage ? [{ text: systemMessage }] : [{ text: "" }],
137
137
  messages: messages,
@@ -1,6 +1,6 @@
1
- import { PromptRole, PromptOptions } from "../index.js";
1
+ import { PromptRole, PromptOptions } from "@llumiverse/common";
2
2
  import { readStreamAsBase64 } from "../stream.js";
3
- import { PromptSegment } from "../types.js";
3
+ import { PromptSegment } from "@llumiverse/common";
4
4
 
5
5
 
6
6
  export interface OpenAITextMessage {
@@ -33,6 +33,20 @@ export interface OpenAIContentPartImage {
33
33
  }
34
34
  }
35
35
 
36
+ //Anything before gpt-4o-mini, gpt-4o-mini-2024-07-18, and gpt-4o-2024-08-06 model snapshots.
37
+ //https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat
38
+ export const noStructuredOutputModels: string[] = [
39
+ "turbo",
40
+ "davinci",
41
+ "babbage",
42
+ "curie",
43
+ "chatgpt-4o",
44
+ "gpt-4",
45
+ "gpt-4o-2024-05-13", //later gpt-4o does support structured output
46
+ "o1-preview",
47
+ "o1-mini",
48
+ ]
49
+
36
50
  /**
37
51
  * OpenAI text only prompts
38
52
  * @param segments
@@ -56,7 +70,7 @@ export function formatOpenAILikeTextPrompt(segments: PromptSegment[]): OpenAITex
56
70
  }
57
71
  }
58
72
 
59
- // put system mesages first and safety last
73
+ // put system messages first and safety last
60
74
  return system.concat(user).concat(safety);
61
75
  }
62
76
 
@@ -70,7 +84,7 @@ export async function formatOpenAILikeMultimodalPrompt(segments: PromptSegment[]
70
84
 
71
85
  const parts: (OpenAIContentPartImage | OpenAIContentPartText)[] = [];
72
86
 
73
- //generate the parts based on promptsegment
87
+ //generate the parts based on PromptSegment
74
88
  if (msg.files) {
75
89
  for (const file of msg.files) {
76
90
  const stream = await file.getStream();
@@ -147,7 +161,7 @@ export async function formatOpenAILikeMultimodalPrompt(segments: PromptSegment[]
147
161
  })
148
162
  }
149
163
 
150
- // put system mesages first and safety last
164
+ // put system messages first and safety last
151
165
  return ([] as OpenAIInputMessage[]).concat(system).concat(others).concat(safety);
152
166
 
153
167
  }
package/src/index.ts CHANGED
@@ -1,5 +1,6 @@
1
1
  export * from "./Driver.js";
2
2
  export * from "./json.js";
3
3
  export * from "./stream.js";
4
- export * from "./types.js";
5
- export * from "./options.js";
4
+ export * from "./options.js";
5
+ export * from "./capability.js";
6
+ export * from "@llumiverse/common";
package/src/json.ts CHANGED
@@ -1,3 +1,4 @@
1
+ import { JSONValue } from "@llumiverse/common";
1
2
 
2
3
  function extractJsonFromText(text: string): string {
3
4
  const start = text.indexOf("{");
@@ -10,15 +11,6 @@ export function extractAndParseJSON(text: string): JSONValue {
10
11
  return parseJSON(extractJsonFromText(text));
11
12
  }
12
13
 
13
- export type JSONPrimitive = string | number | boolean | null;
14
- export type JSONArray = JSONValue[];
15
- export type JSONObject = { [key: string]: JSONValue };
16
- export type JSONComposite = JSONArray | JSONObject;
17
- export type JSONValue = JSONPrimitive | JSONComposite;
18
-
19
-
20
-
21
-
22
14
  const RX_DQUOTE = /^"([^"\\]|\\.)*"/us;
23
15
  const RX_SQUOTE = /^'([^'\\]|\\.)*'/us;
24
16
  const RX_NUMBER = /^-?\d+(\.\d+)?/;
@@ -192,7 +184,7 @@ export function parseJSON(text: string): JSONValue {
192
184
  // use a relaxed parser
193
185
  try {
194
186
  return JsonParser.parse(text);
195
- } catch (err2: any) { // throw the original error
187
+ } catch (err2: any) { // throw the original error
196
188
  throw err;
197
189
  }
198
190
  }
package/src/options.ts CHANGED
@@ -1,53 +1,15 @@
1
- import { ModelOptions, ModelOptionsInfo, OptionType, SharedOptions } from "./types.js";
2
- import { getBedrockOptions } from "./options/bedrock.js";
3
- import { getVertexAiOptions } from "./options/vertexai.js";
4
- import { getOpenAiOptions } from "./options/openai.js";
5
- import { getGroqOptions } from "./options/groq.js";
1
+ import {
2
+ getBedrockOptions,
3
+ textOptionsFallback,
4
+ getGroqOptions,
5
+ getOpenAiOptions,
6
+ getVertexAiOptions,
7
+ ModelOptions,
8
+ ModelOptionsInfo
9
+ } from "@llumiverse/common";
6
10
 
7
- export interface TextFallbackOptions {
8
- _option_id: "text-fallback"; //For specific models should be format as "provider-model"
9
- max_tokens?: number;
10
- temperature?: number;
11
- top_p?: number;
12
- top_k?: number;
13
- presence_penalty?: number;
14
- frequency_penalty?: number;
15
- stop_sequence?: string[];
16
- }
17
-
18
- export const textOptionsFallback: ModelOptionsInfo = {
19
- _option_id: "text-fallback",
20
- options: [
21
- {
22
- name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1,
23
- integer: true, step: 200, description: "The maximum number of tokens to generate"
24
- },
25
- {
26
- name: SharedOptions.temperature, type: OptionType.numeric, min: 0.0, default: 0.7,
27
- integer: false, step: 0.1, description: "A higher temperature biases toward less likely tokens, making the model more creative"
28
- },
29
- {
30
- name: SharedOptions.top_p, type: OptionType.numeric, min: 0, max: 1,
31
- integer: false, step: 0.1, description: "Limits token sampling to the cumulative probability of the top p tokens"
32
- },
33
- {
34
- name: SharedOptions.top_k, type: OptionType.numeric, min: 1,
35
- integer: true, step: 1, description: "Limits token sampling to the top k tokens"
36
- },
37
- {
38
- name: SharedOptions.presence_penalty, type: OptionType.numeric, min: -2.0, max: 2.0,
39
- integer: false, step: 0.1, description: "Penalise tokens if they appear at least once in the text"
40
- },
41
- {
42
- name: SharedOptions.frequency_penalty, type: OptionType.numeric, min: -2.0, max: 2.0,
43
- integer: false, step: 0.1, description: "Penalise tokens based on their frequency in the text"
44
- },
45
- { name: SharedOptions.stop_sequence, type: OptionType.string_list, value: [], description: "The generation will halt if one of the stop sequences is output" },
46
- ]
47
- };
48
-
49
- export function getOptions(provider?: string, model?: string, options?: ModelOptions): ModelOptionsInfo {
50
- switch (provider) {
11
+ export function getOptions(model: string, provider?: string, options?: ModelOptions): ModelOptionsInfo {
12
+ switch (provider?.toLowerCase()) {
51
13
  case "bedrock":
52
14
  return getBedrockOptions(model ?? "", options);
53
15
  case "vertexai":
@@ -59,4 +21,4 @@ export function getOptions(provider?: string, model?: string, options?: ModelOpt
59
21
  default:
60
22
  return textOptionsFallback;
61
23
  }
62
- }
24
+ }
package/src/resolver.ts CHANGED
@@ -1,8 +1,8 @@
1
1
  /**
2
2
  * Get the property named by "name" of the given object
3
- * If an array is idnexed using a string key then a map is done and an array with the content of the properties with that name are returned
3
+ * If an array is indexed using a string key then a map is done and an array with the content of the properties with that name are returned
4
4
  * Ex: docs.text => will return an array of text properties of the docs array
5
- * @param object the obejct
5
+ * @param object the object
6
6
  * @param name the name of the property.
7
7
  * @returns the property value
8
8
  */
package/src/validation.ts CHANGED
@@ -2,7 +2,7 @@ import { Ajv } from 'ajv';
2
2
  import addFormats from 'ajv-formats';
3
3
  import { extractAndParseJSON } from "./json.js";
4
4
  import { resolveField } from './resolver.js';
5
- import { ResultValidationError } from "./types.js";
5
+ import { ResultValidationError } from "@llumiverse/common";
6
6
 
7
7
 
8
8
  const ajv = new Ajv({
@@ -45,7 +45,7 @@ export function validateResult(data: any, schema: Object) {
45
45
  const valid = validate(json);
46
46
 
47
47
  if (!valid && validate.errors) {
48
- let errors = [];
48
+ let errors = [];
49
49
 
50
50
  for (const e of validate.errors) {
51
51
  const path = e.instancePath.split("/").slice(1);
@@ -55,7 +55,7 @@ export function validateResult(data: any, schema: Object) {
55
55
  const schemaField = resolveField(schema, schemaPath.slice(0, -3));
56
56
 
57
57
  //ignore date if empty or null
58
- if (!value
58
+ if (!value
59
59
  && ["date", "date-time"].includes(schemaFieldFormat)
60
60
  && !schemaField?.required?.includes(path[path.length - 1])) {
61
61
  continue;