@llumiverse/core 0.18.0 → 0.20.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/lib/cjs/formatters/openai.js +0 -14
  2. package/lib/cjs/formatters/openai.js.map +1 -1
  3. package/lib/cjs/index.js +0 -2
  4. package/lib/cjs/index.js.map +1 -1
  5. package/lib/cjs/stream.js +12 -1
  6. package/lib/cjs/stream.js.map +1 -1
  7. package/lib/esm/formatters/openai.js +0 -13
  8. package/lib/esm/formatters/openai.js.map +1 -1
  9. package/lib/esm/index.js +0 -2
  10. package/lib/esm/index.js.map +1 -1
  11. package/lib/esm/stream.js +10 -1
  12. package/lib/esm/stream.js.map +1 -1
  13. package/lib/types/async.d.ts +1 -1
  14. package/lib/types/async.d.ts.map +1 -1
  15. package/lib/types/formatters/openai.d.ts +0 -1
  16. package/lib/types/formatters/openai.d.ts.map +1 -1
  17. package/lib/types/index.d.ts +0 -2
  18. package/lib/types/index.d.ts.map +1 -1
  19. package/lib/types/stream.d.ts +2 -0
  20. package/lib/types/stream.d.ts.map +1 -1
  21. package/package.json +4 -4
  22. package/src/async.ts +1 -1
  23. package/src/formatters/openai.ts +0 -14
  24. package/src/index.ts +0 -2
  25. package/src/stream.ts +15 -3
  26. package/lib/cjs/capability/bedrock.js +0 -183
  27. package/lib/cjs/capability/bedrock.js.map +0 -1
  28. package/lib/cjs/capability/openai.js +0 -122
  29. package/lib/cjs/capability/openai.js.map +0 -1
  30. package/lib/cjs/capability/vertexai.js +0 -86
  31. package/lib/cjs/capability/vertexai.js.map +0 -1
  32. package/lib/cjs/capability.js +0 -52
  33. package/lib/cjs/capability.js.map +0 -1
  34. package/lib/cjs/options.js +0 -19
  35. package/lib/cjs/options.js.map +0 -1
  36. package/lib/esm/capability/bedrock.js +0 -180
  37. package/lib/esm/capability/bedrock.js.map +0 -1
  38. package/lib/esm/capability/openai.js +0 -119
  39. package/lib/esm/capability/openai.js.map +0 -1
  40. package/lib/esm/capability/vertexai.js +0 -83
  41. package/lib/esm/capability/vertexai.js.map +0 -1
  42. package/lib/esm/capability.js +0 -47
  43. package/lib/esm/capability.js.map +0 -1
  44. package/lib/esm/options.js +0 -16
  45. package/lib/esm/options.js.map +0 -1
  46. package/lib/types/capability/bedrock.d.ts +0 -7
  47. package/lib/types/capability/bedrock.d.ts.map +0 -1
  48. package/lib/types/capability/openai.d.ts +0 -11
  49. package/lib/types/capability/openai.d.ts.map +0 -1
  50. package/lib/types/capability/vertexai.d.ts +0 -11
  51. package/lib/types/capability/vertexai.d.ts.map +0 -1
  52. package/lib/types/capability.d.ts +0 -5
  53. package/lib/types/capability.d.ts.map +0 -1
  54. package/lib/types/options.d.ts +0 -3
  55. package/lib/types/options.d.ts.map +0 -1
  56. package/src/capability/bedrock.ts +0 -187
  57. package/src/capability/openai.ts +0 -124
  58. package/src/capability/vertexai.ts +0 -88
  59. package/src/capability.ts +0 -49
  60. package/src/options.ts +0 -24
package/src/capability/bedrock.ts DELETED
@@ -1,187 +0,0 @@
- import { ModelModalities, ModelCapabilities } from "@llumiverse/common";
-
- // Record of Bedrock model capabilities keyed by model ID.
- const RECORD_MODEL_CAPABILITIES: Record<string, ModelCapabilities> = {
- "foundation-model/ai21.jamba-1-5-large-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/ai21.jamba-1-5-mini-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/ai21.jamba-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/amazon.nova-canvas-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { image: true, text: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/amazon.nova-lite-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/amazon.nova-micro-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/amazon.nova-pro-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/amazon.titan-text-express-v1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/amazon.titan-text-lite-v1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/amazon.titan-text-premier-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/amazon.titan-tg1-large": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/anthropic.claude-3-5-haiku-20241022-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/anthropic.claude-3-5-sonnet-20240620-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/anthropic.claude-3-5-sonnet-20241022-v2:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/anthropic.claude-3-haiku-20240307-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/anthropic.claude-3-opus-20240229-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/anthropic.claude-3-sonnet-20240229-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/anthropic.claude-instant-v1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/anthropic.claude-v2": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/anthropic.claude-v2:1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/cohere.command-light-text-v14": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/cohere.command-r-plus-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/cohere.command-r-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/cohere.command-text-v14": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/meta.llama3-1-405b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/meta.llama3-1-70b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/meta.llama3-1-8b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/meta.llama3-70b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/meta.llama3-8b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/mistral.mixtral-8x7b-instruct-v0:1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/mistral.mistral-7b-instruct-v0:2": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/mistral.mistral-large-2402-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/mistral.mistral-large-2407-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/mistral.mistral-small-2402-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.amazon.nova-lite-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "inference-profile/us.amazon.nova-micro-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "inference-profile/us.amazon.nova-premier-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "inference-profile/us.amazon.nova-pro-v1:0": { input: { text: true, image: true, video: true, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "inference-profile/us.anthropic.claude-3-5-haiku-20241022-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.anthropic.claude-3-5-sonnet-20240620-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "inference-profile/us.anthropic.claude-3-5-sonnet-20241022-v2:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "inference-profile/us.anthropic.claude-3-haiku-20240307-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.anthropic.claude-3-opus-20240229-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.anthropic.claude-3-sonnet-20240229-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.deepseek.r1-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama3-1-70b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama3-1-8b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama3-2-1b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama3-2-11b-instruct-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.meta.llama3-2-3b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama3-2-90b-instruct-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.meta.llama3-3-70b-instruct-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama4-maverick-17b-instruct-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama4-scout-17b-instruct-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.mistral.pixtral-large-2502-v1:0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.writer.palmyra-x4-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.writer.palmyra-x5-v1:0": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false }
- };
-
- // Populate RECORD_FAMILY_CAPABILITIES as a const record (lowest common denominator for each family)
- const RECORD_FAMILY_CAPABILITIES: Record<string, ModelCapabilities> = {
- "foundation-model/ai21.jamba": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/amazon.nova": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: false, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/amazon.titan": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/anthropic.claude": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/anthropic.claude-3": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/anthropic.claude-3-5": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/anthropic.claude-3-7": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "foundation-model/cohere.command": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/meta.llama3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/mistral.mistral": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "foundation-model/mistral.mistral-large": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "foundation-model/mistral.mixtral": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.anthropic.claude-3-haiku": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.anthropic.claude-3-5-sonnet": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: true },
- "inference-profile/us.anthropic.claude-3-opus": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.anthropic.claude-3-sonnet": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.deepseek.r1": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama4-maverick-17b": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.meta.llama4-scout-17b": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false, tool_support_streaming: false },
- "inference-profile/us.mistral.pixtral": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false },
- "inference-profile/us.writer.palmyra": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true, tool_support_streaming: false }
- };
-
- /**
- * Extract the model identifier from an ARN or inference profile
- * @param modelName The full model ARN or name
- * @returns The normalized model identifier
- */
- function normalizeModelName(modelName: string): string {
- const modelLower = modelName.toLowerCase();
- if (modelLower.includes("inference-profile")) {
- const parts = modelLower.split("/");
- if (parts.length > 1) {
- const providerModel = parts[parts.length - 1];
- const modelParts = providerModel.split(".");
- if (modelParts.length > 1 && modelParts[1] === "deepseek") {
- return `deepseek-${modelParts.slice(2).join(".")}`;
- }
- return modelParts.length > 2 ? modelParts.slice(2).join(".") : providerModel;
- }
- }
- return modelLower;
- }
-
- // Fallback pattern lists for inferring modalities and tool support
- const IMAGE_INPUT_MODELS = ["image"]; // fallback: if model id contains 'image', supports image input
- const VIDEO_INPUT_MODELS = ["video"];
- const AUDIO_INPUT_MODELS = ["audio"];
- const TEXT_INPUT_MODELS = ["text"];
- const IMAGE_OUTPUT_MODELS = ["image"];
- const VIDEO_OUTPUT_MODELS = ["video"];
- const AUDIO_OUTPUT_MODELS = ["audio"];
- const TEXT_OUTPUT_MODELS = ["text"];
- const EMBEDDING_OUTPUT_MODELS = ["embed"];
- const TOOL_SUPPORT_MODELS = ["tool", "sonnet", "opus", "nova", "palmyra", "command-r", "mistral-large", "pixtral"];
-
- function modelMatches(modelName: string, patterns: string[]): boolean {
- return patterns.some(pattern => modelName.includes(pattern));
- }
-
- /**
- * Get the full ModelCapabilities for a Bedrock model.
- * Checks RECORD_MODEL_CAPABILITIES first, then falls back to pattern-based inference.
- */
- export function getModelCapabilitiesBedrock(model: string): ModelCapabilities {
- // Normalize ARN or inference-profile to model ID
- let normalized = model;
- const arnPattern = /^arn:aws:bedrock:[^:]+:[^:]*:(inference-profile|foundation-model)\/.+/i;
- if (arnPattern.test(model)) {
- // Extract after last occurrence of 'foundation-model/' or 'inference-profile/'
- const foundationIdx = model.lastIndexOf('foundation-model/');
- const inferenceIdx = model.lastIndexOf('inference-profile/');
- if (foundationIdx !== -1) {
- normalized = model.substring(foundationIdx);
- } else if (inferenceIdx !== -1) {
- normalized = model.substring(inferenceIdx);
- }
- }
- // Standardize region for inference-profile to 'us' for record lookup
- // This allows support for different AWS regions by mapping all to 'us'
- if (normalized.startsWith("inference-profile/")) {
- normalized = normalized.replace(/^inference-profile\/[^.]+\./, "inference-profile/us.");
- }
-
- // 1. Exact match in record
- const record = RECORD_MODEL_CAPABILITIES[normalized];
- if (record) return record;
-
- // 2. Fallback: find the longest matching family prefix in RECORD_FAMILY_CAPABILITIES
- let bestFamilyKey = undefined;
- let bestFamilyLength = 0;
- for (const key of Object.keys(RECORD_FAMILY_CAPABILITIES)) {
- if (normalized.startsWith(key) && key.length > bestFamilyLength) {
- bestFamilyKey = key;
- bestFamilyLength = key.length;
- }
- }
- if (bestFamilyKey) {
- return RECORD_FAMILY_CAPABILITIES[bestFamilyKey];
- }
-
- // 3. Fallback: infer from normalized name
- normalized = normalizeModelName(normalized);
- const input: ModelModalities = {
- text: modelMatches(normalized, TEXT_INPUT_MODELS) || undefined,
- image: modelMatches(normalized, IMAGE_INPUT_MODELS) || undefined,
- video: modelMatches(normalized, VIDEO_INPUT_MODELS) || undefined,
- audio: modelMatches(normalized, AUDIO_INPUT_MODELS) || undefined,
- embed: false
- };
- const output: ModelModalities = {
- text: modelMatches(normalized, TEXT_OUTPUT_MODELS) || undefined,
- image: modelMatches(normalized, IMAGE_OUTPUT_MODELS) || undefined,
- video: modelMatches(normalized, VIDEO_OUTPUT_MODELS) || undefined,
- audio: modelMatches(normalized, AUDIO_OUTPUT_MODELS) || undefined,
- embed: modelMatches(normalized, EMBEDDING_OUTPUT_MODELS) || undefined
- };
- const tool_support = modelMatches(normalized, TOOL_SUPPORT_MODELS) || undefined;
- return { input, output, tool_support };
- }
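Context for the deleted helper above: getModelCapabilitiesBedrock accepted a full Bedrock ARN, an inference-profile path, or a bare foundation-model ID, normalized it, and looked it up in the records before falling back to name-pattern inference. A minimal usage sketch against 0.18.x (the relative import mirrors the removed capability.ts; the eu inference profile is only an illustration):

import { getModelCapabilitiesBedrock } from "./capability/bedrock.js";

// Full ARN: everything before "foundation-model/" is stripped, then the exact record entry is returned
const caps = getModelCapabilitiesBedrock(
    "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-5-sonnet-20240620-v1:0"
);
console.log(caps.input.image, caps.tool_support); // true true

// Inference profiles from other regions are rewritten to the "us." key before the record lookup
const nova = getModelCapabilitiesBedrock("inference-profile/eu.amazon.nova-lite-v1:0");
console.log(nova.input.video, nova.tool_support_streaming); // true true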
package/src/capability/openai.ts DELETED
@@ -1,124 +0,0 @@
- import { ModelModalities } from "@llumiverse/common";
-
- // Record of OpenAI model capabilities keyed by model ID (lowercased)
- const RECORD_MODEL_CAPABILITIES: Record<string, { input: ModelModalities; output: ModelModalities; tool_support?: boolean }> = {
- "chatgpt-4o-latest": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-3.5-turbo": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-3.5-turbo-0125": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-3.5-turbo-1106": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-3.5-turbo-16k": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-3.5-turbo-instruct": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-3.5-turbo-instruct-0914": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4-0125-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4-0613": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4-1106-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4-turbo": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4-turbo-2024-04-09": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4-turbo-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4.1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4.1-2025-04-14": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4.1-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4.1-mini-2025-04-14": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4.1-nano": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4.1-nano-2025-04-14": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4.5-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4.5-preview-2025-02-27": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o-2024-05-13": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o-2024-08-06": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o-2024-11-20": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o-mini-2024-07-18": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o-mini-search-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o-mini-search-preview-2025-03-11": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o-search-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o-search-preview-2025-03-11": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-image-1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: false, image: true, video: false, audio: false, embed: false }, tool_support: false },
- "o1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o1-2024-12-17": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o1-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "o1-mini-2024-09-12": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "o1-preview": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o1-preview-2024-09-12": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o1-pro": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o1-pro-2025-03-19": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o3-2025-04-16": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o3-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o3-mini-2025-01-31": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o4-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o4-mini-2025-04-16": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true }
- };
-
- // Populate RECORD_FAMILY_CAPABILITIES as a const record (lowest common denominator for each family)
- const RECORD_FAMILY_CAPABILITIES: Record<string, { input: ModelModalities; output: ModelModalities; tool_support?: boolean }> = {
- "gpt-3.5-turbo": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gpt-4.1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4.5": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gpt-4o": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o1": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o1-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "o1-pro": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o3-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "o4-mini": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true }
- };
-
- // Fallback pattern lists for inferring modalities and tool support
- const IMAGE_INPUT_MODELS = ["image"];
- const VIDEO_INPUT_MODELS = ["video"];
- const AUDIO_INPUT_MODELS = ["audio"];
- const TEXT_INPUT_MODELS = ["text"];
- const IMAGE_OUTPUT_MODELS = ["image"];
- const VIDEO_OUTPUT_MODELS = ["video"];
- const AUDIO_OUTPUT_MODELS = ["audio"];
- const TEXT_OUTPUT_MODELS = ["text"];
- const EMBEDDING_OUTPUT_MODELS = ["embed"];
- const TOOL_SUPPORT_MODELS = ["tool", "o1", "o3", "o4", "gpt-4.1", "gpt-4o"];
-
- function modelMatches(modelName: string, patterns: string[]): boolean {
- return patterns.some(pattern => modelName.includes(pattern));
- }
-
- function normalizeOpenAIModelName(modelName: string): string {
- return modelName.toLowerCase();
- }
-
- /**
- * Get the full ModelCapabilities for an OpenAI model.
- * Checks RECORD_MODEL_CAPABILITIES first, then falls back to pattern-based inference.
- */
- export function getModelCapabilitiesOpenAI(model: string): { input: ModelModalities; output: ModelModalities; tool_support?: boolean } {
- let normalized = normalizeOpenAIModelName(model);
- const record = RECORD_MODEL_CAPABILITIES[normalized];
- if (record) return record;
- let bestFamilyKey = undefined;
- let bestFamilyLength = 0;
- for (const key of Object.keys(RECORD_FAMILY_CAPABILITIES)) {
- if (normalized.startsWith(key) && key.length > bestFamilyLength) {
- bestFamilyKey = key;
- bestFamilyLength = key.length;
- }
- }
- if (bestFamilyKey) {
- return RECORD_FAMILY_CAPABILITIES[bestFamilyKey];
- }
- const input: ModelModalities = {
- text: modelMatches(normalized, TEXT_INPUT_MODELS) || undefined,
- image: modelMatches(normalized, IMAGE_INPUT_MODELS) || undefined,
- video: modelMatches(normalized, VIDEO_INPUT_MODELS) || undefined,
- audio: modelMatches(normalized, AUDIO_INPUT_MODELS) || undefined,
- embed: false
- };
- const output: ModelModalities = {
- text: modelMatches(normalized, TEXT_OUTPUT_MODELS) || undefined,
- image: modelMatches(normalized, IMAGE_OUTPUT_MODELS) || undefined,
- video: modelMatches(normalized, VIDEO_OUTPUT_MODELS) || undefined,
- audio: modelMatches(normalized, AUDIO_OUTPUT_MODELS) || undefined,
- embed: modelMatches(normalized, EMBEDDING_OUTPUT_MODELS) || undefined
- };
- const tool_support = modelMatches(normalized, TOOL_SUPPORT_MODELS) || undefined;
- return { input, output, tool_support };
- }
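The removed OpenAI helper lowercases the model ID, tries an exact record hit, falls back to the longest matching family prefix, and only infers from name patterns as a last resort. A short sketch (relative import as in the removed capability.ts; the dated ID below is hypothetical):

import { getModelCapabilitiesOpenAI } from "./capability/openai.js";

// Exact entry in RECORD_MODEL_CAPABILITIES
getModelCapabilitiesOpenAI("gpt-4o-2024-11-20").tool_support; // true

// Unknown snapshot (hypothetical ID): longest family prefix "gpt-4o" wins over "gpt-4"
getModelCapabilitiesOpenAI("gpt-4o-2026-01-01").input.image; // true

// No record, no family prefix, no pattern match: flags stay undefined
getModelCapabilitiesOpenAI("davinci-002").tool_support; // undefined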
package/src/capability/vertexai.ts DELETED
@@ -1,88 +0,0 @@
- import { ModelModalities } from "@llumiverse/common";
-
- // Record of Vertex AI model capabilities keyed by model ID (last path segment, lowercased)
- const RECORD_MODEL_CAPABILITIES: Record<string, { input: ModelModalities; output: ModelModalities; tool_support?: boolean }> = {
- "imagen-3.0-generate-002": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: false, image: true, video: false, audio: false, embed: false }, tool_support: false },
- "imagen-3.0-capability-001": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: false, image: true, video: false, audio: false, embed: false }, tool_support: false },
- "gemini-1.5-flash-002": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gemini-1.5-pro-002": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gemini-2.0-flash-001": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gemini-2.0-flash-lite-001": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: false },
- "gemini-2.5-flash-preview-04-17": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gemini-2.5-pro-preview-05-06": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "claude-3-opus": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "claude-3-haiku": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "claude-3-5-sonnet": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "claude-3-5-haiku": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "claude-3-5-sonnet-v2": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "claude-3-7-sonnet": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true }
- };
-
- // Populate RECORD_FAMILY_CAPABILITIES as a const record (lowest common denominator for each family)
- const RECORD_FAMILY_CAPABILITIES: Record<string, { input: ModelModalities; output: ModelModalities; tool_support?: boolean }> = {
- "gemini-1.5": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gemini-2.0": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "gemini-2.5": { input: { text: true, image: true, video: true, audio: true, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "imagen-3.0": { input: { text: true, image: true, video: false, audio: false, embed: false }, output: { text: false, image: true, video: false, audio: false, embed: false }, tool_support: false },
- "claude-3-5": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "claude-3-7": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true },
- "claude-3": { input: { text: true, image: false, video: false, audio: false, embed: false }, output: { text: true, image: false, video: false, audio: false, embed: false }, tool_support: true }
- };
-
- // Fallback pattern lists for inferring modalities and tool support
- const IMAGE_INPUT_MODELS = ["image"];
- const VIDEO_INPUT_MODELS = ["video"];
- const AUDIO_INPUT_MODELS = ["audio"];
- const TEXT_INPUT_MODELS = ["text"];
- const IMAGE_OUTPUT_MODELS = ["image"];
- const VIDEO_OUTPUT_MODELS = ["video"];
- const AUDIO_OUTPUT_MODELS = ["audio"];
- const TEXT_OUTPUT_MODELS = ["text"];
- const EMBEDDING_OUTPUT_MODELS = ["embed"];
- const TOOL_SUPPORT_MODELS = ["tool", "sonnet", "opus", "gemini", "claude-3-5", "claude-3-7"];
-
- function modelMatches(modelName: string, patterns: string[]): boolean {
- return patterns.some(pattern => modelName.includes(pattern));
- }
-
- function normalizeVertexAIModelName(modelName: string): string {
- const segments = modelName.toLowerCase().split("/");
- return segments[segments.length - 1];
- }
-
- /**
- * Get the full ModelCapabilities for a Vertex AI model.
- * Checks RECORD_MODEL_CAPABILITIES first, then falls back to pattern-based inference.
- */
- export function getModelCapabilitiesVertexAI(model: string): { input: ModelModalities; output: ModelModalities; tool_support?: boolean } {
- let normalized = normalizeVertexAIModelName(model);
- const record = RECORD_MODEL_CAPABILITIES[normalized];
- if (record) return record;
- let bestFamilyKey = undefined;
- let bestFamilyLength = 0;
- for (const key of Object.keys(RECORD_FAMILY_CAPABILITIES)) {
- if (normalized.startsWith(key) && key.length > bestFamilyLength) {
- bestFamilyKey = key;
- bestFamilyLength = key.length;
- }
- }
- if (bestFamilyKey) {
- return RECORD_FAMILY_CAPABILITIES[bestFamilyKey];
- }
- const input: ModelModalities = {
- text: modelMatches(normalized, TEXT_INPUT_MODELS) || undefined,
- image: modelMatches(normalized, IMAGE_INPUT_MODELS) || undefined,
- video: modelMatches(normalized, VIDEO_INPUT_MODELS) || undefined,
- audio: modelMatches(normalized, AUDIO_INPUT_MODELS) || undefined,
- embed: false
- };
- const output: ModelModalities = {
- text: modelMatches(normalized, TEXT_OUTPUT_MODELS) || undefined,
- image: modelMatches(normalized, IMAGE_OUTPUT_MODELS) || undefined,
- video: modelMatches(normalized, VIDEO_OUTPUT_MODELS) || undefined,
- audio: modelMatches(normalized, AUDIO_OUTPUT_MODELS) || undefined,
- embed: modelMatches(normalized, EMBEDDING_OUTPUT_MODELS) || undefined
- };
- const tool_support = modelMatches(normalized, TOOL_SUPPORT_MODELS) || undefined;
- return { input, output, tool_support };
- }
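The removed Vertex AI helper keys its lookup on the last path segment of the model name (lowercased), so full publisher paths and bare IDs resolve identically. A minimal sketch (relative import as in the removed capability.ts; the preview ID below is illustrative):

import { getModelCapabilitiesVertexAI } from "./capability/vertexai.js";

// "publishers/google/models/gemini-2.0-flash-001" -> "gemini-2.0-flash-001"
const caps = getModelCapabilitiesVertexAI("publishers/google/models/gemini-2.0-flash-001");
console.log(caps.input.audio, caps.tool_support); // true true

// A preview build not in the record falls back to the "gemini-2.5" family entry
getModelCapabilitiesVertexAI("gemini-2.5-flash-preview-05-20").input.video; // true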
package/src/capability.ts DELETED
@@ -1,49 +0,0 @@
- import { getModelCapabilitiesBedrock } from "./capability/bedrock.js";
- import { getModelCapabilitiesOpenAI } from "./capability/openai.js";
- import { getModelCapabilitiesVertexAI } from "./capability/vertexai.js";
- import { ModelCapabilities, ModelModalities } from "@llumiverse/common";
-
- export function getModelCapabilities(model: string, provider?: string): ModelCapabilities {
- const capabilities = _getModelCapabilities(model, provider);
- // Globally disable audio and video for all models, as we don't support them yet
- // We also do not support tool use while streaming
- // TODO: Remove this when we add support.
- capabilities.input.audio = false;
- capabilities.output.audio = false;
- capabilities.output.video = false;
- capabilities.tool_support_streaming = false;
- return capabilities;
- }
-
- function _getModelCapabilities(model: string, provider?: string): ModelCapabilities {
- switch (provider?.toLowerCase()) {
- case "vertexai":
- return getModelCapabilitiesVertexAI(model);
- case "openai":
- return getModelCapabilitiesOpenAI(model);
- case "bedrock":
- return getModelCapabilitiesBedrock(model);
- default:
- // Guess the provider based on the model name
- if (model.startsWith("gpt")) {
- return getModelCapabilitiesOpenAI(model);
- } else if (model.startsWith("publishers/")) {
- return getModelCapabilitiesVertexAI(model);
- } else if (model.startsWith("arn:aws")) {
- return getModelCapabilitiesBedrock(model);
- }
- // Fallback to a generic model with no capabilities
- return { input: {}, output: {} } satisfies ModelCapabilities;
- }
- }
-
- export function supportsToolUse(model: string, provider?: string, streaming: boolean = false): boolean {
- const capabilities = getModelCapabilities(model, provider);
- return streaming ? !!capabilities.tool_support_streaming : !!capabilities.tool_support;
- }
-
- export function modelModalitiesToArray(modalities: ModelModalities): string[] {
- return Object.entries(modalities)
- .filter(([_, isSupported]) => isSupported)
- .map(([modality]) => modality);
- }
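The removed top-level wrapper dispatched on the provider (or guessed it from the model name) and then force-disabled audio input/output, video output, and streaming tool use. A sketch of how 0.18.x callers used these exports (assuming a root re-export in 0.18.x):

import { getModelCapabilities, modelModalitiesToArray, supportsToolUse } from "@llumiverse/core";

const caps = getModelCapabilities("gpt-4o", "openai");
modelModalitiesToArray(caps.input);          // ["text", "image"]
supportsToolUse("gpt-4o", "openai");         // true
supportsToolUse("gpt-4o", "openai", true);   // false, streaming tool use was globally disabled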
package/src/options.ts DELETED
@@ -1,24 +0,0 @@
- import {
- getBedrockOptions,
- textOptionsFallback,
- getGroqOptions,
- getOpenAiOptions,
- getVertexAiOptions,
- ModelOptions,
- ModelOptionsInfo
- } from "@llumiverse/common";
-
- export function getOptions(model: string, provider?: string, options?: ModelOptions): ModelOptionsInfo {
- switch (provider?.toLowerCase()) {
- case "bedrock":
- return getBedrockOptions(model ?? "", options);
- case "vertexai":
- return getVertexAiOptions(model ?? "", options);
- case "openai":
- return getOpenAiOptions(model ?? "", options);
- case "groq":
- return getGroqOptions(model ?? "", options);
- default:
- return textOptionsFallback;
- }
- }
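The removed getOptions helper simply routed to the per-provider option builders from @llumiverse/common and returned textOptionsFallback for unknown providers. Minimal sketch (root re-export assumed, as above; the model and provider strings are placeholders):

import { getOptions } from "@llumiverse/core";

const openaiOptions = getOptions("gpt-4o", "openai");      // routed to getOpenAiOptions
const genericOptions = getOptions("some-model", "other");  // textOptionsFallback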