@blaxel/llamaindex 0.2.49-preview.112 → 0.2.50-dev.215

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/esm/model.js CHANGED
@@ -36,8 +36,22 @@ class BlaxelLLM {
     }
     async ensureMetadata() {
         if (!this._metadata) {
-            const llm = await this.createLLM();
-            this._metadata = llm.metadata;
+            try {
+                const llm = await this.createLLM();
+                this._metadata = llm.metadata;
+            }
+            catch {
+                // If metadata access fails (e.g., Gemini), use default metadata
+                this._metadata = {
+                    model: this.modelData?.spec?.runtime?.model || this.model,
+                    temperature: this.options?.temperature ?? 0,
+                    topP: this.options?.topP ?? 1,
+                    maxTokens: this.options?.maxTokens ?? undefined,
+                    contextWindow: this.options?.contextWindow ?? 4096,
+                    tokenizer: undefined,
+                    structuredOutput: this.options?.structuredOutput ?? false,
+                };
+            }
         }
     }
     async createLLM() {
@@ -52,6 +66,9 @@ class BlaxelLLM {
            });
        }
        if (this.type === "anthropic") {
+            // Set a dummy API key to satisfy AnthropicSession constructor requirement
+            // The actual authentication is handled via defaultHeaders
+            process.env.ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY || "dummy-key-for-blaxel";
            const llm = anthropic({
                model: this.modelData?.spec?.runtime?.model,
                session: new AnthropicSession({
@@ -60,10 +77,57 @@ class BlaxelLLM {
                }),
                ...this.options,
            });
-            return {
+            // Wrap the LLM to normalize Anthropic's response format (array content -> string)
+            // Create overloaded chat function
+            const chatWrapper = async (params) => {
+                // Type guard to determine if params is streaming or non-streaming
+                const isStreaming = 'stream' in params && params.stream === true;
+                let response;
+                if (isStreaming) {
+                    response = await llm.chat(params);
+                }
+                else {
+                    response = await llm.chat(params);
+                }
+                // Handle streaming responses (AsyncIterable)
+                const isAsyncIterable = (value) => {
+                    return value !== null && typeof value === 'object' && Symbol.asyncIterator in value;
+                };
+                if (isAsyncIterable(response)) {
+                    return response; // Streaming responses are handled differently, return as-is
+                }
+                // Transform array content to string for non-streaming responses
+                const chatResponse = response;
+                if (chatResponse && typeof chatResponse === 'object' && chatResponse !== null && 'message' in chatResponse) {
+                    if (chatResponse.message && Array.isArray(chatResponse.message.content)) {
+                        const contentArray = chatResponse.message.content;
+                        const textContent = contentArray
+                            .filter((item) => item.type === 'text' && item.text)
+                            .map((item) => item.text)
+                            .join('');
+                        return {
+                            ...chatResponse,
+                            message: {
+                                ...chatResponse.message,
+                                content: textContent || chatResponse.message.content,
+                            },
+                        };
+                    }
+                }
+                return chatResponse;
+            };
+            // Add overload signatures
+            const chatWithOverloads = chatWrapper;
+            const wrappedLLM = {
                ...llm,
                supportToolCall: true,
+                chat: chatWithOverloads,
+                complete: llm.complete.bind(llm),
+                exec: llm.exec.bind(llm),
+                streamExec: llm.streamExec.bind(llm),
+                metadata: llm.metadata,
            };
+            return wrappedLLM;
        }
        if (this.type === "cohere") {
            const llm = openai({
@@ -119,6 +183,23 @@ class BlaxelLLM {
            return llm.complete(params);
        }
    }
+    async exec(params) {
+        await this.ensureMetadata();
+        const llm = await this.createLLM();
+        // Type guard to handle overloads
+        if ('stream' in params && params.stream === true) {
+            return llm.exec(params);
+        }
+        else {
+            return llm.exec(params);
+        }
+    }
+    // streamExec method
+    async streamExec(params) {
+        await this.ensureMetadata();
+        const llm = await this.createLLM();
+        return llm.streamExec(params);
+    }
 }
 export const blModel = async (model, options) => {
    const modelData = await getModelMetadata(model);
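
Illustrative note (not part of the published diff): with the Anthropic changes above, createLLM() returns a wrapped LLM whose chat() joins Anthropic's array of text blocks into a single string. A minimal caller-side sketch, assuming a hypothetical model name "claude-sonnet-4" is configured in the Blaxel workspace:

import { blModel } from "@blaxel/llamaindex";

// "claude-sonnet-4" is a placeholder; use a model configured in your workspace.
const llm = await blModel("claude-sonnet-4");

// chat() runs through the new chatWrapper: when the underlying response carries
// message.content as an array of { type: "text", text } blocks, the wrapper
// concatenates the text parts into one string before returning the response.
const response = await llm.chat({
    messages: [{ role: "user", content: "Say hello" }],
});
console.log(response.message.content); // a plain string rather than an array of blocks
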
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@blaxel/llamaindex",
-  "version": "0.2.49-preview.112",
+  "version": "0.2.50-dev.215",
   "description": "Blaxel SDK for TypeScript",
   "license": "MIT",
   "author": "Blaxel, INC (https://blaxel.ai)",
@@ -40,15 +40,15 @@
     "dist"
   ],
   "dependencies": {
-    "@llamaindex/anthropic": "^0.3.15",
-    "@llamaindex/core": "0.6.13",
-    "@llamaindex/google": "0.3.12",
-    "@llamaindex/mistral": "^0.1.13",
-    "@llamaindex/openai": "^0.4.7",
+    "@llamaindex/anthropic": "^0.3.26",
+    "@llamaindex/core": "0.6.22",
+    "@llamaindex/google": "0.3.22",
+    "@llamaindex/mistral": "^0.1.22",
+    "@llamaindex/openai": "^0.4.21",
     "@opentelemetry/instrumentation": "^0.203.0",
     "@traceloop/instrumentation-llamaindex": "^0.14.0",
-    "llamaindex": "^0.11.13",
-    "@blaxel/core": "0.2.49-preview.112"
+    "llamaindex": "^0.12.0",
+    "@blaxel/core": "0.2.50-dev.215"
   },
   "devDependencies": {
     "@eslint/js": "^9.30.1",