@blaxel/llamaindex 0.2.49 → 0.2.50-preview.115

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/esm/model.js CHANGED
@@ -36,40 +36,146 @@ class BlaxelLLM {
     }
     async ensureMetadata() {
         if (!this._metadata) {
-            const llm = await this.createLLM();
-            this._metadata = llm.metadata;
+            try {
+                const llm = await this.createLLM();
+                this._metadata = llm.metadata;
+            }
+            catch {
+                // If metadata access fails (e.g., Gemini), use default metadata
+                this._metadata = {
+                    model: this.modelData?.spec?.runtime?.model || this.model,
+                    temperature: this.options?.temperature ?? 0,
+                    topP: this.options?.topP ?? 1,
+                    maxTokens: this.options?.maxTokens ?? undefined,
+                    contextWindow: this.options?.contextWindow ?? 4096,
+                    tokenizer: undefined,
+                    structuredOutput: this.options?.structuredOutput ?? false,
+                };
+            }
         }
     }
     async createLLM() {
         await authenticate();
+        // Capture fresh headers and token after authentication
+        // Use getter to ensure we get the latest values
+        const currentToken = settings.token;
         const url = `${settings.runUrl}/${settings.workspace}/models/${this.model}`;
+        // Custom fetch function that adds authentication headers
+        const authenticatedFetch = async (input, init) => {
+            await authenticate();
+            // Get fresh headers after authentication
+            const freshHeaders = { ...settings.headers };
+            const headers = {
+                ...freshHeaders,
+                ...(init?.headers || {}),
+            };
+            // Ensure Content-Type is set for JSON requests if body exists and Content-Type is not already set
+            if (init?.body && !headers['Content-Type'] && !headers['content-type']) {
+                // If body is a string, check if it looks like JSON
+                if (typeof init.body === 'string') {
+                    const trimmed = init.body.trim();
+                    if (trimmed.startsWith('{') || trimmed.startsWith('[')) {
+                        headers['Content-Type'] = 'application/json';
+                    }
+                }
+                else {
+                    // For non-string bodies (FormData, Blob, etc.), let fetch handle it
+                    // For objects, assume JSON
+                    if (typeof init.body === 'object' && !(init.body instanceof FormData) && !(init.body instanceof Blob)) {
+                        headers['Content-Type'] = 'application/json';
+                    }
+                }
+            }
+            return fetch(input, {
+                ...init,
+                headers,
+            });
+        };
         if (this.type === "mistral") {
             return openai({
                 model: this.modelData?.spec?.runtime?.model,
-                apiKey: settings.token,
+                apiKey: currentToken,
                 baseURL: `${url}/v1`,
+                additionalSessionOptions: {
+                    fetch: authenticatedFetch,
+                },
                 ...this.options,
             });
         }
         if (this.type === "anthropic") {
+            // Set a dummy API key to satisfy AnthropicSession constructor requirement
+            // The actual authentication is handled via defaultHeaders
+            process.env.ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY || "dummy-key-for-blaxel";
+            // Get fresh headers right before creating the session
+            const anthropicHeaders = { ...settings.headers };
             const llm = anthropic({
                 model: this.modelData?.spec?.runtime?.model,
                 session: new AnthropicSession({
                     baseURL: url,
-                    defaultHeaders: settings.headers,
+                    defaultHeaders: anthropicHeaders,
                 }),
                 ...this.options,
             });
-            return {
+            // Wrap the LLM to normalize Anthropic's response format (array content -> string)
+            // Create overloaded chat function
+            const chatWrapper = async (params) => {
+                // Type guard to determine if params is streaming or non-streaming
+                const isStreaming = 'stream' in params && params.stream === true;
+                let response;
+                if (isStreaming) {
+                    response = await llm.chat(params);
+                }
+                else {
+                    response = await llm.chat(params);
+                }
+                // Handle streaming responses (AsyncIterable)
+                const isAsyncIterable = (value) => {
+                    return value !== null && typeof value === 'object' && Symbol.asyncIterator in value;
+                };
+                if (isAsyncIterable(response)) {
+                    return response; // Streaming responses are handled differently, return as-is
+                }
+                // Transform array content to string for non-streaming responses
+                const chatResponse = response;
+                if (chatResponse && typeof chatResponse === 'object' && chatResponse !== null && 'message' in chatResponse) {
+                    if (chatResponse.message && Array.isArray(chatResponse.message.content)) {
+                        const contentArray = chatResponse.message.content;
+                        const textContent = contentArray
+                            .filter((item) => item.type === 'text' && item.text)
+                            .map((item) => item.text)
+                            .join('');
+                        return {
+                            ...chatResponse,
+                            message: {
+                                ...chatResponse.message,
+                                content: textContent || chatResponse.message.content,
+                            },
+                        };
+                    }
+                }
+                return chatResponse;
+            };
+            // Add overload signatures
+            const chatWithOverloads = chatWrapper;
+            const wrappedLLM = {
                 ...llm,
                 supportToolCall: true,
+                chat: chatWithOverloads,
+                complete: llm.complete.bind(llm),
+                exec: llm.exec.bind(llm),
+                streamExec: llm.streamExec.bind(llm),
+                metadata: llm.metadata,
             };
+            return wrappedLLM;
         }
         if (this.type === "cohere") {
             const llm = openai({
                 model: this.modelData?.spec?.runtime?.model,
-                apiKey: settings.token,
-                baseURL: `${url}/compatibility/v1`,
+                apiKey: currentToken,
+                baseURL: `${url}/compatibility/v1`, // OpenAI compatibility endpoint
+                additionalSessionOptions: {
+                    fetch: authenticatedFetch,
+                },
                 ...this.options,
             });
             return {
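The common thread in this hunk is the new `authenticatedFetch`: instead of freezing a token when the client is constructed, every request re-runs `authenticate()` and rebuilds the headers, so short-lived credentials stay valid. A minimal standalone TypeScript sketch of that pattern; `withFreshAuth`, `refreshAuth`, and `getAuthHeaders` are hypothetical stand-ins for the SDK's `authenticate()` and `settings.headers`, not Blaxel API:

type FetchLike = (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>;

function withFreshAuth(
    refreshAuth: () => Promise<void>,
    getAuthHeaders: () => Record<string, string>,
): FetchLike {
    return async (input, init) => {
        await refreshAuth(); // re-authenticate so short-lived tokens are never stale
        const headers: Record<string, string> = {
            ...getAuthHeaders(), // freshly captured auth headers
            ...((init?.headers as Record<string, string>) || {}), // caller headers win
        };
        // Infer a JSON Content-Type only for string bodies that look like JSON
        if (init?.body && typeof init.body === "string" &&
            !headers["Content-Type"] && !headers["content-type"]) {
            const trimmed = init.body.trim();
            if (trimmed.startsWith("{") || trimmed.startsWith("[")) {
                headers["Content-Type"] = "application/json";
            }
        }
        return fetch(input, { ...init, headers });
    };
}

Note that, mirroring the diff, the caller's own headers are spread last, so explicit per-request headers still override the injected auth headers.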
@@ -92,8 +198,11 @@ class BlaxelLLM {
         }
         return openai({
             model: this.modelData?.spec?.runtime?.model,
-            apiKey: settings.token,
+            apiKey: currentToken,
             baseURL: `${url}/v1`,
+            additionalSessionOptions: {
+                fetch: authenticatedFetch,
+            },
             ...this.options,
         });
     }
@@ -119,6 +228,23 @@ class BlaxelLLM {
             return llm.complete(params);
         }
     }
+    async exec(params) {
+        await this.ensureMetadata();
+        const llm = await this.createLLM();
+        // Type guard to handle overloads
+        if ('stream' in params && params.stream === true) {
+            return llm.exec(params);
+        }
+        else {
+            return llm.exec(params);
+        }
+    }
+    // streamExec method
+    async streamExec(params) {
+        await this.ensureMetadata();
+        const llm = await this.createLLM();
+        return llm.streamExec(params);
+    }
 }
 export const blModel = async (model, options) => {
     const modelData = await getModelMetadata(model);
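The `exec`/`streamExec` pass-throughs and the Anthropic `chatWrapper` together keep `BlaxelLLM` aligned with the LLM interface of the bumped `llamaindex` 0.12 / `@llamaindex/core` 0.6.22 (see package.json below). A hedged usage sketch of what callers can expect, assuming `blModel` resolves to the `BlaxelLLM` instance shown above; the model name is a placeholder:

import { blModel } from "@blaxel/llamaindex";

// "my-model" is a placeholder for a model defined in your Blaxel workspace
const llm = await blModel("my-model");

// Non-streaming chat: for Anthropic models, chatWrapper has already flattened
// array-shaped message content into a single string
const res = await llm.chat({ messages: [{ role: "user", content: "Hello" }] });
console.log(res.message.content);

// Streaming variant: the wrapper detects `stream: true` and returns the
// AsyncIterable of chunks untouched
const stream = await llm.chat({ messages: [{ role: "user", content: "Hello" }], stream: true });
for await (const chunk of stream) {
    process.stdout.write(chunk.delta);
}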
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@blaxel/llamaindex",
-  "version": "0.2.49",
+  "version": "0.2.50-preview.115",
   "description": "Blaxel SDK for TypeScript",
   "license": "MIT",
   "author": "Blaxel, INC (https://blaxel.ai)",
@@ -40,15 +40,15 @@
     "dist"
   ],
   "dependencies": {
-    "@llamaindex/anthropic": "^0.3.15",
-    "@llamaindex/core": "0.6.13",
-    "@llamaindex/google": "0.3.12",
-    "@llamaindex/mistral": "^0.1.13",
-    "@llamaindex/openai": "^0.4.7",
+    "@llamaindex/anthropic": "^0.3.26",
+    "@llamaindex/core": "0.6.22",
+    "@llamaindex/google": "0.3.22",
+    "@llamaindex/mistral": "^0.1.22",
+    "@llamaindex/openai": "^0.4.21",
     "@opentelemetry/instrumentation": "^0.203.0",
     "@traceloop/instrumentation-llamaindex": "^0.14.0",
-    "llamaindex": "^0.11.13",
-    "@blaxel/core": "0.2.49"
+    "llamaindex": "^0.12.0",
+    "@blaxel/core": "0.2.50-preview.115"
   },
   "devDependencies": {
     "@eslint/js": "^9.30.1",