@blaxel/llamaindex 0.2.49 → 0.2.50-preview.115

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cjs/model.js CHANGED
@@ -39,40 +39,146 @@ class BlaxelLLM {
     }
     async ensureMetadata() {
         if (!this._metadata) {
-            const llm = await this.createLLM();
-            this._metadata = llm.metadata;
+            try {
+                const llm = await this.createLLM();
+                this._metadata = llm.metadata;
+            }
+            catch {
+                // If metadata access fails (e.g., Gemini), use default metadata
+                this._metadata = {
+                    model: this.modelData?.spec?.runtime?.model || this.model,
+                    temperature: this.options?.temperature ?? 0,
+                    topP: this.options?.topP ?? 1,
+                    maxTokens: this.options?.maxTokens ?? undefined,
+                    contextWindow: this.options?.contextWindow ?? 4096,
+                    tokenizer: undefined,
+                    structuredOutput: this.options?.structuredOutput ?? false,
+                };
+            }
         }
     }
     async createLLM() {
         await (0, core_1.authenticate)();
+        // Capture fresh headers and token after authentication
+        // Use getter to ensure we get the latest values
+        const currentToken = core_1.settings.token;
         const url = `${core_1.settings.runUrl}/${core_1.settings.workspace}/models/${this.model}`;
+        // Custom fetch function that adds authentication headers
+        const authenticatedFetch = async (input, init) => {
+            await (0, core_1.authenticate)();
+            // Get fresh headers after authentication
+            const freshHeaders = { ...core_1.settings.headers };
+            const headers = {
+                ...freshHeaders,
+                ...(init?.headers || {}),
+            };
+            // Ensure Content-Type is set for JSON requests if body exists and Content-Type is not already set
+            if (init?.body && !headers['Content-Type'] && !headers['content-type']) {
+                // If body is a string, check if it looks like JSON
+                if (typeof init.body === 'string') {
+                    const trimmed = init.body.trim();
+                    if (trimmed.startsWith('{') || trimmed.startsWith('[')) {
+                        headers['Content-Type'] = 'application/json';
+                    }
+                }
+                else {
+                    // For non-string bodies (FormData, Blob, etc.), let fetch handle it
+                    // For objects, assume JSON
+                    if (typeof init.body === 'object' && !(init.body instanceof FormData) && !(init.body instanceof Blob)) {
+                        headers['Content-Type'] = 'application/json';
+                    }
+                }
+            }
+            return fetch(input, {
+                ...init,
+                headers,
+            });
+        };
         if (this.type === "mistral") {
             return (0, openai_1.openai)({
                 model: this.modelData?.spec?.runtime?.model,
-                apiKey: core_1.settings.token,
+                apiKey: currentToken,
                 baseURL: `${url}/v1`,
+                additionalSessionOptions: {
+                    fetch: authenticatedFetch,
+                },
                 ...this.options,
             });
         }
         if (this.type === "anthropic") {
+            // Set a dummy API key to satisfy AnthropicSession constructor requirement
+            // The actual authentication is handled via defaultHeaders
+            process.env.ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY || "dummy-key-for-blaxel";
+            // Get fresh headers right before creating the session
+            const anthropicHeaders = { ...core_1.settings.headers };
             const llm = (0, anthropic_1.anthropic)({
                 model: this.modelData?.spec?.runtime?.model,
                 session: new anthropic_1.AnthropicSession({
                     baseURL: url,
-                    defaultHeaders: core_1.settings.headers,
+                    defaultHeaders: anthropicHeaders,
                 }),
                 ...this.options,
             });
-            return {
+            // Wrap the LLM to normalize Anthropic's response format (array content -> string)
+            // Create overloaded chat function
+            const chatWrapper = async (params) => {
+                // Type guard to determine if params is streaming or non-streaming
+                const isStreaming = 'stream' in params && params.stream === true;
+                let response;
+                if (isStreaming) {
+                    response = await llm.chat(params);
+                }
+                else {
+                    response = await llm.chat(params);
+                }
+                // Handle streaming responses (AsyncIterable)
+                const isAsyncIterable = (value) => {
+                    return value !== null && typeof value === 'object' && Symbol.asyncIterator in value;
+                };
+                if (isAsyncIterable(response)) {
+                    return response; // Streaming responses are handled differently, return as-is
+                }
+                // Transform array content to string for non-streaming responses
+                const chatResponse = response;
+                if (chatResponse && typeof chatResponse === 'object' && chatResponse !== null && 'message' in chatResponse) {
+                    if (chatResponse.message && Array.isArray(chatResponse.message.content)) {
+                        const contentArray = chatResponse.message.content;
+                        const textContent = contentArray
+                            .filter((item) => item.type === 'text' && item.text)
+                            .map((item) => item.text)
+                            .join('');
+                        return {
+                            ...chatResponse,
+                            message: {
+                                ...chatResponse.message,
+                                content: textContent || chatResponse.message.content,
+                            },
+                        };
+                    }
+                }
+                return chatResponse;
+            };
+            // Add overload signatures
+            const chatWithOverloads = chatWrapper;
+            const wrappedLLM = {
                 ...llm,
                 supportToolCall: true,
+                chat: chatWithOverloads,
+                complete: llm.complete.bind(llm),
+                exec: llm.exec.bind(llm),
+                streamExec: llm.streamExec.bind(llm),
+                metadata: llm.metadata,
             };
+            return wrappedLLM;
         }
         if (this.type === "cohere") {
             const llm = (0, openai_1.openai)({
                 model: this.modelData?.spec?.runtime?.model,
-                apiKey: core_1.settings.token,
-                baseURL: `${url}/compatibility/v1`,
+                apiKey: currentToken,
+                baseURL: `${url}/compatibility/v1`, // OpenAI compatibility endpoint
+                additionalSessionOptions: {
+                    fetch: authenticatedFetch,
+                },
                 ...this.options,
             });
             return {
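
The Anthropic branch above wraps chat() so that Anthropic's content-block arrays are flattened to a plain string before the response reaches LlamaIndex. A minimal sketch of that transform in isolation; the sample response shape is illustrative, not taken from the package:

// Sketch of the array -> string normalization performed by chatWrapper.
// The sample below is illustrative; real Anthropic responses carry more
// fields (ids, usage, stop reasons, ...). Non-text blocks are dropped.
const sample = {
    message: {
        role: "assistant",
        content: [
            { type: "text", text: "Hello" },
            { type: "tool_use", id: "t1", name: "search", input: {} },
            { type: "text", text: " world" },
        ],
    },
};
const flattened = sample.message.content
    .filter((item) => item.type === 'text' && item.text)
    .map((item) => item.text)
    .join('');
console.log(flattened); // "Hello world"

Note that when no text blocks survive the filter, chatWrapper falls back to the original array (content: textContent || chatResponse.message.content), so an all-tool-use response is passed through unchanged.
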
@@ -95,8 +201,11 @@ class BlaxelLLM {
         }
         return (0, openai_1.openai)({
             model: this.modelData?.spec?.runtime?.model,
-            apiKey: core_1.settings.token,
+            apiKey: currentToken,
             baseURL: `${url}/v1`,
+            additionalSessionOptions: {
+                fetch: authenticatedFetch,
+            },
             ...this.options,
         });
     }
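
With this hunk, every OpenAI-compatible branch (Mistral, Cohere, and the default above) routes requests through authenticatedFetch, which re-authenticates per request and infers Content-Type when the caller omits it. A standalone sketch of that inference rule; inferContentType is a hypothetical helper written for illustration, not an export of the package (assumes Node 18+ for the global FormData and Blob):

// Sketch of authenticatedFetch's Content-Type inference: string bodies
// that look like JSON, and plain objects, get application/json;
// FormData/Blob bodies are left for fetch to label itself.
function inferContentType(body, headers = {}) {
    if (!body || headers['Content-Type'] || headers['content-type']) {
        return headers;
    }
    if (typeof body === 'string') {
        const trimmed = body.trim();
        if (trimmed.startsWith('{') || trimmed.startsWith('[')) {
            headers['Content-Type'] = 'application/json';
        }
    }
    else if (typeof body === 'object' && !(body instanceof FormData) && !(body instanceof Blob)) {
        headers['Content-Type'] = 'application/json';
    }
    return headers;
}
console.log(inferContentType('{"model":"gpt-4o"}')); // { 'Content-Type': 'application/json' }
console.log(inferContentType(new FormData()));       // {} — left for fetch to set
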
@@ -122,6 +231,23 @@ class BlaxelLLM {
             return llm.complete(params);
         }
     }
+    async exec(params) {
+        await this.ensureMetadata();
+        const llm = await this.createLLM();
+        // Type guard to handle overloads
+        if ('stream' in params && params.stream === true) {
+            return llm.exec(params);
+        }
+        else {
+            return llm.exec(params);
+        }
+    }
+    // streamExec method
+    async streamExec(params) {
+        await this.ensureMetadata();
+        const llm = await this.createLLM();
+        return llm.streamExec(params);
+    }
 }
 const blModel = async (model, options) => {
     const modelData = await (0, core_1.getModelMetadata)(model);
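
The new exec and streamExec methods follow the same ensureMetadata-then-createLLM pattern as chat and complete, so every call re-authenticates and picks up a fresh token. A hedged usage sketch, assuming blModel (defined in this file) is the package's exported factory and that LlamaIndex's standard complete/chat call shapes apply; the model name is a placeholder, not one shipped with the package:

// Usage sketch under the assumptions above.
const { blModel } = require("@blaxel/llamaindex");

async function main() {
    // "my-model" is a placeholder for a model configured in your workspace.
    const llm = await blModel("my-model", { temperature: 0 });
    // complete() and chat() call createLLM() on each request, so each one
    // re-authenticates and sends fresh headers through authenticatedFetch.
    const completion = await llm.complete({ prompt: "Say hello." });
    console.log(completion.text);
    const chat = await llm.chat({
        messages: [{ role: "user", content: "Say hello." }],
    });
    console.log(chat.message.content);
}

main().catch(console.error);
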