@blaxel/llamaindex 0.2.49-preview.112 → 0.2.50-dev.215
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/.tsbuildinfo +1 -1
- package/dist/cjs/model.js +84 -3
- package/dist/esm/.tsbuildinfo +1 -1
- package/dist/esm/model.js +84 -3
- package/package.json +8 -8
package/dist/cjs/model.js
CHANGED
@@ -39,8 +39,22 @@ class BlaxelLLM {
     }
     async ensureMetadata() {
         if (!this._metadata) {
-            const llm = await this.createLLM();
-            this._metadata = llm.metadata;
+            try {
+                const llm = await this.createLLM();
+                this._metadata = llm.metadata;
+            }
+            catch {
+                // If metadata access fails (e.g., Gemini), use default metadata
+                this._metadata = {
+                    model: this.modelData?.spec?.runtime?.model || this.model,
+                    temperature: this.options?.temperature ?? 0,
+                    topP: this.options?.topP ?? 1,
+                    maxTokens: this.options?.maxTokens ?? undefined,
+                    contextWindow: this.options?.contextWindow ?? 4096,
+                    tokenizer: undefined,
+                    structuredOutput: this.options?.structuredOutput ?? false,
+                };
+            }
         }
     }
     async createLLM() {
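
This change wraps the metadata lookup in a try/catch: providers whose adapter throws on metadata access (the inline comment names Gemini) now fall back to defaults derived from the constructor options instead of crashing ensureMetadata(). A minimal standalone sketch of the same lazy-load-with-fallback pattern, assuming a hand-rolled LLMMetadata shape whose field names are copied from this diff rather than from the upstream LlamaIndex types:

// Sketch only: LLMMetadata mirrors the fields assigned in the diff,
// not the actual LlamaIndex type definitions.
interface LLMMetadata {
  model: string;
  temperature: number;
  topP: number;
  maxTokens?: number;
  contextWindow: number;
  tokenizer?: string;
  structuredOutput: boolean;
}

class LazyMetadata {
  private _metadata?: LLMMetadata;

  constructor(
    private model: string,
    private options: Partial<LLMMetadata>,
    private load: () => Promise<LLMMetadata>, // provider call that may throw
  ) {}

  async ensureMetadata(): Promise<LLMMetadata> {
    if (!this._metadata) {
      try {
        this._metadata = await this.load();
      } catch {
        // Same defaults as the diff: explicit options win, then conservative fallbacks.
        this._metadata = {
          model: this.model,
          temperature: this.options.temperature ?? 0,
          topP: this.options.topP ?? 1,
          maxTokens: this.options.maxTokens,
          contextWindow: this.options.contextWindow ?? 4096,
          tokenizer: undefined,
          structuredOutput: this.options.structuredOutput ?? false,
        };
      }
    }
    return this._metadata;
  }
}

// A loader that throws (as Gemini reportedly does here) still yields usable metadata:
const lm = new LazyMetadata("example-model", {}, async () => {
  throw new Error("metadata not supported");
});
lm.ensureMetadata().then((m) => console.log(m.contextWindow)); // 4096

Because the result is cached on the instance, the fallback is computed at most once; later calls reuse the stored value.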
@@ -55,6 +69,9 @@ class BlaxelLLM {
             });
         }
         if (this.type === "anthropic") {
+            // Set a dummy API key to satisfy AnthropicSession constructor requirement
+            // The actual authentication is handled via defaultHeaders
+            process.env.ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY || "dummy-key-for-blaxel";
             const llm = (0, anthropic_1.anthropic)({
                 model: this.modelData?.spec?.runtime?.model,
                 session: new anthropic_1.AnthropicSession({
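
The new environment-variable line works around SDK constructors that refuse to instantiate without an API key even though the real credential travels in defaultHeaders. A self-contained sketch of the pattern, using a hypothetical FakeSession in place of AnthropicSession (the actual constructor options and header names are not visible in this diff):

// Hypothetical stand-in for an SDK session that insists on seeing an API key,
// even though real authentication is carried in defaultHeaders.
class FakeSession {
  constructor(opts: { apiKey?: string; defaultHeaders?: Record<string, string> }) {
    if (!(opts.apiKey ?? process.env.ANTHROPIC_API_KEY)) {
      throw new Error("Missing API key"); // the check the workaround must satisfy
    }
  }
}

// The diff's approach: set a placeholder only when the variable is unset.
process.env.ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY || "dummy-key-for-blaxel";

const session = new FakeSession({
  defaultHeaders: { Authorization: "Bearer <gateway token>" }, // hypothetical header
});
console.log(session instanceof FakeSession); // true: constructor no longer throws

The || guard matters: a key the user has already exported is left untouched, so the placeholder only ever fills a vacuum.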
@@ -63,10 +80,57 @@ class BlaxelLLM {
                 }),
                 ...this.options,
             });
-            return {
+            // Wrap the LLM to normalize Anthropic's response format (array content -> string)
+            // Create overloaded chat function
+            const chatWrapper = async (params) => {
+                // Type guard to determine if params is streaming or non-streaming
+                const isStreaming = 'stream' in params && params.stream === true;
+                let response;
+                if (isStreaming) {
+                    response = await llm.chat(params);
+                }
+                else {
+                    response = await llm.chat(params);
+                }
+                // Handle streaming responses (AsyncIterable)
+                const isAsyncIterable = (value) => {
+                    return value !== null && typeof value === 'object' && Symbol.asyncIterator in value;
+                };
+                if (isAsyncIterable(response)) {
+                    return response; // Streaming responses are handled differently, return as-is
+                }
+                // Transform array content to string for non-streaming responses
+                const chatResponse = response;
+                if (chatResponse && typeof chatResponse === 'object' && chatResponse !== null && 'message' in chatResponse) {
+                    if (chatResponse.message && Array.isArray(chatResponse.message.content)) {
+                        const contentArray = chatResponse.message.content;
+                        const textContent = contentArray
+                            .filter((item) => item.type === 'text' && item.text)
+                            .map((item) => item.text)
+                            .join('');
+                        return {
+                            ...chatResponse,
+                            message: {
+                                ...chatResponse.message,
+                                content: textContent || chatResponse.message.content,
+                            },
+                        };
+                    }
+                }
+                return chatResponse;
+            };
+            // Add overload signatures
+            const chatWithOverloads = chatWrapper;
+            const wrappedLLM = {
                 ...llm,
                 supportToolCall: true,
+                chat: chatWithOverloads,
+                complete: llm.complete.bind(llm),
+                exec: llm.exec.bind(llm),
+                streamExec: llm.streamExec.bind(llm),
+                metadata: llm.metadata,
             };
+            return wrappedLLM;
         }
         if (this.type === "cohere") {
             const llm = (0, openai_1.openai)({
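
The wrapper added above does two jobs for the Anthropic branch: streaming responses (anything exposing Symbol.asyncIterator) are passed through untouched, while non-streaming responses have Anthropic-style array content flattened into a plain string. Note that both arms of the isStreaming branch call the same llm.chat(params); the duplication exists only to satisfy the overloaded chat signatures in the TypeScript source and survives compilation as dead weight. A standalone sketch of the two checks, with local stand-in types rather than the real LlamaIndex ones:

// Local stand-ins for the message shapes involved; not the LlamaIndex types.
type ContentBlock = { type: string; text?: string };
type ChatMessage = { role: string; content: string | ContentBlock[] };

// Streaming detection as in the diff: anything exposing Symbol.asyncIterator
// is treated as a stream and returned as-is.
const isAsyncIterable = (value: unknown): value is AsyncIterable<unknown> =>
  value !== null && typeof value === "object" && Symbol.asyncIterator in value;

// Non-streaming normalization: concatenate the text blocks; when no text
// blocks exist, keep the original array (mirrors `textContent || ...content`).
function normalizeContent(message: ChatMessage): ChatMessage {
  if (!Array.isArray(message.content)) return message; // already a string
  const text = message.content
    .filter((block) => block.type === "text" && block.text)
    .map((block) => block.text)
    .join("");
  return { ...message, content: text || message.content };
}

// Example: two text blocks collapse into one string.
console.log(
  normalizeContent({
    role: "assistant",
    content: [{ type: "text", text: "Hello, " }, { type: "text", text: "world." }],
  }).content,
); // "Hello, world."
console.log(isAsyncIterable((async function* () { yield "chunk"; })())); // true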
@@ -122,6 +186,23 @@ class BlaxelLLM {
             return llm.complete(params);
         }
     }
+    async exec(params) {
+        await this.ensureMetadata();
+        const llm = await this.createLLM();
+        // Type guard to handle overloads
+        if ('stream' in params && params.stream === true) {
+            return llm.exec(params);
+        }
+        else {
+            return llm.exec(params);
+        }
+    }
+    // streamExec method
+    async streamExec(params) {
+        await this.ensureMetadata();
+        const llm = await this.createLLM();
+        return llm.streamExec(params);
+    }
 }
 const blModel = async (model, options) => {
     const modelData = await (0, core_1.getModelMetadata)(model);
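
The new exec and streamExec methods follow the established shape of the class: resolve metadata, build the underlying LLM, forward the call. As in the chat wrapper, the 'stream' in params guard has identical branches and exists only for overload resolution in the TypeScript source. A sketch of the delegation pattern against a hypothetical minimal interface, not the real LlamaIndex one:

// Hypothetical minimal interface for the wrapped LLM; the real types
// carry overloads that this sketch deliberately omits.
interface InnerLLM {
  exec(params: { stream?: boolean }): Promise<unknown>;
  streamExec(params: Record<string, unknown>): AsyncIterable<unknown>;
}

class DelegatingLLM {
  constructor(
    private ensureMetadata: () => Promise<void>,
    private createLLM: () => Promise<InnerLLM>,
  ) {}

  async exec(params: { stream?: boolean }) {
    await this.ensureMetadata(); // metadata must be resolved before any call
    const llm = await this.createLLM();
    return llm.exec(params); // both overload branches reduce to this one call
  }

  async streamExec(params: Record<string, unknown>) {
    await this.ensureMetadata();
    const llm = await this.createLLM();
    return llm.streamExec(params);
  }
}

// Usage: all construction is deferred until the first call.
const wrapper = new DelegatingLLM(
  async () => {},
  async () => ({
    exec: async (params) => ({ echoed: params }),
    streamExec: (params) => (async function* () { yield params; })(),
  }),
);
wrapper.exec({ stream: false }).then(console.log); // { echoed: { stream: false } }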