@blaxel/llamaindex 0.2.23-dev.173 → 0.2.23-preview.53

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/model.js +85 -22
  2. package/package.json +2 -2
package/dist/model.js CHANGED
@@ -5,69 +5,132 @@ const core_1 = require("@blaxel/core");
  const anthropic_1 = require("@llamaindex/anthropic");
  const google_1 = require("@llamaindex/google");
  const openai_1 = require("@llamaindex/openai");
- const blModel = async (model, options) => {
-     const url = `${core_1.settings.runUrl}/${core_1.settings.workspace}/models/${model}`;
-     const modelData = await (0, core_1.getModelMetadata)(model);
-     if (!modelData) {
-         throw new Error(`Model ${model} not found`);
+ // Custom LLM provider that refreshes auth on each call
+ class BlaxelLLM {
+     model;
+     options;
+     modelData;
+     type;
+     _metadata;
+     constructor(model, modelData, options) {
+         this.model = model;
+         this.modelData = modelData;
+         this.options = options;
+         this.type = modelData?.spec?.runtime?.type || "openai";
      }
-     await (0, core_1.authenticate)();
-     const type = modelData?.spec?.runtime?.type || "openai";
-     try {
-         if (type === "mistral") {
+     get supportToolCall() {
+         return true;
+     }
+     get metadata() {
+         // Return cached metadata or default values
+         if (this._metadata) {
+             return this._metadata;
+         }
+         // Return default values with overrides from options
+         return {
+             model: this.model,
+             temperature: this.options?.temperature ?? 0,
+             topP: this.options?.topP ?? 1,
+             maxTokens: this.options?.maxTokens ?? undefined,
+             contextWindow: this.options?.contextWindow ?? 4096,
+             tokenizer: undefined, // Let the underlying LLM handle tokenizer
+             structuredOutput: this.options?.structuredOutput ?? false,
+         };
+     }
+     async ensureMetadata() {
+         if (!this._metadata) {
+             const llm = await this.createLLM();
+             this._metadata = llm.metadata;
+         }
+     }
+     async createLLM() {
+         await (0, core_1.authenticate)();
+         const url = `${core_1.settings.runUrl}/${core_1.settings.workspace}/models/${this.model}`;
+         if (this.type === "mistral") {
              return (0, openai_1.openai)({
-                 model: modelData?.spec?.runtime?.model,
+                 model: this.modelData?.spec?.runtime?.model,
                  apiKey: core_1.settings.token,
                  baseURL: `${url}/v1`,
-                 ...options,
+                 ...this.options,
              });
          }
-         if (type === "anthropic") {
+         if (this.type === "anthropic") {
              const llm = (0, anthropic_1.anthropic)({
-                 model: modelData?.spec?.runtime?.model,
+                 model: this.modelData?.spec?.runtime?.model,
                  session: new anthropic_1.AnthropicSession({
                      baseURL: url,
                      defaultHeaders: core_1.settings.headers,
                  }),
-                 ...options,
+                 ...this.options,
              });
              return {
                  ...llm,
                  supportToolCall: true,
              };
          }
-         if (type === "cohere") {
+         if (this.type === "cohere") {
              const llm = (0, openai_1.openai)({
-                 model: modelData?.spec?.runtime?.model,
+                 model: this.modelData?.spec?.runtime?.model,
                  apiKey: core_1.settings.token,
                  baseURL: `${url}/compatibility/v1`,
-                 ...options,
+                 ...this.options,
              });
              return {
                  ...llm,
                  supportToolCall: true,
              };
          }
-         if (type === "gemini") {
+         if (this.type === "gemini") {
              process.env.GOOGLE_API_KEY = process.env.GOOGLE_API_KEY || "THIS_IS_A_DUMMY_KEY_FOR_LLAMAINDEX";
              const llm = new google_1.Gemini({
                  apiKey: core_1.settings.token,
-                 model: modelData?.spec?.runtime?.model,
+                 model: this.modelData?.spec?.runtime?.model,
                  httpOptions: {
                      baseUrl: url,
                      headers: core_1.settings.headers,
                  },
-                 ...options,
+                 ...this.options,
              });
              return llm;
          }
          return (0, openai_1.openai)({
-             model: modelData?.spec?.runtime?.model,
+             model: this.modelData?.spec?.runtime?.model,
              apiKey: core_1.settings.token,
              baseURL: `${url}/v1`,
-             ...options,
+             ...this.options,
          });
      }
+     async chat(params) {
+         await this.ensureMetadata();
+         const llm = await this.createLLM();
+         // Type guard to handle overloads
+         if ('stream' in params && params.stream === true) {
+             return llm.chat(params);
+         }
+         else {
+             return llm.chat(params);
+         }
+     }
+     async complete(params) {
+         await this.ensureMetadata();
+         const llm = await this.createLLM();
+         // Type guard to handle overloads
+         if ('stream' in params && params.stream === true) {
+             return llm.complete(params);
+         }
+         else {
+             return llm.complete(params);
+         }
+     }
+ }
+ const blModel = async (model, options) => {
+     const modelData = await (0, core_1.getModelMetadata)(model);
+     if (!modelData) {
+         throw new Error(`Model ${model} not found`);
+     }
+     try {
+         return new BlaxelLLM(model, modelData, options);
+     }
      catch (err) {
          (0, core_1.handleDynamicImportError)(err);
          throw err;
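In short, authentication and client construction move out of blModel and into the BlaxelLLM wrapper's createLLM(), which runs again on every chat() and complete() call, so credentials are refreshed for long-lived agents instead of being captured once at load time. blModel keeps its signature and still resolves to a LlamaIndex-compatible LLM. A minimal usage sketch, assuming blModel is the package's exported entry point and the workspace is already configured; the model name "my-model" and the option values are illustrative only:

// Hypothetical usage of the new lazy wrapper; "my-model" is a placeholder
// for a model deployed in your Blaxel workspace.
const { blModel } = require("@blaxel/llamaindex");

async function main() {
    // getModelMetadata runs once here; authentication is deferred to each call.
    const llm = await blModel("my-model", { temperature: 0 });
    // chat() re-authenticates, rebuilds the provider client, then delegates.
    const response = await llm.chat({
        messages: [{ role: "user", content: "Say hello." }],
    });
    console.log(response.message.content);
}

main().catch(console.error);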
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@blaxel/llamaindex",
-   "version": "0.2.23-dev.173",
+   "version": "0.2.23-preview.53",
    "description": "Blaxel SDK for TypeScript",
    "license": "MIT",
    "author": "Blaxel, INC (https://blaxel.ai)",
@@ -61,7 +61,7 @@
      "@opentelemetry/instrumentation": "^0.203.0",
      "@traceloop/instrumentation-llamaindex": "^0.14.0",
      "llamaindex": "^0.11.13",
-     "@blaxel/core": "0.2.23-dev.173"
+     "@blaxel/core": "0.2.23-preview.53"
    },
    "devDependencies": {
      "@eslint/js": "^9.30.1",