@blaxel/langgraph 0.2.23-dev.174 → 0.2.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,6 @@
+ import { FetchFunction } from "cohere-ai/core";
+ /**
+  * Creates a custom fetcher for CohereClient that adds dynamic headers
+  * CohereClient's fetcher expects a function that intercepts fetch requests
+  */
+ export declare const createCohereFetcher: () => FetchFunction;
@@ -0,0 +1,172 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.createCohereFetcher = void 0;
+ const core_1 = require("@blaxel/core");
+ /**
+  * Creates a custom fetcher for CohereClient that adds dynamic headers
+  * CohereClient's fetcher expects a function that intercepts fetch requests
+  */
+ const createCohereFetcher = () => {
+     // Return a function that matches CohereClient's FetchFunction interface
+     const fetcher = async (args) => {
+         await (0, core_1.authenticate)();
+         const dynamicHeaders = core_1.settings.headers;
+         // Extract all fields from args
+         const { url, method, headers: argsHeaders, body, contentType, queryParameters, timeoutMs, withCredentials, abortSignal, requestType, responseType, duplex } = args;
+         // Build URL with query parameters
+         let requestUrl = url;
+         if (queryParameters) {
+             const params = new URLSearchParams();
+             Object.entries(queryParameters).forEach(([key, value]) => {
+                 if (Array.isArray(value)) {
+                     value.forEach(v => {
+                         if (typeof v === 'object' && v !== null) {
+                             params.append(key, JSON.stringify(v));
+                         }
+                         else {
+                             params.append(key, String(v));
+                         }
+                     });
+                 }
+                 else if (typeof value === 'object' && value !== null) {
+                     params.append(key, JSON.stringify(value));
+                 }
+                 else {
+                     params.append(key, String(value));
+                 }
+             });
+             const queryString = params.toString();
+             if (queryString) {
+                 requestUrl += (url.includes('?') ? '&' : '?') + queryString;
+             }
+         }
+         // Merge headers and filter out undefined values
+         const mergedHeaders = {
+             ...(argsHeaders || {}),
+             ...dynamicHeaders,
+         };
+         // Add content-type if specified
+         if (contentType) {
+             mergedHeaders['Content-Type'] = contentType;
+         }
+         // Filter out undefined values
+         const headers = Object.entries(mergedHeaders).reduce((acc, [key, value]) => {
+             if (value !== undefined) {
+                 acc[key] = value;
+             }
+             return acc;
+         }, {});
+         // Prepare body based on requestType
+         let requestBody;
+         if (body !== undefined) {
+             if (requestType === 'json' || !requestType) {
+                 requestBody = JSON.stringify(body);
+             }
+             else if (requestType === 'bytes' && body instanceof Uint8Array) {
+                 // Create a new ArrayBuffer from the Uint8Array to avoid SharedArrayBuffer issues
+                 const arrayBuffer = new ArrayBuffer(body.length);
+                 const view = new Uint8Array(arrayBuffer);
+                 view.set(body);
+                 requestBody = arrayBuffer;
+             }
+             else if (requestType === 'file' && body instanceof Blob) {
+                 requestBody = body;
+             }
+             else if (typeof body === 'string') {
+                 requestBody = body;
+             }
+             else {
+                 requestBody = JSON.stringify(body);
+             }
+         }
+         try {
+             // Create abort controller for timeout
+             const controller = new AbortController();
+             let timeoutId;
+             if (timeoutMs) {
+                 timeoutId = setTimeout(() => controller.abort(), timeoutMs);
+             }
+             // Merge abort signals
+             const signal = abortSignal
+                 ? AbortSignal.any([abortSignal, controller.signal])
+                 : controller.signal;
+             // Make the request with merged headers
+             const requestInit = {
+                 method: method,
+                 headers,
+                 body: requestBody,
+                 credentials: withCredentials ? 'include' : 'same-origin',
+                 signal,
+             };
+             // Add duplex if specified (for streaming)
+             if (duplex) {
+                 requestInit.duplex = duplex;
+             }
+             const response = await fetch(requestUrl, requestInit);
+             // Clear timeout
+             if (timeoutId) {
+                 clearTimeout(timeoutId);
+             }
+             // Handle response based on responseType
+             let responseBody;
+             if (response.ok) {
+                 if (responseType === 'blob') {
+                     responseBody = await response.blob();
+                 }
+                 else if (responseType === 'text') {
+                     responseBody = await response.text();
+                 }
+                 else if (responseType === 'arrayBuffer') {
+                     responseBody = await response.arrayBuffer();
+                 }
+                 else if (responseType === 'streaming' || responseType === 'sse') {
+                     // For streaming, return the response body stream
+                     responseBody = response.body;
+                 }
+                 else {
+                     // Default to JSON
+                     responseBody = await response.json();
+                 }
+                 // Return success response in the format CohereClient expects
+                 return {
+                     ok: true,
+                     body: responseBody,
+                     headers: Object.fromEntries(response.headers.entries()),
+                 };
+             }
+             else {
+                 // Return error response in the format CohereClient expects
+                 const errorBody = await response.text();
+                 return {
+                     ok: false,
+                     error: {
+                         reason: "status-code",
+                         statusCode: response.status,
+                         body: errorBody,
+                     },
+                 };
+             }
+         }
+         catch (error) {
+             // Check if it's a timeout error
+             if (error instanceof Error && error.name === 'AbortError') {
+                 return {
+                     ok: false,
+                     error: {
+                         reason: "timeout",
+                     },
+                 };
+             }
+             // Return unknown error
+             return {
+                 ok: false,
+                 error: {
+                     reason: "unknown",
+                     errorMessage: error instanceof Error ? error.message : String(error),
+                 },
+             };
+         }
+     };
+     return fetcher;
+ };
+ exports.createCohereFetcher = createCohereFetcher;
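For reference, a minimal sketch of how a fetcher like this plugs into the Cohere client, mirroring the wiring in `package/dist/model.js` below. The import path and the `environment` URL are assumptions (the compiled module appears to live at `dist/model/cohere.js`, judging by the `require("./model/cohere.js")` in `model.js`):

```ts
import { CohereClient } from "cohere-ai";
// Assumed publish path for the module added above
import { createCohereFetcher } from "@blaxel/langgraph/dist/model/cohere.js";

// The token is a throwaway sentinel: real credentials are injected per request
// by the fetcher, which calls authenticate() and reads settings.headers.
const client = new CohereClient({
    token: "replaced",
    environment: "https://run.blaxel.ai/my-workspace/models/my-cohere-model", // placeholder URL
    fetcher: createCohereFetcher(),
});
```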
@@ -1,4 +1,4 @@
- import { GenerateContentRequest, Part as GenerativeAIPart, ModelParams, RequestOptions, SafetySetting, type CachedContent } from "@google/generative-ai";
+ import { GenerateContentRequest, Part as GenerativeAIPart, GenerativeModel, ModelParams, RequestOptions, SafetySetting, type CachedContent } from "@google/generative-ai";
  import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
  import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
  import { BaseChatModel, type BaseChatModelCallOptions, type BaseChatModelParams, type LangSmithParams } from "@langchain/core/language_models/chat_models";
@@ -532,9 +532,12 @@ export declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerati
      streaming: boolean;
      streamUsage: boolean;
      convertSystemMessageToHumanContent: boolean | undefined;
-     private client;
+     baseUrl?: string;
+     apiVersion?: string;
+     client: GenerativeModel;
      get _isMultimodalModel(): boolean;
      constructor(fields?: GoogleGenerativeAIChatInput);
+     initClient(fields?: GoogleGenerativeAIChatInput): GenerativeModel;
      useCachedContent(cachedContent: CachedContent, modelParams?: ModelParams, requestOptions?: RequestOptions): void;
      get useSystemInstruction(): boolean;
      get computeUseSystemInstruction(): boolean;
@@ -418,6 +418,8 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
      streaming = false;
      streamUsage = true;
      convertSystemMessageToHumanContent;
+     baseUrl;
+     apiVersion;
      client;
      get _isMultimodalModel() {
          return (this.model.includes("vision") ||
@@ -452,12 +454,8 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
          }
          this.stopSequences = fields?.stopSequences ?? this.stopSequences;
          this.apiKey = fields?.apiKey ?? (0, env_1.getEnvironmentVariable)("GOOGLE_API_KEY");
-         if (!this.apiKey) {
-             throw new Error("Please set an API key for Google GenerativeAI " +
-                 "in the environment variable GOOGLE_API_KEY " +
-                 "or in the `apiKey` field of the " +
-                 "ChatGoogleGenerativeAI constructor");
-         }
+         this.apiVersion = fields?.apiVersion ?? this.apiVersion;
+         this.baseUrl = fields?.baseUrl ?? this.baseUrl;
          this.safetySettings = fields?.safetySettings ?? this.safetySettings;
          if (this.safetySettings && this.safetySettings.length > 0) {
              const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category));
@@ -466,7 +464,15 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
              }
          }
          this.streaming = fields?.streaming ?? this.streaming;
-         this.client = new generative_ai_1.GoogleGenerativeAI(this.apiKey).getGenerativeModel({
+         this.client = this.initClient(fields);
+         this.streamUsage = fields?.streamUsage ?? this.streamUsage;
+     }
+     initClient(fields) {
+         const apiKey = this.apiKey ? this.apiKey : "replaced";
+         const apiVersion = this.apiVersion ?? fields?.apiVersion;
+         const baseUrl = this.baseUrl ?? fields?.baseUrl;
+         const customHeaders = this.customHeaders ?? fields?.customHeaders;
+         return new generative_ai_1.GoogleGenerativeAI(apiKey).getGenerativeModel({
              model: this.model,
              safetySettings: this.safetySettings,
              generationConfig: {
@@ -479,11 +485,10 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
                  ...(fields?.json ? { responseMimeType: "application/json" } : {}),
              },
          }, {
-             apiVersion: fields?.apiVersion,
-             baseUrl: fields?.baseUrl,
-             customHeaders: fields?.customHeaders,
+             apiVersion,
+             baseUrl: baseUrl,
+             customHeaders: customHeaders,
          });
-         this.streamUsage = fields?.streamUsage ?? this.streamUsage;
      }
      useCachedContent(cachedContent, modelParams, requestOptions) {
          if (!this.apiKey)
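Net effect of the changes above: the vendored model no longer throws when no Google API key is present, and client construction moves into an overridable `initClient()` so the `GenerativeModel` can be rebuilt with fresh headers. A minimal sketch, assuming the vendored module's publish path; model name and URL are placeholders:

```ts
import { ChatGoogleGenerativeAI } from "@blaxel/langgraph/dist/model/google-genai/index.js"; // assumed path

// No apiKey required: initClient() falls back to the "replaced" sentinel,
// because the Blaxel gateway authenticates via request headers instead.
const chat = new ChatGoogleGenerativeAI({
    model: "gemini-1.5-pro", // placeholder model name
    baseUrl: "https://run.blaxel.ai/my-workspace/models/my-gemini", // placeholder URL
});
```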
@@ -0,0 +1,11 @@
+ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
+ import { BaseMessage } from "@langchain/core/messages";
+ import { ChatResult } from "@langchain/core/outputs";
+ import { ChatGoogleGenerativeAI } from "./google-genai/index.js";
+ /**
+  * Custom ChatGoogleGenerativeAI that ensures authentication before each request
+  */
+ export declare class AuthenticatedChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
+     _generate(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+     _streamResponseChunks(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<any>;
+ }
@@ -0,0 +1,30 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.AuthenticatedChatGoogleGenerativeAI = void 0;
+ const core_1 = require("@blaxel/core");
+ const index_js_1 = require("./google-genai/index.js");
+ /**
+  * Custom ChatGoogleGenerativeAI that ensures authentication before each request
+  */
+ class AuthenticatedChatGoogleGenerativeAI extends index_js_1.ChatGoogleGenerativeAI {
+     async _generate(messages, options, runManager) {
+         // Authenticate before making the request
+         await (0, core_1.authenticate)();
+         this.customHeaders = {};
+         for (const header in core_1.settings.headers) {
+             this.customHeaders[header] = core_1.settings.headers[header];
+         }
+         this.client = this.initClient();
+         return await super._generate(messages, options || {}, runManager);
+     }
+     async *_streamResponseChunks(messages, options, runManager) {
+         // Authenticate before making the request
+         await (0, core_1.authenticate)();
+         this.customHeaders = {};
+         for (const header in core_1.settings.headers) {
+             this.customHeaders[header] = core_1.settings.headers[header];
+         }
+         yield* super._streamResponseChunks(messages, options || {}, runManager);
+     }
+ }
+ exports.AuthenticatedChatGoogleGenerativeAI = AuthenticatedChatGoogleGenerativeAI;
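Usage-wise the subclass is a drop-in replacement for the vendored model; a sketch, with the same placeholder path, model name, and URL assumptions as above:

```ts
import { AuthenticatedChatGoogleGenerativeAI } from "@blaxel/langgraph/dist/model/google-genai.js"; // assumed path

const chat = new AuthenticatedChatGoogleGenerativeAI({
    model: "gemini-1.5-pro", // placeholder model name
    baseUrl: "https://run.blaxel.ai/my-workspace/models/my-gemini", // placeholder URL
});
// Each call re-authenticates, copies settings.headers into customHeaders,
// and (in _generate) rebuilds the underlying client via initClient().
const result = await chat.invoke("Hello!");
```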
package/dist/model.js CHANGED
@@ -7,8 +7,31 @@ const cohere_1 = require("@langchain/cohere");
  const deepseek_1 = require("@langchain/deepseek");
  const openai_1 = require("@langchain/openai");
  const cohere_ai_1 = require("cohere-ai");
- const index_js_1 = require("./model/google-genai/index.js");
+ const cohere_js_1 = require("./model/cohere.js");
+ const google_genai_js_1 = require("./model/google-genai.js");
  const xai_js_1 = require("./model/xai.js");
+ /**
+  * Creates a custom fetch function that adds dynamic headers to each request
+  * Returns a function compatible with OpenAI SDK's fetch option
+  */
+ const authenticatedFetch = () => {
+     const customFetch = async (input, init) => {
+         await (0, core_1.authenticate)();
+         const dynamicHeaders = core_1.settings.headers;
+         // Merge headers: init headers take precedence over dynamic headers
+         const headers = {
+             ...dynamicHeaders,
+             ...(init?.headers || {}),
+         };
+         // Make the request with merged headers
+         return await fetch(input, {
+             ...init,
+             headers,
+         });
+     };
+     // eslint-disable-next-line @typescript-eslint/no-unsafe-return
+     return customFetch;
+ };
  const blModel = async (model, options) => {
      const url = `${core_1.settings.runUrl}/${core_1.settings.workspace}/models/${model}`;
      const modelData = await (0, core_1.getModelMetadata)(model);
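To illustrate the wrapper above: the returned function keeps the standard `fetch` signature, so the dynamic auth headers ride along on every request while caller-supplied headers win on conflicts. A standalone re-creation of the pattern (the original `authenticatedFetch` is module-private in `model.js`; the URL is a placeholder):

```ts
import { authenticate, settings } from "@blaxel/core";

// Sketch of the same merge order used above: dynamic headers first,
// then caller headers, so the caller's values take precedence.
const fetchWithAuth = async (input: string, init: { method?: string; headers?: Record<string, string>; body?: string } = {}) => {
    await authenticate(); // refresh credentials before every request
    return fetch(input, {
        ...init,
        headers: { ...settings.headers, ...(init.headers || {}) },
    });
};

const res = await fetchWithAuth("https://run.blaxel.ai/my-workspace/models/my-model/v1/models");
```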
@@ -19,7 +42,7 @@ const blModel = async (model, options) => {
      const type = modelData?.spec?.runtime?.type || "openai";
      try {
          if (type === "gemini") {
-             return new index_js_1.ChatGoogleGenerativeAI({
+             return new google_genai_js_1.AuthenticatedChatGoogleGenerativeAI({
                  apiKey: core_1.settings.token,
                  model: modelData?.spec?.runtime?.model,
                  baseUrl: url,
@@ -29,30 +52,36 @@ const blModel = async (model, options) => {
          }
          else if (type === "mistral") {
              return new openai_1.ChatOpenAI({
-                 apiKey: core_1.settings.token,
+                 apiKey: "replaced",
                  model: modelData?.spec?.runtime?.model,
                  configuration: {
                      baseURL: `${url}/v1`,
+                     // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+                     fetch: authenticatedFetch(),
                  },
                  ...options,
              });
          }
          else if (type === "cohere") {
              return new cohere_1.ChatCohere({
-                 apiKey: core_1.settings.token,
+                 apiKey: "replaced",
                  model: modelData?.spec?.runtime?.model,
                  client: new cohere_ai_1.CohereClient({
-                     token: core_1.settings.token,
+                     token: "replaced",
                      environment: url,
+                     fetcher: (0, cohere_js_1.createCohereFetcher)(),
                  }),
+                 ...options,
              });
          }
          else if (type === "deepseek") {
              return new deepseek_1.ChatDeepSeek({
-                 apiKey: core_1.settings.token,
+                 apiKey: "replaced",
                  model: modelData?.spec?.runtime?.model,
                  configuration: {
                      baseURL: `${url}/v1`,
+                     // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+                     fetch: authenticatedFetch(),
                  },
                  ...options,
              });
@@ -62,16 +91,19 @@ const blModel = async (model, options) => {
                  anthropicApiUrl: url,
                  model: modelData?.spec?.runtime?.model,
                  clientOptions: {
-                     defaultHeaders: core_1.settings.headers,
+                     // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+                     fetch: authenticatedFetch(),
                  },
                  ...options,
              });
          }
          else if (type === "xai") {
              return new xai_js_1.ChatXAI({
-                 apiKey: core_1.settings.token,
+                 apiKey: "replaced",
                  configuration: {
                      baseURL: `${url}/v1`,
+                     // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+                     fetch: authenticatedFetch(),
                  },
                  model: modelData?.spec?.runtime?.model,
                  ...options,
@@ -80,19 +112,23 @@ const blModel = async (model, options) => {
          else if (type === "cerebras") {
              // We don't use ChatCerebras because there is a problem with apiKey headers
              return new openai_1.ChatOpenAI({
-                 apiKey: core_1.settings.token,
+                 apiKey: "replaced",
                  model: modelData?.spec?.runtime?.model,
                  configuration: {
                      baseURL: `${url}/v1`,
+                     // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+                     fetch: authenticatedFetch(),
                  },
                  ...options,
              });
          }
          return new openai_1.ChatOpenAI({
-             apiKey: core_1.settings.token,
+             apiKey: "replaced",
              model: modelData?.spec?.runtime?.model,
              configuration: {
                  baseURL: `${url}/v1`,
+                 // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
+                 fetch: authenticatedFetch(),
              },
              ...options,
          });
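End to end, the exported entry point is unchanged; what changed is that every provider path now authenticates per request (via `authenticatedFetch`, `createCohereFetcher`, or `AuthenticatedChatGoogleGenerativeAI`) instead of freezing a token at construction time. A minimal usage sketch; the model name and option are placeholders:

```ts
import { blModel } from "@blaxel/langgraph";

// Resolves model metadata, then returns the matching LangChain chat model
// wired to the Blaxel gateway with per-request auth headers.
const model = await blModel("my-model", { temperature: 0 });
const reply = await model.invoke("Hello!");
```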
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@blaxel/langgraph",
-   "version": "0.2.23-dev.174",
+   "version": "0.2.23",
    "description": "Blaxel SDK for TypeScript",
    "license": "MIT",
    "author": "Blaxel, INC (https://blaxel.ai)",
@@ -65,7 +65,7 @@
      "langchain": "^0.3.24",
      "zod": "^3.24.3",
      "zod-to-json-schema": "^3.24.5",
-     "@blaxel/core": "0.2.23-dev.174"
+     "@blaxel/core": "0.2.23"
    },
    "devDependencies": {
      "@eslint/js": "^9.26.0",