graphlit-client 1.0.20250705001 → 1.0.20250710001
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client.d.ts +7 -0
- package/dist/client.js +57 -2
- package/dist/generated/graphql-documents.js +20 -0
- package/dist/generated/graphql-types.d.ts +103 -4
- package/dist/generated/graphql-types.js +17 -3
- package/dist/model-mapping.js +13 -0
- package/dist/streaming/providers.d.ts +2 -0
- package/dist/streaming/providers.js +28 -0
- package/package.json +1 -1
package/dist/client.d.ts
CHANGED
@@ -48,6 +48,7 @@ declare class Graphlit {
     private mistralClient?;
     private bedrockClient?;
     private deepseekClient?;
+    private xaiClient?;
     constructor(organizationIdOrOptions?: string | GraphlitClientOptions, environmentId?: string, jwtSecret?: string, ownerId?: string, userId?: string, apiUri?: string);
     refreshClient(): void;
     /**
@@ -95,6 +96,11 @@ declare class Graphlit {
      * @param client - OpenAI client instance configured for Deepseek (e.g., new OpenAI({ baseURL: "https://api.deepseek.com", apiKey: "..." }))
      */
     setDeepseekClient(client: any): void;
+    /**
+     * Set a custom xAI client instance for streaming
+     * @param client - OpenAI client instance configured for xAI (e.g., new OpenAI({ baseURL: "https://api.x.ai/v1", apiKey: "..." }))
+     */
+    setXaiClient(client: any): void;
     /**
      * Update retry configuration and refresh the Apollo client
      * @param retryConfig - New retry configuration
@@ -505,6 +511,7 @@ declare class Graphlit {
      * Stream with Deepseek client
      */
     private streamWithDeepseek;
+    private streamWithXai;
     private executeToolsForPromptAgent;
     private prettyPrintGraphQLError;
     private mutateAndCheckError;
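
In practice, the new setter mirrors setDeepseekClient: you hand it an OpenAI SDK instance pointed at xAI's OpenAI-compatible endpoint. A minimal usage sketch, assuming the optional openai peer dependency is installed (the endpoint and key placement follow the JSDoc above):

    import OpenAI from "openai";
    import { Graphlit } from "graphlit-client";

    const graphlit = new Graphlit();

    // xAI exposes an OpenAI-compatible API, so a stock OpenAI client
    // configured with xAI's base URL is all setXaiClient expects.
    graphlit.setXaiClient(new OpenAI({
        baseURL: "https://api.x.ai/v1",
        apiKey: process.env.XAI_API_KEY ?? "",
    }));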
package/dist/client.js
CHANGED
@@ -9,7 +9,7 @@ import * as dotenv from "dotenv";
 import { getServiceType, getModelName } from "./model-mapping.js";
 import { UIEventAdapter } from "./streaming/ui-event-adapter.js";
 import { formatMessagesForOpenAI, formatMessagesForAnthropic, formatMessagesForGoogle, formatMessagesForMistral, formatMessagesForBedrock, } from "./streaming/llm-formatters.js";
-import { streamWithOpenAI, streamWithAnthropic, streamWithGoogle, streamWithGroq, streamWithCerebras, streamWithCohere, streamWithMistral, streamWithBedrock, streamWithDeepseek, } from "./streaming/providers.js";
+import { streamWithOpenAI, streamWithAnthropic, streamWithGoogle, streamWithGroq, streamWithCerebras, streamWithCohere, streamWithMistral, streamWithBedrock, streamWithDeepseek, streamWithXai, } from "./streaming/providers.js";
 // Optional imports for streaming LLM clients
 // These are peer dependencies and may not be installed
 // We need to use createRequire for optional dependencies to avoid build errors
@@ -132,6 +132,7 @@ class Graphlit {
     mistralClient;
     bedrockClient;
     deepseekClient;
+    xaiClient;
     constructor(organizationIdOrOptions, environmentId, jwtSecret, ownerId, userId, apiUri) {
         // Handle both old constructor signature and new options object
         let options;
@@ -331,6 +332,13 @@ class Graphlit {
     setDeepseekClient(client) {
         this.deepseekClient = client;
     }
+    /**
+     * Set a custom xAI client instance for streaming
+     * @param client - OpenAI client instance configured for xAI (e.g., new OpenAI({ baseURL: "https://api.x.ai/v1", apiKey: "..." }))
+     */
+    setXaiClient(client) {
+        this.xaiClient = client;
+    }
     /**
      * Update retry configuration and refresh the Apollo client
      * @param retryConfig - New retry configuration
@@ -1823,6 +1831,8 @@ class Graphlit {
                 return hasBedrockClient;
             case Types.ModelServiceTypes.Deepseek:
                 return OpenAI !== undefined || this.deepseekClient !== undefined;
+            case Types.ModelServiceTypes.Xai:
+                return OpenAI !== undefined || this.xaiClient !== undefined;
             default:
                 return false;
         }
@@ -1839,6 +1849,8 @@ class Graphlit {
             this.cohereClient !== undefined;
         const hasMistral = Mistral !== undefined || this.mistralClient !== undefined;
         const hasBedrock = BedrockRuntimeClient !== undefined || this.bedrockClient !== undefined;
+        const hasDeepseek = OpenAI !== undefined || this.deepseekClient !== undefined;
+        const hasXai = OpenAI !== undefined || this.xaiClient !== undefined;
         return (hasOpenAI ||
             hasAnthropic ||
             hasGoogle ||
@@ -1846,7 +1858,9 @@
             hasCerebras ||
             hasCohere ||
             hasMistral ||
-            hasBedrock);
+            hasBedrock ||
+            hasDeepseek ||
+            hasXai);
     }
     /**
      * Execute an agent with non-streaming response
@@ -2480,6 +2494,26 @@ class Graphlit {
                 console.log(`\n🏁 [Streaming] Deepseek native streaming completed (Round ${currentRound})`);
             }
         }
+        else if (serviceType === Types.ModelServiceTypes.Xai &&
+            (OpenAI || this.xaiClient)) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`\n⚡ [Streaming] Using xAI native streaming (Round ${currentRound})`);
+            }
+            const xaiMessages = formatMessagesForOpenAI(messages); // xAI uses OpenAI format
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING_MESSAGES) {
+                console.log(`📤 [xAI] Sending ${xaiMessages.length} messages to LLM: ${JSON.stringify(xaiMessages)}`);
+            }
+            await this.streamWithXai(specification, xaiMessages, tools, uiAdapter, (message, calls, usage) => {
+                roundMessage = message;
+                toolCalls = calls;
+                if (usage) {
+                    uiAdapter.setUsageData(usage);
+                }
+            }, abortSignal);
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`\n🏁 [Streaming] xAI native streaming completed (Round ${currentRound})`);
+            }
+        }
         else {
             // Fallback to non-streaming
             if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
@@ -3021,6 +3055,27 @@ class Graphlit {
         }
         await streamWithDeepseek(specification, messages, tools, deepseekClient, (event) => uiAdapter.handleEvent(event), onComplete, abortSignal);
     }
+    async streamWithXai(specification, messages, tools, uiAdapter, onComplete, abortSignal) {
+        // Check if we have either the OpenAI module or a provided xAI client
+        if (!OpenAI && !this.xaiClient) {
+            throw new Error("xAI client not available (requires OpenAI SDK)");
+        }
+        // Use provided client or create a new one with xAI base URL
+        const xaiClient = this.xaiClient ||
+            (OpenAI
+                ? new OpenAI({
+                    baseURL: "https://api.x.ai/v1",
+                    apiKey: process.env.XAI_API_KEY || "",
+                })
+                : null);
+        if (!xaiClient) {
+            throw new Error("Failed to create xAI client");
+        }
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🚀 [Graphlit SDK] Routing to xAI streaming provider | Spec: ${specification.name} (${specification.id}) | Messages: ${messages.length} | Tools: ${tools?.length || 0}`);
+        }
+        await streamWithXai(specification, messages, tools, xaiClient, (event) => uiAdapter.handleEvent(event), onComplete, abortSignal);
+    }
     // Helper method to execute tools for promptAgent
     async executeToolsForPromptAgent(toolCalls, toolHandlers, allToolCalls, signal) {
         const responses = [];
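
Note the fallback in streamWithXai above: when no client has been injected via setXaiClient, the SDK constructs its own OpenAI instance from the XAI_API_KEY environment variable. Injection is therefore only needed for custom setups such as proxies or per-tenant keys. A sketch of the two equivalent configurations (myProxyUrl is a hypothetical placeholder, not part of the package):

    // Implicit: relies on process.env.XAI_API_KEY at stream time
    const client = new Graphlit();

    // Explicit: inject a preconfigured client, e.g. routed through a proxy
    client.setXaiClient(new OpenAI({
        baseURL: myProxyUrl, // hypothetical; the SDK default is "https://api.x.ai/v1"
        apiKey: process.env.XAI_API_KEY ?? "",
    }));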
package/dist/generated/graphql-documents.js
CHANGED
@@ -9223,6 +9223,16 @@ export const GetSpecification = gql `
       temperature
       probability
     }
+    xai {
+      tokenLimit
+      completionTokenLimit
+      model
+      key
+      modelName
+      endpoint
+      temperature
+      probability
+    }
     groq {
       tokenLimit
       completionTokenLimit
@@ -9583,6 +9593,16 @@ export const QuerySpecifications = gql `
       temperature
       probability
     }
+    xai {
+      tokenLimit
+      completionTokenLimit
+      model
+      key
+      modelName
+      endpoint
+      temperature
+      probability
+    }
     groq {
       tokenLimit
       completionTokenLimit
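
The xai selection mirrors the eight fields fetched for the sibling provider blocks, so consumers can read it uniformly off a fetched specification. A hedged sketch, assuming the SDK's existing getSpecification helper behaves for xAI as it does for the other providers (specId is assumed in scope):

    const response = await client.getSpecification(specId);
    const xai = response.specification?.xai;
    if (xai) {
        // model is the XaiModels enum value; modelName/endpoint/key are
        // only populated when using a developer's own xAI account
        console.log(xai.model, xai.modelName, xai.tokenLimit);
    }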
package/dist/generated/graphql-types.d.ts
CHANGED
@@ -399,13 +399,13 @@ export declare enum AnthropicModels {
     Claude_3Haiku = "CLAUDE_3_HAIKU",
     /** Claude 3 Haiku (03-07-2024 version) */
     Claude_3Haiku_20240307 = "CLAUDE_3_HAIKU_20240307",
-    /** Claude
+    /** @deprecated Use Claude 4 Opus instead. */
     Claude_3Opus = "CLAUDE_3_OPUS",
     /** Claude 3 Opus (02-29-2024 version) */
     Claude_3Opus_20240229 = "CLAUDE_3_OPUS_20240229",
-    /** Claude
+    /** @deprecated Use Claude 4 Sonnet instead. */
     Claude_3Sonnet = "CLAUDE_3_SONNET",
-    /** Claude
+    /** @deprecated Use Claude 4 Sonnet instead. */
     Claude_3Sonnet_20240229 = "CLAUDE_3_SONNET_20240229",
     /** Claude 4 Opus (Latest) */
     Claude_4Opus = "CLAUDE_4_OPUS",
@@ -8460,7 +8460,9 @@ export declare enum ModelServiceTypes {
     /** Replicate */
     Replicate = "REPLICATE",
     /** Voyage */
-    Voyage = "VOYAGE"
+    Voyage = "VOYAGE",
+    /** xAI */
+    Xai = "XAI"
 }
 /** Represents an LLM text entity extraction connector. */
 export type ModelTextExtractionProperties = {
@@ -13803,6 +13805,8 @@ export type Specification = {
     type?: Maybe<SpecificationTypes>;
     /** The Voyage model properties. */
     voyage?: Maybe<VoyageModelProperties>;
+    /** The xAI model properties. */
+    xai?: Maybe<XaiModelProperties>;
 };
 /** Represents a filter for LLM specifications. */
 export type SpecificationFilter = {
@@ -13889,6 +13893,8 @@ export type SpecificationInput = {
     type?: InputMaybe<SpecificationTypes>;
     /** The Voyage model properties. */
     voyage?: InputMaybe<VoyageModelPropertiesInput>;
+    /** The XAI model properties. */
+    xai?: InputMaybe<XaiModelPropertiesInput>;
 };
 /** Represents LLM specification query results. */
 export type SpecificationResults = {
@@ -13973,6 +13979,8 @@ export type SpecificationUpdateInput = {
     type?: InputMaybe<SpecificationTypes>;
     /** The Voyage model properties. */
     voyage?: InputMaybe<VoyageModelPropertiesUpdateInput>;
+    /** The XAI model properties. */
+    xai?: InputMaybe<XaiModelPropertiesUpdateInput>;
 };
 /** Represents the storage policy. */
 export type StoragePolicy = {
@@ -14894,6 +14902,75 @@ export type WorkflowUpdateInput = {
     /** The storage stage of the content workflow. */
     storage?: InputMaybe<StorageWorkflowStageInput>;
 };
+/** Represents xAI model properties. */
+export type XaiModelProperties = {
+    __typename?: 'XAIModelProperties';
+    /** The limit of tokens generated by prompt completion. */
+    completionTokenLimit?: Maybe<Scalars['Int']['output']>;
+    /** The xAI API endpoint, if using developer's own account. */
+    endpoint?: Maybe<Scalars['URL']['output']>;
+    /** The xAI API key, if using developer's own account. */
+    key?: Maybe<Scalars['String']['output']>;
+    /** The xAI model, or custom, when using developer's own account. */
+    model: XaiModels;
+    /** The xAI model name, if using developer's own account. */
+    modelName?: Maybe<Scalars['String']['output']>;
+    /** The model token probability. */
+    probability?: Maybe<Scalars['Float']['output']>;
+    /** The model temperature. */
+    temperature?: Maybe<Scalars['Float']['output']>;
+    /** The number of tokens which can provided to the xAI model, if using developer's own account. */
+    tokenLimit?: Maybe<Scalars['Int']['output']>;
+};
+/** Represents xAI model properties. */
+export type XaiModelPropertiesInput = {
+    /** The limit of tokens generated by prompt completion. */
+    completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
+    /** The xAI API endpoint, if using developer's own account. */
+    endpoint?: InputMaybe<Scalars['URL']['input']>;
+    /** The xAI API key, if using developer's own account. */
+    key?: InputMaybe<Scalars['String']['input']>;
+    /** The xAI model, or custom, when using developer's own account. */
+    model: XaiModels;
+    /** The xAI model name, if using developer's own account. */
+    modelName?: InputMaybe<Scalars['String']['input']>;
+    /** The model token probability. */
+    probability?: InputMaybe<Scalars['Float']['input']>;
+    /** The model temperature. */
+    temperature?: InputMaybe<Scalars['Float']['input']>;
+    /** The number of tokens which can provided to the xAI model, if using developer's own account. */
+    tokenLimit?: InputMaybe<Scalars['Int']['input']>;
+};
+/** Represents xAI model properties. */
+export type XaiModelPropertiesUpdateInput = {
+    /** The limit of tokens generated by prompt completion. */
+    completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
+    /** The xAI API endpoint, if using developer's own account. */
+    endpoint?: InputMaybe<Scalars['URL']['input']>;
+    /** The xAI API key, if using developer's own account. */
+    key?: InputMaybe<Scalars['String']['input']>;
+    /** The xAI model, or custom, when using developer's own account. */
+    model?: InputMaybe<XaiModels>;
+    /** The xAI model name, if using developer's own account. */
+    modelName?: InputMaybe<Scalars['String']['input']>;
+    /** The model token probability. */
+    probability?: InputMaybe<Scalars['Float']['input']>;
+    /** The model temperature. */
+    temperature?: InputMaybe<Scalars['Float']['input']>;
+    /** The number of tokens which can provided to the xAI model, if using developer's own account. */
+    tokenLimit?: InputMaybe<Scalars['Int']['input']>;
+};
+/** xAI model type */
+export declare enum XaiModels {
+    /** Developer-specified model */
+    Custom = "CUSTOM",
+    /** Grok 3 (Latest) */
+    Grok_3 = "GROK_3",
+    /** Grok 3 Mini (Latest) */
+    Grok_3Mini = "GROK_3_MINI",
+    /** Grok 4 (Latest) */
+    Grok_4 = "GROK_4"
+}
 /** Represents YouTube feed properties. */
 export type YouTubeFeedProperties = {
     __typename?: 'YouTubeFeedProperties';
@@ -25878,6 +25955,17 @@ export type GetSpecificationQuery = {
         temperature?: number | null;
         probability?: number | null;
     } | null;
+    xai?: {
+        __typename?: 'XAIModelProperties';
+        tokenLimit?: number | null;
+        completionTokenLimit?: number | null;
+        model: XaiModels;
+        key?: string | null;
+        modelName?: string | null;
+        endpoint?: any | null;
+        temperature?: number | null;
+        probability?: number | null;
+    } | null;
     groq?: {
         __typename?: 'GroqModelProperties';
         tokenLimit?: number | null;
@@ -26283,6 +26371,17 @@ export type QuerySpecificationsQuery = {
         temperature?: number | null;
         probability?: number | null;
     } | null;
+    xai?: {
+        __typename?: 'XAIModelProperties';
+        tokenLimit?: number | null;
+        completionTokenLimit?: number | null;
+        model: XaiModels;
+        key?: string | null;
+        modelName?: string | null;
+        endpoint?: any | null;
+        temperature?: number | null;
+        probability?: number | null;
+    } | null;
     groq?: {
         __typename?: 'GroqModelProperties';
         tokenLimit?: number | null;
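
Taken together, these additions make xAI a first-class provider on the Specification inputs. A hedged sketch of creating a Grok-backed specification, assuming the SDK's existing createSpecification helper and that the name/type/serviceType fields behave as they do for other providers:

    import { Graphlit, Types } from "graphlit-client";

    const client = new Graphlit();

    const spec = await client.createSpecification({
        name: "Grok 4 Completion",
        type: Types.SpecificationTypes.Completion,
        serviceType: Types.ModelServiceTypes.Xai,
        xai: {
            model: Types.XaiModels.Grok_4, // or Custom with modelName/key/endpoint
            temperature: 0.2,
            completionTokenLimit: 2048,
        },
    });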
package/dist/generated/graphql-types.js
CHANGED
@@ -31,13 +31,13 @@ export var AnthropicModels;
     AnthropicModels["Claude_3Haiku"] = "CLAUDE_3_HAIKU";
     /** Claude 3 Haiku (03-07-2024 version) */
     AnthropicModels["Claude_3Haiku_20240307"] = "CLAUDE_3_HAIKU_20240307";
-    /** Claude
+    /** @deprecated Use Claude 4 Opus instead. */
     AnthropicModels["Claude_3Opus"] = "CLAUDE_3_OPUS";
     /** Claude 3 Opus (02-29-2024 version) */
     AnthropicModels["Claude_3Opus_20240229"] = "CLAUDE_3_OPUS_20240229";
-    /** Claude
+    /** @deprecated Use Claude 4 Sonnet instead. */
     AnthropicModels["Claude_3Sonnet"] = "CLAUDE_3_SONNET";
-    /** Claude
+    /** @deprecated Use Claude 4 Sonnet instead. */
     AnthropicModels["Claude_3Sonnet_20240229"] = "CLAUDE_3_SONNET_20240229";
     /** Claude 4 Opus (Latest) */
     AnthropicModels["Claude_4Opus"] = "CLAUDE_4_OPUS";
@@ -1469,6 +1469,8 @@ export var ModelServiceTypes;
     ModelServiceTypes["Replicate"] = "REPLICATE";
     /** Voyage */
     ModelServiceTypes["Voyage"] = "VOYAGE";
+    /** xAI */
+    ModelServiceTypes["Xai"] = "XAI";
 })(ModelServiceTypes || (ModelServiceTypes = {}));
 /** Model type */
 export var ModelTypes;
@@ -2246,6 +2248,18 @@ export var VoyageModels;
     /** Voyage Multilingual 2.0 */
     VoyageModels["VoyageMultilingual_2_0"] = "VOYAGE_MULTILINGUAL_2_0";
 })(VoyageModels || (VoyageModels = {}));
+/** xAI model type */
+export var XaiModels;
+(function (XaiModels) {
+    /** Developer-specified model */
+    XaiModels["Custom"] = "CUSTOM";
+    /** Grok 3 (Latest) */
+    XaiModels["Grok_3"] = "GROK_3";
+    /** Grok 3 Mini (Latest) */
+    XaiModels["Grok_3Mini"] = "GROK_3_MINI";
+    /** Grok 4 (Latest) */
+    XaiModels["Grok_4"] = "GROK_4";
+})(XaiModels || (XaiModels = {}));
 export var YouTubeTypes;
 (function (YouTubeTypes) {
     /** YouTube Channel */
package/dist/model-mapping.js
CHANGED
@@ -136,6 +136,12 @@ const DEEPSEEK_MODEL_MAP = {
     [Types.DeepseekModels.Chat]: "deepseek-chat",
     [Types.DeepseekModels.Reasoner]: "deepseek-reasoner",
 };
+// xAI model mappings
+const XAI_MODEL_MAP = {
+    [Types.XaiModels.Grok_4]: "grok-4",
+    [Types.XaiModels.Grok_3]: "grok-3",
+    [Types.XaiModels.Grok_3Mini]: "grok-3-mini",
+};
 /**
  * Get the actual model name for a given specification
  * @param specification - The Graphlit specification object
@@ -171,6 +177,9 @@ export function getModelName(specification) {
     if (specification?.deepseek?.modelName) {
         return specification.deepseek.modelName;
     }
+    if (specification?.xai?.modelName) {
+        return specification.xai.modelName;
+    }
     // Map based on service type and model enum
     switch (serviceType) {
         case Types.ModelServiceTypes.OpenAi:
@@ -201,6 +210,9 @@ export function getModelName(specification) {
         case Types.ModelServiceTypes.Deepseek:
             const deepseekModel = specification?.deepseek?.model;
             return deepseekModel ? DEEPSEEK_MODEL_MAP[deepseekModel] : undefined;
+        case Types.ModelServiceTypes.Xai:
+            const xaiModel = specification?.xai?.model;
+            return xaiModel ? XAI_MODEL_MAP[xaiModel] : undefined;
         default:
             return undefined;
     }
@@ -221,6 +233,7 @@ export function isStreamingSupported(serviceType) {
         Types.ModelServiceTypes.Mistral,
         Types.ModelServiceTypes.Bedrock,
         Types.ModelServiceTypes.Deepseek,
+        Types.ModelServiceTypes.Xai,
     ];
     return streamingServices.includes(serviceType);
 }
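
The resolution order getModelName implements for xAI matches the other providers: an explicit xai.modelName (the developer's-own-account / CUSTOM path) wins, otherwise the enum is looked up in XAI_MODEL_MAP, otherwise the result is undefined. A self-contained sketch of that logic (resolveXaiModel is a hypothetical stand-in for the internal code above, not an exported API):

    // Hypothetical mirror of the internal lookup in model-mapping.js
    function resolveXaiModel(spec: { xai?: { model?: string; modelName?: string } }): string | undefined {
        const XAI_MODEL_MAP: Record<string, string> = {
            GROK_4: "grok-4",
            GROK_3: "grok-3",
            GROK_3_MINI: "grok-3-mini",
        };
        // 1. explicit model name, 2. enum mapping, 3. undefined
        return spec.xai?.modelName ?? (spec.xai?.model ? XAI_MODEL_MAP[spec.xai.model] : undefined);
    }

    resolveXaiModel({ xai: { model: "GROK_3_MINI" } }); // => "grok-3-mini"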
package/dist/streaming/providers.d.ts
CHANGED
@@ -50,4 +50,6 @@ onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls:
  */
 export declare function streamWithBedrock(specification: Specification, messages: BedrockMessage[], systemPrompt: string | undefined, tools: ToolDefinitionInput[] | undefined, bedrockClient: any, // BedrockRuntimeClient instance
 onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls: ConversationToolCall[], usage?: any) => void, abortSignal?: AbortSignal): Promise<void>;
+export declare function streamWithXai(specification: Specification, messages: OpenAIMessage[], tools: ToolDefinitionInput[] | undefined, xaiClient: any, // OpenAI client instance configured for xAI
+onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls: ConversationToolCall[], usage?: any) => void, abortSignal?: AbortSignal): Promise<void>;
 export {};
package/dist/streaming/providers.js
CHANGED
@@ -2526,3 +2526,31 @@ onEvent, onComplete, abortSignal) {
         throw error;
     }
 }
+export async function streamWithXai(specification, messages, tools, xaiClient, // OpenAI client instance configured for xAI
+onEvent, onComplete, abortSignal) {
+    try {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🚀 [xAI] Starting stream | Model: ${getModelName(specification)} | Messages: ${messages.length} | Tools: ${tools?.length || 0}`);
+        }
+        // xAI uses the same API as OpenAI, so we can reuse the OpenAI streaming logic
+        return await streamWithOpenAI(specification, messages, tools, xaiClient, onEvent, onComplete, abortSignal);
+    }
+    catch (error) {
+        // Handle xAI-specific errors if any
+        const errorMessage = error.message || error.toString();
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`⚠️ [xAI] Error: ${errorMessage}`);
+        }
+        // Check for rate limit errors
+        if (error.status === 429 || error.statusCode === 429) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`⚠️ [xAI] Rate limit hit (429)`);
+            }
+            // Re-throw with proper status code for retry logic
+            const rateLimitError = new Error("xAI rate limit exceeded");
+            rateLimitError.statusCode = 429;
+            throw rateLimitError;
+        }
+        throw error;
+    }
+}
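
Because streamWithXai only normalizes errors (tagging 429s with a statusCode for the SDK's retry logic) before deferring to streamWithOpenAI, any OpenAI-compatible client works when calling it directly. A direct-call sketch; the deep dist import path and the in-scope spec/messages values are assumptions, not documented entry points:

    import OpenAI from "openai";
    import { streamWithXai } from "graphlit-client/dist/streaming/providers.js";

    const xai = new OpenAI({ baseURL: "https://api.x.ai/v1", apiKey: process.env.XAI_API_KEY ?? "" });

    await streamWithXai(
        spec,       // an xAI Specification (assumed fetched via getSpecification)
        messages,   // OpenAI-format messages (assumed prepared by the caller)
        undefined,  // no tools
        xai,
        (event) => console.log(event),                                 // onEvent
        (message, toolCalls, usage) => console.log("done:", message),  // onComplete
    );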