graphlit-client 1.0.20250705001 → 1.0.20250710002

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/client.d.ts CHANGED
@@ -48,6 +48,7 @@ declare class Graphlit {
     private mistralClient?;
     private bedrockClient?;
     private deepseekClient?;
+    private xaiClient?;
     constructor(organizationIdOrOptions?: string | GraphlitClientOptions, environmentId?: string, jwtSecret?: string, ownerId?: string, userId?: string, apiUri?: string);
     refreshClient(): void;
     /**
@@ -95,6 +96,11 @@ declare class Graphlit {
      * @param client - OpenAI client instance configured for Deepseek (e.g., new OpenAI({ baseURL: "https://api.deepseek.com", apiKey: "..." }))
      */
     setDeepseekClient(client: any): void;
+    /**
+     * Set a custom xAI client instance for streaming
+     * @param client - OpenAI client instance configured for xAI (e.g., new OpenAI({ baseURL: "https://api.x.ai/v1", apiKey: "..." }))
+     */
+    setXaiClient(client: any): void;
     /**
     * Update retry configuration and refresh the Apollo client
     * @param retryConfig - New retry configuration
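
For context, a minimal usage sketch of the new setter, following its JSDoc; it assumes Graphlit credentials come from the usual environment variables, and uses the same XAI_API_KEY variable the client.js fallback below reads:

import { Graphlit } from "graphlit-client";
import OpenAI from "openai";

const client = new Graphlit();

// Inject an OpenAI SDK instance pointed at the xAI API, as the JSDoc suggests.
client.setXaiClient(
  new OpenAI({
    baseURL: "https://api.x.ai/v1",
    apiKey: process.env.XAI_API_KEY,
  }),
);
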
@@ -505,6 +511,7 @@ declare class Graphlit {
      * Stream with Deepseek client
      */
     private streamWithDeepseek;
+    private streamWithXai;
     private executeToolsForPromptAgent;
     private prettyPrintGraphQLError;
     private mutateAndCheckError;
package/dist/client.js CHANGED
@@ -9,7 +9,7 @@ import * as dotenv from "dotenv";
 import { getServiceType, getModelName } from "./model-mapping.js";
 import { UIEventAdapter } from "./streaming/ui-event-adapter.js";
 import { formatMessagesForOpenAI, formatMessagesForAnthropic, formatMessagesForGoogle, formatMessagesForMistral, formatMessagesForBedrock, } from "./streaming/llm-formatters.js";
-import { streamWithOpenAI, streamWithAnthropic, streamWithGoogle, streamWithGroq, streamWithCerebras, streamWithCohere, streamWithMistral, streamWithBedrock, streamWithDeepseek, } from "./streaming/providers.js";
+import { streamWithOpenAI, streamWithAnthropic, streamWithGoogle, streamWithGroq, streamWithCerebras, streamWithCohere, streamWithMistral, streamWithBedrock, streamWithDeepseek, streamWithXai, } from "./streaming/providers.js";
 // Optional imports for streaming LLM clients
 // These are peer dependencies and may not be installed
 // We need to use createRequire for optional dependencies to avoid build errors
@@ -111,6 +111,14 @@ catch (e) {
     }
 }
 const DEFAULT_MAX_TOOL_ROUNDS = 1000;
+// Helper function to validate GUID format
+function isValidGuid(guid) {
+    if (!guid)
+        return false;
+    // GUID regex pattern: 8-4-4-4-12 hexadecimal characters
+    const guidRegex = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
+    return guidRegex.test(guid);
+}
 // Define the Graphlit class
 class Graphlit {
     client;
@@ -132,6 +140,7 @@ class Graphlit {
     mistralClient;
     bedrockClient;
     deepseekClient;
+    xaiClient;
     constructor(organizationIdOrOptions, environmentId, jwtSecret, ownerId, userId, apiUri) {
         // Handle both old constructor signature and new options object
         let options;
@@ -188,12 +197,25 @@ class Graphlit {
         if (!this.organizationId) {
             throw new Error("Graphlit organization identifier is required.");
         }
+        if (!isValidGuid(this.organizationId)) {
+            throw new Error(`Invalid organization ID format. Expected a valid GUID, but received: '${this.organizationId}'. ` +
+                "A valid GUID should be in the format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx");
+        }
         if (!this.environmentId) {
             throw new Error("Graphlit environment identifier is required.");
         }
+        if (!isValidGuid(this.environmentId)) {
+            throw new Error(`Invalid environment ID format. Expected a valid GUID, but received: '${this.environmentId}'. ` +
+                "A valid GUID should be in the format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx");
+        }
         if (!this.jwtSecret) {
             throw new Error("Graphlit environment JWT secret is required.");
         }
+        // Validate optional userId if provided (ownerId can be any format)
+        if (this.userId && !isValidGuid(this.userId)) {
+            throw new Error(`Invalid user ID format. Expected a valid GUID, but received: '${this.userId}'. ` +
+                "A valid GUID should be in the format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx");
+        }
         this.refreshClient();
     }
     refreshClient() {
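
The net effect of the added checks, sketched with made-up identifiers:

import { Graphlit } from "graphlit-client";

// Throws "Invalid organization ID format. Expected a valid GUID, ..." at construction:
new Graphlit(
  "not-a-guid",                              // organizationId must now be a GUID
  "11111111-2222-3333-4444-555555555555",    // environmentId must now be a GUID
  "jwt-secret",                              // jwtSecret remains free-form
);
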
@@ -331,6 +353,13 @@ class Graphlit {
     setDeepseekClient(client) {
         this.deepseekClient = client;
     }
+    /**
+     * Set a custom xAI client instance for streaming
+     * @param client - OpenAI client instance configured for xAI (e.g., new OpenAI({ baseURL: "https://api.x.ai/v1", apiKey: "..." }))
+     */
+    setXaiClient(client) {
+        this.xaiClient = client;
+    }
     /**
      * Update retry configuration and refresh the Apollo client
      * @param retryConfig - New retry configuration
@@ -1823,6 +1852,8 @@ class Graphlit {
                 return hasBedrockClient;
             case Types.ModelServiceTypes.Deepseek:
                 return OpenAI !== undefined || this.deepseekClient !== undefined;
+            case Types.ModelServiceTypes.Xai:
+                return OpenAI !== undefined || this.xaiClient !== undefined;
             default:
                 return false;
         }
@@ -1839,6 +1870,8 @@ class Graphlit {
             this.cohereClient !== undefined;
         const hasMistral = Mistral !== undefined || this.mistralClient !== undefined;
         const hasBedrock = BedrockRuntimeClient !== undefined || this.bedrockClient !== undefined;
+        const hasDeepseek = OpenAI !== undefined || this.deepseekClient !== undefined;
+        const hasXai = OpenAI !== undefined || this.xaiClient !== undefined;
         return (hasOpenAI ||
             hasAnthropic ||
             hasGoogle ||
@@ -1846,7 +1879,9 @@ class Graphlit {
             hasCerebras ||
             hasCohere ||
             hasMistral ||
-            hasBedrock);
+            hasBedrock ||
+            hasDeepseek ||
+            hasXai);
     }
     /**
      * Execute an agent with non-streaming response
@@ -2480,6 +2515,26 @@ class Graphlit {
                 console.log(`\n🏁 [Streaming] Deepseek native streaming completed (Round ${currentRound})`);
             }
         }
+        else if (serviceType === Types.ModelServiceTypes.Xai &&
+            (OpenAI || this.xaiClient)) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`\n✅ [Streaming] Using xAI native streaming (Round ${currentRound})`);
+            }
+            const xaiMessages = formatMessagesForOpenAI(messages); // xAI uses OpenAI format
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING_MESSAGES) {
+                console.log(`🔍 [xAI] Sending ${xaiMessages.length} messages to LLM: ${JSON.stringify(xaiMessages)}`);
+            }
+            await this.streamWithXai(specification, xaiMessages, tools, uiAdapter, (message, calls, usage) => {
+                roundMessage = message;
+                toolCalls = calls;
+                if (usage) {
+                    uiAdapter.setUsageData(usage);
+                }
+            }, abortSignal);
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`\n🏁 [Streaming] xAI native streaming completed (Round ${currentRound})`);
+            }
+        }
         else {
             // Fallback to non-streaming
             if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
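
The diagnostics in the routing above are opt-in; both variables are read straight from process.env, so enabling them is just:

// Opt in to the SDK's streaming diagnostics before running an agent;
// these are the exact variable names consulted in the code above.
process.env.DEBUG_GRAPHLIT_SDK_STREAMING = "1";
process.env.DEBUG_GRAPHLIT_SDK_STREAMING_MESSAGES = "1";
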
@@ -3021,6 +3076,27 @@ class Graphlit {
         }
         await streamWithDeepseek(specification, messages, tools, deepseekClient, (event) => uiAdapter.handleEvent(event), onComplete, abortSignal);
     }
+    async streamWithXai(specification, messages, tools, uiAdapter, onComplete, abortSignal) {
+        // Check if we have either the OpenAI module or a provided xAI client
+        if (!OpenAI && !this.xaiClient) {
+            throw new Error("xAI client not available (requires OpenAI SDK)");
+        }
+        // Use provided client or create a new one with xAI base URL
+        const xaiClient = this.xaiClient ||
+            (OpenAI
+                ? new OpenAI({
+                    baseURL: "https://api.x.ai/v1",
+                    apiKey: process.env.XAI_API_KEY || "",
+                })
+                : null);
+        if (!xaiClient) {
+            throw new Error("Failed to create xAI client");
+        }
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🚀 [Graphlit SDK] Routing to xAI streaming provider | Spec: ${specification.name} (${specification.id}) | Messages: ${messages.length} | Tools: ${tools?.length || 0}`);
+        }
+        await streamWithXai(specification, messages, tools, xaiClient, (event) => uiAdapter.handleEvent(event), onComplete, abortSignal);
+    }
     // Helper method to execute tools for promptAgent
     async executeToolsForPromptAgent(toolCalls, toolHandlers, allToolCalls, signal) {
         const responses = [];
@@ -9223,6 +9223,16 @@ export const GetSpecification = gql `
         temperature
         probability
       }
+      xai {
+        tokenLimit
+        completionTokenLimit
+        model
+        key
+        modelName
+        endpoint
+        temperature
+        probability
+      }
       groq {
         tokenLimit
         completionTokenLimit
@@ -9583,6 +9593,16 @@ export const QuerySpecifications = gql `
         temperature
         probability
       }
+      xai {
+        tokenLimit
+        completionTokenLimit
+        model
+        key
+        modelName
+        endpoint
+        temperature
+        probability
+      }
       groq {
         tokenLimit
         completionTokenLimit
@@ -399,13 +399,13 @@ export declare enum AnthropicModels {
     Claude_3Haiku = "CLAUDE_3_HAIKU",
     /** Claude 3 Haiku (03-07-2024 version) */
     Claude_3Haiku_20240307 = "CLAUDE_3_HAIKU_20240307",
-    /** Claude 3 Opus (Latest) */
+    /** @deprecated Use Claude 4 Opus instead. */
     Claude_3Opus = "CLAUDE_3_OPUS",
     /** Claude 3 Opus (02-29-2024 version) */
     Claude_3Opus_20240229 = "CLAUDE_3_OPUS_20240229",
-    /** Claude 3 Sonnet (Latest) */
+    /** @deprecated Use Claude 4 Sonnet instead. */
    Claude_3Sonnet = "CLAUDE_3_SONNET",
-    /** Claude 3 Sonnet (02-29-2024 version) */
+    /** @deprecated Use Claude 4 Sonnet instead. */
    Claude_3Sonnet_20240229 = "CLAUDE_3_SONNET_20240229",
    /** Claude 4 Opus (Latest) */
    Claude_4Opus = "CLAUDE_4_OPUS",
@@ -8460,7 +8460,9 @@ export declare enum ModelServiceTypes {
     /** Replicate */
     Replicate = "REPLICATE",
     /** Voyage */
-    Voyage = "VOYAGE"
+    Voyage = "VOYAGE",
+    /** xAI */
+    Xai = "XAI"
 }
 /** Represents an LLM text entity extraction connector. */
 export type ModelTextExtractionProperties = {
@@ -13803,6 +13805,8 @@ export type Specification = {
     type?: Maybe<SpecificationTypes>;
     /** The Voyage model properties. */
     voyage?: Maybe<VoyageModelProperties>;
+    /** The xAI model properties. */
+    xai?: Maybe<XaiModelProperties>;
 };
 /** Represents a filter for LLM specifications. */
 export type SpecificationFilter = {
@@ -13889,6 +13893,8 @@ export type SpecificationInput = {
     type?: InputMaybe<SpecificationTypes>;
     /** The Voyage model properties. */
     voyage?: InputMaybe<VoyageModelPropertiesInput>;
+    /** The XAI model properties. */
+    xai?: InputMaybe<XaiModelPropertiesInput>;
 };
 /** Represents LLM specification query results. */
 export type SpecificationResults = {
@@ -13973,6 +13979,8 @@ export type SpecificationUpdateInput = {
     type?: InputMaybe<SpecificationTypes>;
     /** The Voyage model properties. */
     voyage?: InputMaybe<VoyageModelPropertiesUpdateInput>;
+    /** The XAI model properties. */
+    xai?: InputMaybe<XaiModelPropertiesUpdateInput>;
 };
 /** Represents the storage policy. */
 export type StoragePolicy = {
@@ -14894,6 +14902,75 @@ export type WorkflowUpdateInput = {
     /** The storage stage of the content workflow. */
     storage?: InputMaybe<StorageWorkflowStageInput>;
 };
+/** Represents xAI model properties. */
+export type XaiModelProperties = {
+    __typename?: 'XAIModelProperties';
+    /** The limit of tokens generated by prompt completion. */
+    completionTokenLimit?: Maybe<Scalars['Int']['output']>;
+    /** The xAI API endpoint, if using developer's own account. */
+    endpoint?: Maybe<Scalars['URL']['output']>;
+    /** The xAI API key, if using developer's own account. */
+    key?: Maybe<Scalars['String']['output']>;
+    /** The xAI model, or custom, when using developer's own account. */
+    model: XaiModels;
+    /** The xAI model name, if using developer's own account. */
+    modelName?: Maybe<Scalars['String']['output']>;
+    /** The model token probability. */
+    probability?: Maybe<Scalars['Float']['output']>;
+    /** The model temperature. */
+    temperature?: Maybe<Scalars['Float']['output']>;
+    /** The number of tokens which can provided to the xAI model, if using developer's own account. */
+    tokenLimit?: Maybe<Scalars['Int']['output']>;
+};
+/** Represents xAI model properties. */
+export type XaiModelPropertiesInput = {
+    /** The limit of tokens generated by prompt completion. */
+    completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
+    /** The xAI API endpoint, if using developer's own account. */
+    endpoint?: InputMaybe<Scalars['URL']['input']>;
+    /** The xAI API key, if using developer's own account. */
+    key?: InputMaybe<Scalars['String']['input']>;
+    /** The xAI model, or custom, when using developer's own account. */
+    model: XaiModels;
+    /** The xAI model name, if using developer's own account. */
+    modelName?: InputMaybe<Scalars['String']['input']>;
+    /** The model token probability. */
+    probability?: InputMaybe<Scalars['Float']['input']>;
+    /** The model temperature. */
+    temperature?: InputMaybe<Scalars['Float']['input']>;
+    /** The number of tokens which can provided to the xAI model, if using developer's own account. */
+    tokenLimit?: InputMaybe<Scalars['Int']['input']>;
+};
+/** Represents xAI model properties. */
+export type XaiModelPropertiesUpdateInput = {
+    /** The limit of tokens generated by prompt completion. */
+    completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
+    /** The xAI API endpoint, if using developer's own account. */
+    endpoint?: InputMaybe<Scalars['URL']['input']>;
+    /** The xAI API key, if using developer's own account. */
+    key?: InputMaybe<Scalars['String']['input']>;
+    /** The xAI model, or custom, when using developer's own account. */
+    model?: InputMaybe<XaiModels>;
+    /** The xAI model name, if using developer's own account. */
+    modelName?: InputMaybe<Scalars['String']['input']>;
+    /** The model token probability. */
+    probability?: InputMaybe<Scalars['Float']['input']>;
+    /** The model temperature. */
+    temperature?: InputMaybe<Scalars['Float']['input']>;
+    /** The number of tokens which can provided to the xAI model, if using developer's own account. */
+    tokenLimit?: InputMaybe<Scalars['Int']['input']>;
+};
+/** xAI model type */
+export declare enum XaiModels {
+    /** Developer-specified model */
+    Custom = "CUSTOM",
+    /** Grok 3 (Latest) */
+    Grok_3 = "GROK_3",
+    /** Grok 3 Mini (Latest) */
+    Grok_3Mini = "GROK_3_MINI",
+    /** Grok 4 (Latest) */
+    Grok_4 = "GROK_4"
+}
 /** Represents YouTube feed properties. */
 export type YouTubeFeedProperties = {
     __typename?: 'YouTubeFeedProperties';
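
A hedged sketch of creating a specification that targets Grok with the new input types. The createSpecification call, the Types export, and the serviceType field follow the SDK's existing patterns and should be treated as assumptions here; field values are illustrative:

import { Graphlit, Types } from "graphlit-client";

const client = new Graphlit();

// SpecificationInput now accepts the xai block defined above.
const spec = await client.createSpecification({
  name: "Grok 4 Completion",                 // illustrative name
  type: Types.SpecificationTypes.Completion, // assumed, per existing spec types
  serviceType: Types.ModelServiceTypes.Xai,
  xai: {
    model: Types.XaiModels.Grok_4,
    temperature: 0.2,
    completionTokenLimit: 2048,
  },
});
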
@@ -25878,6 +25955,17 @@ export type GetSpecificationQuery = {
         temperature?: number | null;
         probability?: number | null;
     } | null;
+    xai?: {
+        __typename?: 'XAIModelProperties';
+        tokenLimit?: number | null;
+        completionTokenLimit?: number | null;
+        model: XaiModels;
+        key?: string | null;
+        modelName?: string | null;
+        endpoint?: any | null;
+        temperature?: number | null;
+        probability?: number | null;
+    } | null;
     groq?: {
         __typename?: 'GroqModelProperties';
         tokenLimit?: number | null;
@@ -26283,6 +26371,17 @@ export type QuerySpecificationsQuery = {
         temperature?: number | null;
         probability?: number | null;
     } | null;
+    xai?: {
+        __typename?: 'XAIModelProperties';
+        tokenLimit?: number | null;
+        completionTokenLimit?: number | null;
+        model: XaiModels;
+        key?: string | null;
+        modelName?: string | null;
+        endpoint?: any | null;
+        temperature?: number | null;
+        probability?: number | null;
+    } | null;
     groq?: {
         __typename?: 'GroqModelProperties';
         tokenLimit?: number | null;
@@ -31,13 +31,13 @@ export var AnthropicModels;
     AnthropicModels["Claude_3Haiku"] = "CLAUDE_3_HAIKU";
     /** Claude 3 Haiku (03-07-2024 version) */
     AnthropicModels["Claude_3Haiku_20240307"] = "CLAUDE_3_HAIKU_20240307";
-    /** Claude 3 Opus (Latest) */
+    /** @deprecated Use Claude 4 Opus instead. */
     AnthropicModels["Claude_3Opus"] = "CLAUDE_3_OPUS";
     /** Claude 3 Opus (02-29-2024 version) */
     AnthropicModels["Claude_3Opus_20240229"] = "CLAUDE_3_OPUS_20240229";
-    /** Claude 3 Sonnet (Latest) */
+    /** @deprecated Use Claude 4 Sonnet instead. */
     AnthropicModels["Claude_3Sonnet"] = "CLAUDE_3_SONNET";
-    /** Claude 3 Sonnet (02-29-2024 version) */
+    /** @deprecated Use Claude 4 Sonnet instead. */
     AnthropicModels["Claude_3Sonnet_20240229"] = "CLAUDE_3_SONNET_20240229";
     /** Claude 4 Opus (Latest) */
     AnthropicModels["Claude_4Opus"] = "CLAUDE_4_OPUS";
@@ -1469,6 +1469,8 @@ export var ModelServiceTypes;
     ModelServiceTypes["Replicate"] = "REPLICATE";
     /** Voyage */
     ModelServiceTypes["Voyage"] = "VOYAGE";
+    /** xAI */
+    ModelServiceTypes["Xai"] = "XAI";
 })(ModelServiceTypes || (ModelServiceTypes = {}));
 /** Model type */
 export var ModelTypes;
@@ -2246,6 +2248,18 @@ export var VoyageModels;
     /** Voyage Multilingual 2.0 */
     VoyageModels["VoyageMultilingual_2_0"] = "VOYAGE_MULTILINGUAL_2_0";
 })(VoyageModels || (VoyageModels = {}));
+/** xAI model type */
+export var XaiModels;
+(function (XaiModels) {
+    /** Developer-specified model */
+    XaiModels["Custom"] = "CUSTOM";
+    /** Grok 3 (Latest) */
+    XaiModels["Grok_3"] = "GROK_3";
+    /** Grok 3 Mini (Latest) */
+    XaiModels["Grok_3Mini"] = "GROK_3_MINI";
+    /** Grok 4 (Latest) */
+    XaiModels["Grok_4"] = "GROK_4";
+})(XaiModels || (XaiModels = {}));
 export var YouTubeTypes;
 (function (YouTubeTypes) {
     /** YouTube Channel */
@@ -136,6 +136,12 @@ const DEEPSEEK_MODEL_MAP = {
     [Types.DeepseekModels.Chat]: "deepseek-chat",
     [Types.DeepseekModels.Reasoner]: "deepseek-reasoner",
 };
+// xAI model mappings
+const XAI_MODEL_MAP = {
+    [Types.XaiModels.Grok_4]: "grok-4",
+    [Types.XaiModels.Grok_3]: "grok-3",
+    [Types.XaiModels.Grok_3Mini]: "grok-3-mini",
+};
 /**
  * Get the actual model name for a given specification
  * @param specification - The Graphlit specification object
@@ -171,6 +177,9 @@ export function getModelName(specification) {
     if (specification?.deepseek?.modelName) {
         return specification.deepseek.modelName;
     }
+    if (specification?.xai?.modelName) {
+        return specification.xai.modelName;
+    }
     // Map based on service type and model enum
     switch (serviceType) {
         case Types.ModelServiceTypes.OpenAi:
@@ -201,6 +210,9 @@ export function getModelName(specification) {
         case Types.ModelServiceTypes.Deepseek:
             const deepseekModel = specification?.deepseek?.model;
             return deepseekModel ? DEEPSEEK_MODEL_MAP[deepseekModel] : undefined;
+        case Types.ModelServiceTypes.Xai:
+            const xaiModel = specification?.xai?.model;
+            return xaiModel ? XAI_MODEL_MAP[xaiModel] : undefined;
         default:
             return undefined;
     }
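
How the new mapping resolves in practice, as a sketch; the hand-built partial specification and the dist import path are assumptions for illustration:

import { getModelName } from "graphlit-client/dist/model-mapping.js"; // assumed path
import { Types } from "graphlit-client";

// A partial specification carrying only the fields getModelName consults.
const spec = {
  serviceType: Types.ModelServiceTypes.Xai,
  xai: { model: Types.XaiModels.Grok_3Mini },
};

console.log(getModelName(spec)); // expected: "grok-3-mini" via XAI_MODEL_MAP
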
@@ -221,6 +233,7 @@ export function isStreamingSupported(serviceType) {
         Types.ModelServiceTypes.Mistral,
         Types.ModelServiceTypes.Bedrock,
         Types.ModelServiceTypes.Deepseek,
+        Types.ModelServiceTypes.Xai,
     ];
     return streamingServices.includes(serviceType);
 }
@@ -50,4 +50,6 @@ onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls:
  */
 export declare function streamWithBedrock(specification: Specification, messages: BedrockMessage[], systemPrompt: string | undefined, tools: ToolDefinitionInput[] | undefined, bedrockClient: any, // BedrockRuntimeClient instance
 onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls: ConversationToolCall[], usage?: any) => void, abortSignal?: AbortSignal): Promise<void>;
+export declare function streamWithXai(specification: Specification, messages: OpenAIMessage[], tools: ToolDefinitionInput[] | undefined, xaiClient: any, // OpenAI client instance configured for xAI
+onEvent: (event: StreamEvent) => void, onComplete: (message: string, toolCalls: ConversationToolCall[], usage?: any) => void, abortSignal?: AbortSignal): Promise<void>;
 export {};
@@ -2526,3 +2526,31 @@ onEvent, onComplete, abortSignal) {
         throw error;
     }
 }
+export async function streamWithXai(specification, messages, tools, xaiClient, // OpenAI client instance configured for xAI
+onEvent, onComplete, abortSignal) {
+    try {
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`🚀 [xAI] Starting stream | Model: ${getModelName(specification)} | Messages: ${messages.length} | Tools: ${tools?.length || 0}`);
+        }
+        // xAI uses the same API as OpenAI, so we can reuse the OpenAI streaming logic
+        return await streamWithOpenAI(specification, messages, tools, xaiClient, onEvent, onComplete, abortSignal);
+    }
+    catch (error) {
+        // Handle xAI-specific errors if any
+        const errorMessage = error.message || error.toString();
+        if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+            console.log(`⚠️ [xAI] Error: ${errorMessage}`);
+        }
+        // Check for rate limit errors
+        if (error.status === 429 || error.statusCode === 429) {
+            if (process.env.DEBUG_GRAPHLIT_SDK_STREAMING) {
+                console.log(`⚠️ [xAI] Rate limit hit (429)`);
+            }
+            // Re-throw with proper status code for retry logic
+            const rateLimitError = new Error("xAI rate limit exceeded");
+            rateLimitError.statusCode = 429;
+            throw rateLimitError;
+        }
+        throw error;
+    }
+}
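
Since streamWithXai delegates to streamWithOpenAI, the provider is effectively the OpenAI streaming path against a different base URL. An equivalent raw call with the OpenAI SDK, for illustration only (not part of the package):

import OpenAI from "openai";

const xai = new OpenAI({
  baseURL: "https://api.x.ai/v1",
  apiKey: process.env.XAI_API_KEY,
});

// Same wire protocol as OpenAI chat completions, streamed chunk by chunk.
const stream = await xai.chat.completions.create({
  model: "grok-3-mini",
  messages: [{ role: "user", content: "Hello, Grok." }],
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
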
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "graphlit-client",
-  "version": "1.0.20250705001",
+  "version": "1.0.20250710002",
   "description": "Graphlit API Client for TypeScript",
   "type": "module",
   "main": "./dist/client.js",