graphlit-client 1.0.20241212003 → 1.0.20241213001

@@ -2100,6 +2100,7 @@ exports.CompleteConversation = (0, graphql_tag_1.default) `
   formattedObservables
   formattedInstructions
   formattedTools
+  specification
   messages {
   role
   author
@@ -2221,7 +2222,6 @@ exports.CompleteConversation = (0, graphql_tag_1.default) `
   modelService
   model
   }
-  assistantMessage
   }
   }
   }
@@ -2408,6 +2408,7 @@ exports.ContinueConversation = (0, graphql_tag_1.default) `
   formattedObservables
   formattedInstructions
   formattedTools
+  specification
   messages {
   role
   author
@@ -2529,7 +2530,6 @@ exports.ContinueConversation = (0, graphql_tag_1.default) `
   modelService
   model
   }
-  assistantMessage
   }
   }
   }
@@ -2763,6 +2763,7 @@ exports.FormatConversation = (0, graphql_tag_1.default) `
   formattedObservables
   formattedInstructions
   formattedTools
+  specification
   messages {
   role
   author
@@ -2884,7 +2885,6 @@ exports.FormatConversation = (0, graphql_tag_1.default) `
   modelService
   model
   }
-  assistantMessage
   }
   }
   }
@@ -3486,6 +3486,7 @@ exports.PromptConversation = (0, graphql_tag_1.default) `
   formattedObservables
   formattedInstructions
   formattedTools
+  specification
   messages {
   role
   author
@@ -3607,7 +3608,6 @@ exports.PromptConversation = (0, graphql_tag_1.default) `
   modelService
   model
   }
-  assistantMessage
   }
   }
   }
@@ -7011,7 +7011,6 @@ exports.GetSpecification = (0, graphql_tag_1.default) `
   embedCitations
   flattenCitations
   enableFacets
-  disableGuardrails
   messagesWeight
   contentsWeight
   }
@@ -7051,6 +7050,7 @@ exports.GetSpecification = (0, graphql_tag_1.default) `
   completionTokenLimit
   model
   key
+  endpoint
   modelName
   temperature
   probability
@@ -7316,7 +7316,6 @@ exports.QuerySpecifications = (0, graphql_tag_1.default) `
   embedCitations
   flattenCitations
   enableFacets
-  disableGuardrails
   messagesWeight
   contentsWeight
   }
@@ -7356,6 +7355,7 @@ exports.QuerySpecifications = (0, graphql_tag_1.default) `
   completionTokenLimit
   model
   key
+  endpoint
   modelName
   temperature
   probability
@@ -1782,8 +1782,6 @@ export type ConversationCitation = {
 /** Represents the RAG pipeline details for a prompted conversation. */
 export type ConversationDetails = {
   __typename?: 'ConversationDetails';
-  /** The LLM assistant message, prior to parsing JSON guardrails. */
-  assistantMessage?: Maybe<Scalars['String']['output']>;
   /** The LLM completion token limit. */
   completionTokenLimit?: Maybe<Scalars['Int']['output']>;
   /** The formatted RAG instructions. */
@@ -1818,6 +1816,8 @@ export type ConversationDetails = {
   sourceCount?: Maybe<Scalars['Int']['output']>;
   /** JSON representation of the source to content mapping. */
   sources?: Maybe<Scalars['String']['output']>;
+  /** JSON representation of the LLM specification. */
+  specification?: Maybe<Scalars['String']['output']>;
   /** Whether the LLM supports tool calling. */
   supportsToolCalling?: Maybe<Scalars['Boolean']['output']>;
   /** The LLM prompt token limit. */
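
The net effect on ConversationDetails: the assistantMessage field (the raw LLM reply kept prior to JSON-guardrail parsing) is gone, and a specification field carrying a JSON representation of the LLM specification takes its place in the generated type and in every conversation selection set above. A minimal sketch of consuming the new field, assuming only the field shape shown in this diff; the helper and its name are hypothetical:

// Hypothetical helper: the type mirrors the `specification` member added to
// ConversationDetails in this release; everything else is illustrative.
type ConversationDetailsLike = {
  specification?: string | null; // JSON representation of the LLM specification
};

function readSpecification(details: ConversationDetailsLike): Record<string, unknown> | null {
  if (!details.specification) return null;
  try {
    return JSON.parse(details.specification) as Record<string, unknown>;
  } catch {
    return null; // tolerate malformed JSON rather than throwing
  }
}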
@@ -1933,8 +1933,6 @@ export type ConversationStrategy = {
   __typename?: 'ConversationStrategy';
   /** The weight of contents within prompt context, in range [0.0 - 1.0]. */
   contentsWeight?: Maybe<Scalars['Float']['output']>;
-  /** Disable JSON guardrails, if system prompt asks for non-JSON output format. */
-  disableGuardrails?: Maybe<Scalars['Boolean']['output']>;
   /** Embed content citations into completed converation messages. */
   embedCitations?: Maybe<Scalars['Boolean']['output']>;
   /** Provide content facets with completed conversation. */
@@ -1952,8 +1950,6 @@ export type ConversationStrategyInput = {
 export type ConversationStrategyInput = {
   /** The weight of contents within prompt context, in range [0.0 - 1.0]. */
   contentsWeight?: InputMaybe<Scalars['Float']['input']>;
-  /** Disable JSON guardrails, if system prompt asks for non-JSON output format. */
-  disableGuardrails?: InputMaybe<Scalars['Boolean']['input']>;
   /** Embed content citations into completed converation messages. */
   embedCitations?: InputMaybe<Scalars['Boolean']['input']>;
   /** Provide content facets with completed conversation. */
@@ -1978,8 +1974,6 @@ export declare enum ConversationStrategyTypes {
 export type ConversationStrategyUpdateInput = {
   /** The weight of contents within prompt context, in range [0.0 - 1.0]. */
   contentsWeight?: InputMaybe<Scalars['Float']['input']>;
-  /** Disable JSON guardrails, if system prompt asks for non-JSON output format. */
-  disableGuardrails?: InputMaybe<Scalars['Boolean']['input']>;
   /** Embed content citations into completed converation messages. */
   embedCitations?: InputMaybe<Scalars['Boolean']['input']>;
   /** Provide content facets with completed conversation. */
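
With disableGuardrails dropped from ConversationStrategy, ConversationStrategyInput, and ConversationStrategyUpdateInput alike, any caller that still sets the flag will no longer type-check. A minimal migration sketch, assuming strategy inputs were built as plain object literals:

// After upgrading, delete any `disableGuardrails` assignment; the field no
// longer exists on the strategy input types. The literal below is illustrative.
const strategy = {
  embedCitations: true,
  messagesWeight: 0.5,
  contentsWeight: 0.5,
  // disableGuardrails: true, // removed in 1.0.20241213001 -- delete this line
};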
@@ -8059,11 +8053,13 @@ export type OpenAiModelProperties = {
   completionTokenLimit?: Maybe<Scalars['Int']['output']>;
   /** The OpenAI vision detail mode. Only applies when using OpenAI for image completion. */
   detailLevel?: Maybe<OpenAiVisionDetailLevels>;
-  /** The OpenAI API key, if using developer's own account. */
+  /** The OpenAI-compatible API endpoint, if using developer's own account. */
+  endpoint?: Maybe<Scalars['URL']['output']>;
+  /** The OpenAI-compatible API key, if using developer's own account. */
   key?: Maybe<Scalars['String']['output']>;
   /** The OpenAI model, or custom, when using developer's own account. */
   model: OpenAiModels;
-  /** The OpenAI model name, if using developer's own account. */
+  /** The OpenAI-compatible model name, if using developer's own account. */
   modelName?: Maybe<Scalars['String']['output']>;
   /** The model token probability. */
   probability?: Maybe<Scalars['Float']['output']>;
@@ -8080,11 +8076,13 @@ export type OpenAiModelPropertiesInput = {
   completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
   /** The OpenAI vision detail mode. Only applies when using OpenAI for image completion. */
   detailLevel?: InputMaybe<OpenAiVisionDetailLevels>;
-  /** The OpenAI API key, if using developer's own account. */
+  /** The OpenAI-compatible API endpoint, if using developer's own account. */
+  endpoint?: InputMaybe<Scalars['URL']['input']>;
+  /** The OpenAI-compatible API key, if using developer's own account. */
   key?: InputMaybe<Scalars['String']['input']>;
   /** The OpenAI model, or custom, when using developer's own account. */
   model: OpenAiModels;
-  /** The OpenAI model name, if using developer's own account. */
+  /** The OpenAI-compatible model name, if using developer's own account. */
   modelName?: InputMaybe<Scalars['String']['input']>;
   /** The model token probability. */
   probability?: InputMaybe<Scalars['Float']['input']>;
@@ -8101,11 +8099,13 @@ export type OpenAiModelPropertiesUpdateInput = {
   completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
   /** The OpenAI vision detail mode. Only applies when using OpenAI for image completion. */
   detailLevel?: InputMaybe<OpenAiVisionDetailLevels>;
-  /** The OpenAI API key, if using developer's own account. */
+  /** The OpenAI-compatible API endpoint, if using developer's own account. */
+  endpoint?: InputMaybe<Scalars['URL']['input']>;
+  /** The OpenAI-compatible API key, if using developer's own account. */
   key?: InputMaybe<Scalars['String']['input']>;
   /** The Azure OpenAI model, or custom, when using developer's own account. */
   model?: InputMaybe<OpenAiModels>;
-  /** The OpenAI model name, if using developer's own account. */
+  /** The OpenAI-compatible model name, if using developer's own account. */
   modelName?: InputMaybe<Scalars['String']['input']>;
   /** The model token probability. */
   probability?: InputMaybe<Scalars['Float']['input']>;
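
Taken together with the reworded "OpenAI-compatible" comments, the new endpoint property suggests these specifications can now point at any server that speaks the OpenAI API, not only OpenAI itself. A sketch of what such an input might look like; the URL, key, and model name are placeholders, and the CUSTOM enum value is an assumption drawn from the "or custom" comment:

// Illustrative OpenAiModelPropertiesInput-shaped literal targeting a local
// OpenAI-compatible server. Field names come from this diff; values are assumed.
const openAiProperties = {
  model: "CUSTOM",                       // developer-supplied model (assumed enum value)
  endpoint: "http://localhost:11434/v1", // new: OpenAI-compatible API endpoint (placeholder URL)
  key: "unused-for-local",               // OpenAI-compatible API key (placeholder)
  modelName: "llama3.1",                 // OpenAI-compatible model name (placeholder)
  completionTokenLimit: 2048,
};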
@@ -14372,7 +14372,7 @@ export type CompleteConversationMutation = {
   formattedObservables?: string | null;
   formattedInstructions?: string | null;
   formattedTools?: string | null;
-  assistantMessage?: string | null;
+  specification?: string | null;
   messages?: Array<{
   __typename?: 'ConversationMessage';
   role: ConversationRoleTypes;
@@ -14706,7 +14706,7 @@ export type ContinueConversationMutation = {
   formattedObservables?: string | null;
   formattedInstructions?: string | null;
   formattedTools?: string | null;
-  assistantMessage?: string | null;
+  specification?: string | null;
   messages?: Array<{
   __typename?: 'ConversationMessage';
   role: ConversationRoleTypes;
@@ -15102,7 +15102,7 @@ export type FormatConversationMutation = {
   formattedObservables?: string | null;
   formattedInstructions?: string | null;
   formattedTools?: string | null;
-  assistantMessage?: string | null;
+  specification?: string | null;
   messages?: Array<{
   __typename?: 'ConversationMessage';
   role: ConversationRoleTypes;
@@ -15916,7 +15916,7 @@ export type PromptConversationMutation = {
   formattedObservables?: string | null;
   formattedInstructions?: string | null;
   formattedTools?: string | null;
-  assistantMessage?: string | null;
+  specification?: string | null;
   messages?: Array<{
   __typename?: 'ConversationMessage';
   role: ConversationRoleTypes;
@@ -20186,7 +20186,6 @@ export type GetSpecificationQuery = {
   embedCitations?: boolean | null;
   flattenCitations?: boolean | null;
   enableFacets?: boolean | null;
-  disableGuardrails?: boolean | null;
   messagesWeight?: number | null;
   contentsWeight?: number | null;
   } | null;
@@ -20233,6 +20232,7 @@ export type GetSpecificationQuery = {
   completionTokenLimit?: number | null;
   model: OpenAiModels;
   key?: string | null;
+  endpoint?: any | null;
   modelName?: string | null;
   temperature?: number | null;
   probability?: number | null;
@@ -20528,7 +20528,6 @@ export type QuerySpecificationsQuery = {
   embedCitations?: boolean | null;
   flattenCitations?: boolean | null;
   enableFacets?: boolean | null;
-  disableGuardrails?: boolean | null;
   messagesWeight?: number | null;
   contentsWeight?: number | null;
   } | null;
@@ -20575,6 +20574,7 @@ export type QuerySpecificationsQuery = {
   completionTokenLimit?: number | null;
   model: OpenAiModels;
   key?: string | null;
+  endpoint?: any | null;
   modelName?: string | null;
   temperature?: number | null;
   probability?: number | null;
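
Note that in the generated query results endpoint surfaces as "any | null" rather than "string | null", presumably because the custom URL scalar has no codegen mapping. Consumers may want a runtime narrow before using it; a small illustrative guard:

// The generated result type gives `endpoint?: any | null`, so narrow it at
// runtime before treating it as a URL string. Purely illustrative.
function endpointAsString(endpoint: unknown): string | undefined {
  return typeof endpoint === "string" && endpoint.length > 0 ? endpoint : undefined;
}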
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "graphlit-client",
-  "version": "1.0.20241212003",
+  "version": "1.0.20241213001",
   "description": "Graphlit API TypeScript Client",
   "main": "dist/client.js",
   "types": "dist/client.d.ts",