graphlit-client 1.0.20241212002 → 1.0.20241212004
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2221,7 +2221,6 @@ exports.CompleteConversation = (0, graphql_tag_1.default) `
 modelService
 model
 }
-assistantMessage
 }
 }
 }
@@ -2529,7 +2528,6 @@ exports.ContinueConversation = (0, graphql_tag_1.default) `
 modelService
 model
 }
-assistantMessage
 }
 }
 }
@@ -2884,7 +2882,6 @@ exports.FormatConversation = (0, graphql_tag_1.default) `
 modelService
 model
 }
-assistantMessage
 }
 }
 }
@@ -3607,7 +3604,6 @@ exports.PromptConversation = (0, graphql_tag_1.default) `
 modelService
 model
 }
-assistantMessage
 }
 }
 }
@@ -7051,6 +7047,7 @@ exports.GetSpecification = (0, graphql_tag_1.default) `
 completionTokenLimit
 model
 key
+endpoint
 modelName
 temperature
 probability
@@ -7356,6 +7353,7 @@ exports.QuerySpecifications = (0, graphql_tag_1.default) `
 completionTokenLimit
 model
 key
+endpoint
 modelName
 temperature
 probability
@@ -1782,8 +1782,6 @@ export type ConversationCitation = {
 /** Represents the RAG pipeline details for a prompted conversation. */
 export type ConversationDetails = {
   __typename?: 'ConversationDetails';
-  /** The LLM assistant message, prior to parsing JSON guardrails. */
-  assistantMessage?: Maybe<Scalars['String']['output']>;
   /** The LLM completion token limit. */
   completionTokenLimit?: Maybe<Scalars['Int']['output']>;
   /** The formatted RAG instructions. */
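The hunk above, together with the four mutation documents earlier in this diff, removes assistantMessage (the raw LLM reply captured before JSON guardrail parsing) from ConversationDetails, so callers that read it off the returned details need to change. A minimal TypeScript sketch of one possible adaptation follows; it types the payload structurally instead of importing the generated types, and the role/message field names on the conversation messages are assumptions based on this diff, not verbatim API guarantees.

// Structural stand-in for the ConversationDetails payload returned by the affected
// mutations after this release; only the fields relevant to the change are modeled.
type ConversationDetailsSketch = {
  // assistantMessage?: string | null;   // removed in 1.0.20241212004
  messages?: Array<{ role: string; message?: string | null }> | null; // field names assumed
};

// Call sites that previously returned `details.assistantMessage` no longer type-check.
// One possible fallback is the last assistant-role entry in the selected messages.
function lastAssistantText(details: ConversationDetailsSketch): string | undefined {
  const assistant = (details.messages ?? []).filter((m) => m.role === 'ASSISTANT'); // enum value assumed
  return assistant[assistant.length - 1]?.message ?? undefined;
}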
@@ -8059,11 +8057,13 @@ export type OpenAiModelProperties = {
   completionTokenLimit?: Maybe<Scalars['Int']['output']>;
   /** The OpenAI vision detail mode. Only applies when using OpenAI for image completion. */
   detailLevel?: Maybe<OpenAiVisionDetailLevels>;
-  /** The OpenAI API key, if using developer's own account. */
+  /** The OpenAI-compatible API endpoint, if using developer's own account. */
+  endpoint?: Maybe<Scalars['URL']['output']>;
+  /** The OpenAI-compatible API key, if using developer's own account. */
   key?: Maybe<Scalars['String']['output']>;
   /** The OpenAI model, or custom, when using developer's own account. */
   model: OpenAiModels;
-  /** The OpenAI model name, if using developer's own account. */
+  /** The OpenAI-compatible model name, if using developer's own account. */
   modelName?: Maybe<Scalars['String']['output']>;
   /** The model token probability. */
   probability?: Maybe<Scalars['Float']['output']>;
@@ -8080,11 +8080,13 @@ export type OpenAiModelPropertiesInput = {
   completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
   /** The OpenAI vision detail mode. Only applies when using OpenAI for image completion. */
   detailLevel?: InputMaybe<OpenAiVisionDetailLevels>;
-  /** The OpenAI API key, if using developer's own account. */
+  /** The OpenAI-compatible API endpoint, if using developer's own account. */
+  endpoint?: InputMaybe<Scalars['URL']['input']>;
+  /** The OpenAI-compatible API key, if using developer's own account. */
   key?: InputMaybe<Scalars['String']['input']>;
   /** The OpenAI model, or custom, when using developer's own account. */
   model: OpenAiModels;
-  /** The OpenAI model name, if using developer's own account. */
+  /** The OpenAI-compatible model name, if using developer's own account. */
   modelName?: InputMaybe<Scalars['String']['input']>;
   /** The model token probability. */
   probability?: InputMaybe<Scalars['Float']['input']>;
@@ -8101,11 +8103,13 @@ export type OpenAiModelPropertiesUpdateInput = {
   completionTokenLimit?: InputMaybe<Scalars['Int']['input']>;
   /** The OpenAI vision detail mode. Only applies when using OpenAI for image completion. */
   detailLevel?: InputMaybe<OpenAiVisionDetailLevels>;
-  /** The OpenAI API key, if using developer's own account. */
+  /** The OpenAI-compatible API endpoint, if using developer's own account. */
+  endpoint?: InputMaybe<Scalars['URL']['input']>;
+  /** The OpenAI-compatible API key, if using developer's own account. */
   key?: InputMaybe<Scalars['String']['input']>;
   /** The Azure OpenAI model, or custom, when using developer's own account. */
   model?: InputMaybe<OpenAiModels>;
-  /** The OpenAI model name, if using developer's own account. */
+  /** The OpenAI-compatible model name, if using developer's own account. */
   modelName?: InputMaybe<Scalars['String']['input']>;
   /** The model token probability. */
   probability?: InputMaybe<Scalars['Float']['input']>;
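Across the three OpenAiModelProperties hunks above, the net change is a new optional endpoint field (URL scalar) alongside key and modelName for developer-owned, OpenAI-compatible accounts, plus reworded doc comments. A minimal sketch of the input shape this enables; it is typed structurally rather than with the generated OpenAiModelPropertiesInput, and the 'CUSTOM' model literal and all values are illustrative assumptions.

// Structural sketch of the OpenAI properties input after this change; the real
// generated type is OpenAiModelPropertiesInput in graphlit-client's typings.
type OpenAiPropertiesSketch = {
  endpoint?: string;             // new: OpenAI-compatible API endpoint, for developer-owned accounts
  key?: string;                  // OpenAI-compatible API key
  model: string;                 // OpenAiModels enum in the generated types; a string literal stands in here
  modelName?: string;            // OpenAI-compatible model name served by the endpoint
  completionTokenLimit?: number;
  temperature?: number;
  probability?: number;
};

// Example: point a specification at a self-hosted, OpenAI-compatible server (values are illustrative).
const openAiProperties: OpenAiPropertiesSketch = {
  endpoint: 'https://llm.example.com/v1',
  key: '<api-key>',
  model: 'CUSTOM',               // assumed member name; check the OpenAiModels enum in the package
  modelName: 'my-proxied-model',
  completionTokenLimit: 2048,
};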
@@ -14372,7 +14376,6 @@ export type CompleteConversationMutation = {
 formattedObservables?: string | null;
 formattedInstructions?: string | null;
 formattedTools?: string | null;
-assistantMessage?: string | null;
 messages?: Array<{
   __typename?: 'ConversationMessage';
   role: ConversationRoleTypes;
@@ -14706,7 +14709,6 @@ export type ContinueConversationMutation = {
 formattedObservables?: string | null;
 formattedInstructions?: string | null;
 formattedTools?: string | null;
-assistantMessage?: string | null;
 messages?: Array<{
   __typename?: 'ConversationMessage';
   role: ConversationRoleTypes;
@@ -15102,7 +15104,6 @@ export type FormatConversationMutation = {
 formattedObservables?: string | null;
 formattedInstructions?: string | null;
 formattedTools?: string | null;
-assistantMessage?: string | null;
 messages?: Array<{
   __typename?: 'ConversationMessage';
   role: ConversationRoleTypes;
@@ -15916,7 +15917,6 @@ export type PromptConversationMutation = {
 formattedObservables?: string | null;
 formattedInstructions?: string | null;
 formattedTools?: string | null;
-assistantMessage?: string | null;
 messages?: Array<{
   __typename?: 'ConversationMessage';
   role: ConversationRoleTypes;
@@ -20233,6 +20233,7 @@ export type GetSpecificationQuery = {
 completionTokenLimit?: number | null;
 model: OpenAiModels;
 key?: string | null;
+endpoint?: any | null;
 modelName?: string | null;
 temperature?: number | null;
 probability?: number | null;
@@ -20575,6 +20576,7 @@ export type QuerySpecificationsQuery = {
 completionTokenLimit?: number | null;
 model: OpenAiModels;
 key?: string | null;
+endpoint?: any | null;
 modelName?: string | null;
 temperature?: number | null;
 probability?: number | null;
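In the two query-result hunks above, the new endpoint surfaces as `any | null` because the URL custom scalar is not mapped to a concrete TypeScript type in this build. A small, illustrative narrowing helper, typed structurally rather than with the exported GetSpecificationQuery / QuerySpecificationsQuery types:

// `props` stands in for the OpenAI properties object selected by GetSpecification and
// QuerySpecifications in this version; only the field added in this diff is modeled.
function specificationEndpoint(props: { endpoint?: any | null }): URL | undefined {
  if (typeof props.endpoint !== 'string' || props.endpoint.length === 0) {
    return undefined; // scalar was null/absent, or not serialized as a string
  }
  try {
    return new URL(props.endpoint); // validate before handing it to an HTTP client
  } catch {
    return undefined; // not a well-formed URL
  }
}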