@wildix/wim-knowledge-base-client 0.0.44 → 0.0.46

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registry.
@@ -382,11 +382,11 @@ const se_QueryKnowledgeBaseCommand = async (input, context) => {
  b.p('knowledgeBaseId', () => input.knowledgeBaseId, '{knowledgeBaseId}', false);
  const query = (0, smithy_client_1.map)({
  [_c]: [, input[_cI]],
- [_q]: [, (0, smithy_client_1.expectNonNull)(input[_q], `query`)],
  });
  let body;
  body = JSON.stringify((0, smithy_client_1.take)(input, {
  'llmConfig': _ => se_LlmConfig(_, context),
+ 'query': [],
  'searchConfig': _ => se_SearchConfig(_, context),
  }));
  b.m("POST")
@@ -431,10 +431,10 @@ const se_SearchKnowledgeBaseCommand = async (input, context) => {
  b.p('knowledgeBaseId', () => input.knowledgeBaseId, '{knowledgeBaseId}', false);
  const query = (0, smithy_client_1.map)({
  [_c]: [, input[_cI]],
- [_q]: [, (0, smithy_client_1.expectNonNull)(input[_q], `query`)],
  });
  let body;
  body = JSON.stringify((0, smithy_client_1.take)(input, {
+ 'query': [],
  'searchConfig': _ => se_SearchConfig(_, context),
  'withMetadata': [],
  }));
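
In both serializers, the required `query` URL parameter (`[_q]`, previously validated with `expectNonNull`) is removed and `query` becomes a plain field in the JSON request body; the caller-facing input shape is unchanged. A minimal caller-side sketch — the `WimKnowledgeBaseClient` class name is an assumption, only the command names appear in this diff:

```ts
import {
  WimKnowledgeBaseClient, // assumed client class name, not shown in this diff
  QueryKnowledgeBaseCommand,
} from "@wildix/wim-knowledge-base-client";

async function ask(knowledgeBaseId: string, question: string) {
  const client = new WimKnowledgeBaseClient({});
  // `query` is still a top-level input field; as of 0.0.46 it is sent in the
  // POST body next to llmConfig/searchConfig instead of as ?query=... in the URL.
  return client.send(
    new QueryKnowledgeBaseCommand({ knowledgeBaseId, query: question }),
  );
}
```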
@@ -1091,7 +1091,6 @@ const se_LlmConfig = (input, context) => {
  return (0, smithy_client_1.take)(input, {
  'maxOutputTokens': [],
  'model': [],
- 'provider': [],
  'systemPrompt': [],
  'temperature': smithy_client_1.serializeFloat,
  });
@@ -1197,14 +1196,12 @@ const de_SearchHistoryResults = (output, context) => {
  };
  const de_SearchKnowledgeBaseResult = (output, context) => {
  return (0, smithy_client_1.take)(output, {
- 'bm25Score': smithy_client_1.limitedParseDouble,
  'content': smithy_client_1.expectString,
  'dataSourceId': smithy_client_1.expectString,
  'dataSourceType': smithy_client_1.expectString,
  'documentId': smithy_client_1.expectString,
  'documentTitle': smithy_client_1.expectString,
  'documentUrl': smithy_client_1.expectString,
- 'knnScore': smithy_client_1.limitedParseDouble,
  'score': smithy_client_1.limitedParseDouble,
  });
  };
@@ -1227,7 +1224,6 @@ const _dT = "documentType";
  const _eSI = "externalSessionId";
  const _l = "limit";
  const _o = "offset";
- const _q = "query";
  const _s = "sort";
  const _sT = "syncType";
  const _sU = "searchUser";
@@ -358,11 +358,11 @@ export const se_QueryKnowledgeBaseCommand = async (input, context) => {
  b.p('knowledgeBaseId', () => input.knowledgeBaseId, '{knowledgeBaseId}', false);
  const query = map({
  [_c]: [, input[_cI]],
- [_q]: [, __expectNonNull(input[_q], `query`)],
  });
  let body;
  body = JSON.stringify(take(input, {
  'llmConfig': _ => se_LlmConfig(_, context),
+ 'query': [],
  'searchConfig': _ => se_SearchConfig(_, context),
  }));
  b.m("POST")
@@ -405,10 +405,10 @@ export const se_SearchKnowledgeBaseCommand = async (input, context) => {
  b.p('knowledgeBaseId', () => input.knowledgeBaseId, '{knowledgeBaseId}', false);
  const query = map({
  [_c]: [, input[_cI]],
- [_q]: [, __expectNonNull(input[_q], `query`)],
  });
  let body;
  body = JSON.stringify(take(input, {
+ 'query': [],
  'searchConfig': _ => se_SearchConfig(_, context),
  'withMetadata': [],
  }));
@@ -1031,7 +1031,6 @@ const se_LlmConfig = (input, context) => {
  return take(input, {
  'maxOutputTokens': [],
  'model': [],
- 'provider': [],
  'systemPrompt': [],
  'temperature': __serializeFloat,
  });
@@ -1137,14 +1136,12 @@ const de_SearchHistoryResults = (output, context) => {
  };
  const de_SearchKnowledgeBaseResult = (output, context) => {
  return take(output, {
- 'bm25Score': __limitedParseDouble,
  'content': __expectString,
  'dataSourceId': __expectString,
  'dataSourceType': __expectString,
  'documentId': __expectString,
  'documentTitle': __expectString,
  'documentUrl': __expectString,
- 'knnScore': __limitedParseDouble,
  'score': __limitedParseDouble,
  });
  };
@@ -1167,7 +1164,6 @@ const _dT = "documentType";
  const _eSI = "externalSessionId";
  const _l = "limit";
  const _o = "offset";
- const _q = "query";
  const _s = "sort";
  const _sT = "syncType";
  const _sU = "searchUser";
@@ -46,7 +46,6 @@ declare const QueryKnowledgeBaseCommand_base: {
  * extendMetadata: true || false,
  * },
  * llmConfig: { // LlmConfig
- * provider: "STRING_VALUE",
  * model: "STRING_VALUE",
  * temperature: Number("double"),
  * maxOutputTokens: Number("int"),
@@ -61,8 +61,6 @@ declare const SearchKnowledgeBaseCommand_base: {
  * // dataSourceType: "files" || "confluence" || "gdrive" || "proxy", // required
  * // content: "STRING_VALUE", // required
  * // score: Number("double"), // required
- * // knnScore: Number("double"), // required
- * // bm25Score: Number("double"), // required
  * // },
  * // ],
  * // metadata: "DOCUMENT_VALUE",
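
The same removal shows up in the generated usage comment for `SearchKnowledgeBaseCommand`: each documented result now carries only the combined `score`. A search-side sketch under the same assumed client class name:

```ts
import {
  WimKnowledgeBaseClient, // assumed client class name, not shown in this diff
  SearchKnowledgeBaseCommand,
} from "@wildix/wim-knowledge-base-client";

async function search(knowledgeBaseId: string, query: string) {
  const client = new WimKnowledgeBaseClient({});
  // In 0.0.46 `query` and `withMetadata` are both serialized into the JSON body.
  return client.send(
    new SearchKnowledgeBaseCommand({ knowledgeBaseId, query, withMetadata: true }),
  );
}
```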
@@ -1376,12 +1376,7 @@ export interface ListKnowledgeBasesOutput {
  */
  export interface LlmConfig {
  /**
- * The LLM provider to use for generating answers. Common values: 'openai', 'anthropic', 'google', 'aws', 'azure', etc. Must match a configured provider in your environment
- * @public
- */
- provider?: string | undefined;
- /**
- * The specific model identifier from the selected provider. Examples: 'gpt-4o' or 'gpt-4o-mini' for OpenAI, 'claude-3-opus' for Anthropic. Check provider documentation for available models
+ * The specific provider and model identifier. Examples: 'openai/gpt-4o' or 'openai/gpt-4o-mini' for OpenAI, 'mistral/mistral-small-2506' for Mistral. Check provider documentation for available models
  * @public
  */
  model?: string | undefined;
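
This is the change most likely to affect caller code: the separate `provider` field is gone and its value is folded into `model` as a "<provider>/<model>" string. A hedged migration sketch, assuming `LlmConfig` is re-exported from the package root; all values are illustrative:

```ts
import type { LlmConfig } from "@wildix/wim-knowledge-base-client";

// 0.0.44: { provider: "openai", model: "gpt-4o", ... }
// 0.0.46: the provider is folded into the model identifier.
const llmConfig: LlmConfig = {
  model: "openai/gpt-4o", // "<provider>/<model>", per the updated doc comment
  temperature: 0.2,       // illustrative, not a package default
  maxOutputTokens: 1024,  // illustrative, not a package default
  systemPrompt: "Answer using only the retrieved documents.",
};
```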
@@ -1635,16 +1630,6 @@ export interface SearchKnowledgeBaseResult {
  * @public
  */
  score: number;
- /**
- * The score of semantic search.
- * @public
- */
- knnScore: number;
- /**
- * The score of the result from the full-text search.
- * @public
- */
- bm25Score: number;
  }
  /**
  * @public
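
With the per-method scores (`knnScore` for semantic search, `bm25Score` for full-text search) removed from `SearchKnowledgeBaseResult`, the combined `score` is the only ranking signal left. A consumer-side sketch, assuming the interface is re-exported from the package root; the 0.5 cutoff is illustrative:

```ts
import type { SearchKnowledgeBaseResult } from "@wildix/wim-knowledge-base-client";

// Code that read r.knnScore or r.bm25Score must fall back to the single
// combined score when upgrading to 0.0.46.
function topResults(results: SearchKnowledgeBaseResult[], minScore = 0.5) {
  return results
    .filter((r) => r.score >= minScore)
    .sort((a, b) => b.score - a.score);
}
```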
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@wildix/wim-knowledge-base-client",
  "description": "@wildix/wim-knowledge-base-client client",
- "version": "0.0.44",
+ "version": "0.0.46",
  "scripts": {
  "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'",
  "build:cjs": "tsc -p tsconfig.cjs.json",