@wildix/wim-knowledge-base-client 0.0.14 → 0.0.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -206,6 +206,7 @@ const se_GetDocumentCommand = async (input, context) => {
     b.p('documentId', () => input.documentId, '{documentId}', false);
     const query = (0, smithy_client_1.map)({
         [_c]: [, input[_cI]],
+        [_wC]: [() => input.withChunks !== void 0, () => (input[_wC].toString())],
     });
     let body;
     b.m("GET")
@@ -643,6 +644,7 @@ const de_GetDocumentCommand = async (output, context) => {
     });
     const data = (0, smithy_client_1.expectNonNull)(((0, smithy_client_1.expectObject)(await (0, core_1.parseJsonBody)(output.body, context))), "body");
     const doc = (0, smithy_client_1.take)(data, {
+        'chunks': smithy_client_1._json,
         'document': smithy_client_1._json,
     });
     Object.assign(contents, doc);
@@ -998,6 +1000,7 @@ const se_DataSourceConfig = (input, context) => {
 };
 const se_LlmConfig = (input, context) => {
     return (0, smithy_client_1.take)(input, {
+        'maxOutputTokens': [],
         'model': [],
         'provider': [],
         'systemPrompt': [],
@@ -1118,3 +1121,4 @@ const _l = "limit";
 const _o = "offset";
 const _q = "query";
 const _sT = "syncType";
+const _wC = "withChunks";
@@ -192,6 +192,7 @@ export const se_GetDocumentCommand = async (input, context) => {
     b.p('documentId', () => input.documentId, '{documentId}', false);
     const query = map({
         [_c]: [, input[_cI]],
+        [_wC]: [() => input.withChunks !== void 0, () => (input[_wC].toString())],
     });
     let body;
     b.m("GET")
@@ -603,6 +604,7 @@ export const de_GetDocumentCommand = async (output, context) => {
     });
     const data = __expectNonNull((__expectObject(await parseBody(output.body, context))), "body");
     const doc = take(data, {
+        'chunks': _json,
         'document': _json,
     });
     Object.assign(contents, doc);
@@ -942,6 +944,7 @@ const se_DataSourceConfig = (input, context) => {
 };
 const se_LlmConfig = (input, context) => {
     return take(input, {
+        'maxOutputTokens': [],
         'model': [],
         'provider': [],
         'systemPrompt': [],
@@ -1062,3 +1065,4 @@ const _l = "limit";
 const _o = "offset";
 const _q = "query";
 const _sT = "syncType";
+const _wC = "withChunks";
@@ -38,6 +38,7 @@ declare const GetDocumentCommand_base: {
  *   companyId: "STRING_VALUE",
  *   dataSourceId: "STRING_VALUE", // required
  *   documentId: "STRING_VALUE", // required
+ *   withChunks: true || false,
  * };
  * const command = new GetDocumentCommand(input);
  * const response = await client.send(command);
@@ -59,6 +60,15 @@ declare const GetDocumentCommand_base: {
  * //     status: "draft" || "pending" || "processing" || "completed" || "failed", // required
  * //     errorMessage: "STRING_VALUE",
  * //   },
+ * //   chunks: [ // ChunksList
+ * //     { // ChunkItem
+ * //       id: "STRING_VALUE", // required
+ * //       documentId: "STRING_VALUE", // required
+ * //       content: "STRING_VALUE", // required
+ * //       createdAt: "STRING_VALUE", // required
+ * //       updatedAt: "STRING_VALUE", // required
+ * //     },
+ * //   ],
  * // };
  *
  * ```
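Taken together, the serializer, deserializer, and doc changes above add an optional `withChunks` flag to `GetDocumentCommand`: the flag is sent as the `withChunks` query-string parameter, and the deserializer now copies an optional `chunks` array into the output. A minimal TypeScript usage sketch follows; the command and field names come from the diff, but the `WimKnowledgeBaseClient` class name and its empty configuration object are assumptions inferred from the package name, not confirmed exports.

```ts
import {
  // Assumed export: the client class name is inferred from the package name
  // and may differ; GetDocumentCommand appears in the diff above.
  WimKnowledgeBaseClient,
  GetDocumentCommand,
} from "@wildix/wim-knowledge-base-client";

async function fetchDocumentWithChunks(dataSourceId: string, documentId: string) {
  const client = new WimKnowledgeBaseClient({}); // endpoint/credentials config omitted
  const response = await client.send(
    new GetDocumentCommand({
      dataSourceId,     // required
      documentId,       // required
      withChunks: true, // new in 0.0.17; serialized as the ?withChunks= query parameter
    })
  );
  // chunks is optional: present only when requested and available
  for (const chunk of response.chunks ?? []) {
    console.log(chunk.id, chunk.content);
  }
  return response;
}
```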
@@ -44,9 +44,10 @@ declare const QueryKnowledgeBaseCommand_base: {
  *     searchStrategy: "bm25" || "vector" || "hybrid",
  *   },
  *   llmConfig: { // LlmConfig
- *     provider: "STRING_VALUE", // required
- *     model: "STRING_VALUE", // required
+ *     provider: "STRING_VALUE",
+ *     model: "STRING_VALUE",
  *     temperature: Number("double"),
+ *     maxOutputTokens: Number("int"),
  *     systemPrompt: "STRING_VALUE",
  *   },
  * };
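The corresponding model change (see the `LlmConfig` interface further down) makes `provider` and `model` optional and adds an optional `maxOutputTokens` integer that caps the length of the generated answer. A sketch of the new shape, assuming `LlmConfig` is re-exported from the package root as Smithy-generated clients usually do:

```ts
import type { LlmConfig } from "@wildix/wim-knowledge-base-client";

// All fields are now optional; values here are illustrative only.
const llmConfig: LlmConfig = {
  provider: "openai",    // optional since 0.0.17 (was required)
  model: "gpt-4o-mini",  // optional since 0.0.17 (was required)
  temperature: 0.2,      // lower values give more deterministic answers
  maxOutputTokens: 1024, // new in 0.0.17: upper bound on answer tokens
  systemPrompt: "You are a helpful assistant that can answer questions about the knowledge base.",
};
```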
@@ -34,6 +34,24 @@ export declare class UnauthorizedException extends __BaseException {
      */
     constructor(opts: __ExceptionOptionType<UnauthorizedException, __BaseException>);
 }
+/**
+ * @public
+ */
+export interface ChunkItem {
+    /**
+     * The ID of the chunk. Example: 123e4567-e89b-12d3-a456-426614174000
+     * @public
+     */
+    id: string;
+    /**
+     * The ID of the document. Example: 123e4567-e89b-12d3-a456-426614174000
+     * @public
+     */
+    documentId: string;
+    content: string;
+    createdAt: string;
+    updatedAt: string;
+}
 /**
  * @public
  */
@@ -732,12 +750,14 @@ export interface GetDocumentInput {
      * @public
      */
     documentId: string;
+    withChunks?: boolean | undefined;
 }
 /**
  * @public
  */
 export interface GetDocumentOutput {
     document: DocumentItem;
+    chunks?: (ChunkItem)[] | undefined;
 }
 /**
  * @public
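Because `chunks` is optional on `GetDocumentOutput`, callers must handle its absence (the flag omitted or false). A small helper sketch, again assuming the model interfaces are re-exported from the package root:

```ts
import type { GetDocumentOutput, ChunkItem } from "@wildix/wim-knowledge-base-client";

// Returns the document text reassembled from its indexed chunks, or undefined
// when the caller did not request chunks (withChunks omitted or false).
function joinChunks(output: GetDocumentOutput): string | undefined {
  return output.chunks?.map((chunk: ChunkItem) => chunk.content).join("\n\n");
}
```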
@@ -963,17 +983,22 @@ export interface LlmConfig {
      * The LLM provider to use for the answer. Example: openai, anthropic, google, etc.
      * @public
      */
-    provider: string;
+    provider?: string | undefined;
     /**
      * The model to use for the answer. Example: gpt-4o, gpt-4o-mini, etc.
      * @public
      */
-    model: string;
+    model?: string | undefined;
     /**
-     * The temperature to use for the answer. Example: 0.5
+     * The temperature to use for the answer. The lower the temperature, the more deterministic the answer will be.
      * @public
      */
     temperature?: number | undefined;
+    /**
+     * The max output tokens to use for the answer. The bigger the number, the more detailed the answer will be.
+     * @public
+     */
+    maxOutputTokens?: number | undefined;
     /**
      * The system prompt to use for the answer. Example: You are a helpful assistant that can answer questions about the knowledge base.
      * @public
@@ -1003,12 +1028,12 @@ export interface SearchConfig {
      */
     topK?: number | undefined;
     /**
-     * The threshold for the results. Example: 0.5
+     * The threshold for the search results. Example: 0.5. The bigger the threshold, the less results will be returned.
      * @public
      */
     threshold?: number | undefined;
     /**
-     * The search strategy to use. Example: hybrid
+     * The search strategy to use. Example: hybrid. The hybrid strategy will use the vector search and the text search to return the results.
      * @public
      */
     searchStrategy?: SearchStrategy | undefined;
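For completeness, a `SearchConfig` sketch that matches the clarified doc comments above; the field names come from the diff, while the interface and the `SearchStrategy` union are assumed to be exported from the package root:

```ts
import type { SearchConfig } from "@wildix/wim-knowledge-base-client";

const searchConfig: SearchConfig = {
  topK: 5,                  // return at most five results
  threshold: 0.5,           // a higher threshold filters out more low-similarity results
  searchStrategy: "hybrid", // combines vector search with text (bm25) search
};
```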
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@wildix/wim-knowledge-base-client",
   "description": "@wildix/wim-knowledge-base-client client",
-  "version": "0.0.14",
+  "version": "0.0.17",
   "scripts": {
     "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'",
     "build:cjs": "tsc -p tsconfig.cjs.json",