@wildix/wim-knowledge-base-client 0.0.14 → 0.0.16

@@ -998,6 +998,7 @@ const se_DataSourceConfig = (input, context) => {
 };
 const se_LlmConfig = (input, context) => {
     return (0, smithy_client_1.take)(input, {
+        'maxOutputTokens': [],
         'model': [],
         'provider': [],
         'systemPrompt': [],
@@ -942,6 +942,7 @@ const se_DataSourceConfig = (input, context) => {
 };
 const se_LlmConfig = (input, context) => {
     return take(input, {
+        'maxOutputTokens': [],
         'model': [],
         'provider': [],
         'systemPrompt': [],
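
In both the CommonJS build (smithy_client_1.take) and the ES module build (take), the generated se_LlmConfig serializer now lists 'maxOutputTokens', so the field is copied into the request body instead of being dropped. A minimal sketch of the effect on the wire payload, assuming the usual @smithy/smithy-client semantics where an empty instruction array copies a non-nullish key through unchanged:

    import { take } from "@smithy/smithy-client";

    // Hypothetical input; only keys listed in the instructions survive.
    const llmConfig = {
      provider: "openai",
      model: "gpt-4o-mini",
      maxOutputTokens: 512,
      somethingElse: "silently dropped",
    };

    const body = take(llmConfig, {
      'maxOutputTokens': [],
      'model': [],
      'provider': [],
      'systemPrompt': [],
    });
    // body => { maxOutputTokens: 512, model: "gpt-4o-mini", provider: "openai" }
    // (systemPrompt is absent: the input had no non-nullish value for it)

Before 0.0.16 the same call omitted maxOutputTokens from the body, which is why using the new field requires bumping the client.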
@@ -44,9 +44,10 @@ declare const QueryKnowledgeBaseCommand_base: {
  * searchStrategy: "bm25" || "vector" || "hybrid",
  * },
  * llmConfig: { // LlmConfig
- * provider: "STRING_VALUE", // required
- * model: "STRING_VALUE", // required
+ * provider: "STRING_VALUE",
+ * model: "STRING_VALUE",
  * temperature: Number("double"),
+ * maxOutputTokens: Number("int"),
  * systemPrompt: "STRING_VALUE",
  * },
  * };
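
In the type declarations, the command's usage docblock now documents maxOutputTokens and drops the // required markers from provider and model. A hedged usage sketch assembled from that docblock; the client class name and any top-level input fields outside this hunk are assumptions, not shown in this diff:

    import {
      WimKnowledgeBaseClient, // assumed export name; not visible in this diff
      QueryKnowledgeBaseCommand,
    } from "@wildix/wim-knowledge-base-client";

    const client = new WimKnowledgeBaseClient({});

    const command = new QueryKnowledgeBaseCommand({
      // ...required top-level fields elided; they are outside this hunk
      llmConfig: {
        provider: "openai",    // optional as of 0.0.16
        model: "gpt-4o-mini",  // optional as of 0.0.16
        temperature: 0.2,
        maxOutputTokens: 1024, // new in 0.0.16
        systemPrompt: "You are a helpful assistant.",
      },
    });

    const response = await client.send(command);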
@@ -963,17 +963,22 @@ export interface LlmConfig {
      * The LLM provider to use for the answer. Example: openai, anthropic, google, etc.
      * @public
      */
-    provider: string;
+    provider?: string | undefined;
     /**
      * The model to use for the answer. Example: gpt-4o, gpt-4o-mini, etc.
      * @public
      */
-    model: string;
+    model?: string | undefined;
     /**
-     * The temperature to use for the answer. Example: 0.5
+     * The temperature to use for the answer. The lower the temperature, the more deterministic the answer will be.
      * @public
      */
     temperature?: number | undefined;
+    /**
+     * The max output tokens to use for the answer. The bigger the number, the more detailed the answer will be.
+     * @public
+     */
+    maxOutputTokens?: number | undefined;
     /**
      * The system prompt to use for the answer. Example: You are a helpful assistant that can answer questions about the knowledge base.
      * @public
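
Because provider and model are now optional, an LlmConfig can be partially specified (or empty), presumably deferring to service-side defaults. A small illustration of what now type-checks, assuming LlmConfig is exported from the package root like other Smithy-generated model types:

    import type { LlmConfig } from "@wildix/wim-knowledge-base-client";

    // Rejected by 0.0.14 (provider and model were required), accepted by 0.0.16:
    const useDefaults: LlmConfig = {};

    // Pin the model and cap the answer length with the new field:
    const pinned: LlmConfig = {
      model: "gpt-4o-mini",
      maxOutputTokens: 256,
    };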
@@ -1003,12 +1008,12 @@ export interface SearchConfig {
      */
     topK?: number | undefined;
     /**
-     * The threshold for the results. Example: 0.5
+     * The threshold for the search results. Example: 0.5. The bigger the threshold, the less results will be returned.
      * @public
      */
     threshold?: number | undefined;
     /**
-     * The search strategy to use. Example: hybrid
+     * The search strategy to use. Example: hybrid. The hybrid strategy will use the vector search and the text search to return the results.
      * @public
      */
     searchStrategy?: SearchStrategy | undefined;
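
For completeness, a SearchConfig sketch matching the clarified docs: per the updated comments, hybrid combines vector search with text (bm25) search, and a higher threshold filters out more results. That SearchStrategy accepts the plain string "hybrid" is an assumption based on the "bm25" || "vector" || "hybrid" docblock above:

    import type { SearchConfig } from "@wildix/wim-knowledge-base-client";

    const searchConfig: SearchConfig = {
      topK: 5,                  // standard top-k meaning assumed: at most 5 results
      threshold: 0.5,           // higher => fewer results, per the updated docs
      searchStrategy: "hybrid", // vector + bm25 text search combined
    };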
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@wildix/wim-knowledge-base-client",
   "description": "@wildix/wim-knowledge-base-client client",
-  "version": "0.0.14",
+  "version": "0.0.16",
   "scripts": {
     "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'",
     "build:cjs": "tsc -p tsconfig.cjs.json",