exa-js 1.8.15 → 1.8.16

package/README.md CHANGED
@@ -80,6 +80,15 @@ const answerResult = await exa.answer(
   "What is the population of New York City?"
 );
 
+// Get answer with citation contents and use the exa-pro model, which passes 2 extra queries to exa to increase coverage of the search space.
+const answerWithTextResults = await exa.answer(
+  "What is the population of New York City?",
+  {
+    text: true,
+    model: "exa-pro",
+  }
+);
+
 // Get an answer with streaming
 for await (const chunk of exa.streamAnswer(
   "What is the population of New York City?"
package/dist/index.d.mts CHANGED
@@ -3255,14 +3255,14 @@ type Status = {
  * @typedef {Object} AnswerOptions
  * @property {boolean} [stream] - Whether to stream the response. Default false.
  * @property {boolean} [text] - Whether to include text in the source results. Default false.
- * @property {"exa"} [model] - The model to use for generating the answer. Default "exa".
+ * @property {"exa" | "exa-pro"} [model] - The model to use for generating the answer. Default "exa".
  * @property {string} [systemPrompt] - A system prompt to guide the LLM's behavior when generating the answer.
  * @property {Object} [outputSchema] - A JSON Schema specification for the structure you expect the output to take
  */
 type AnswerOptions = {
     stream?: boolean;
     text?: boolean;
-    model?: "exa";
+    model?: "exa" | "exa-pro";
     systemPrompt?: string;
     outputSchema?: Record<string, unknown>;
 };
@@ -3389,7 +3389,7 @@ declare class Exa {
  * ```ts
  * const answer = await exa.answer("What is quantum computing?", {
  *   text: true,
- *   model: "exa",
+ *   model: "exa-pro",
  *   systemPrompt: "Answer in a technical manner suitable for experts."
  * });
  * ```
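
For reference, the practical effect of the widened union at the type level, shown as a minimal standalone sketch that reproduces only the declaration above (not the full module):

// Reproduction of the changed AnswerOptions declaration from index.d.mts.
type AnswerOptions = {
    stream?: boolean;
    text?: boolean;
    model?: "exa" | "exa-pro"; // was: model?: "exa"
    systemPrompt?: string;
    outputSchema?: Record<string, unknown>;
};

// Under 1.8.15 this assignment was a TS2322 error; under 1.8.16 it type-checks.
const opts: AnswerOptions = { text: true, model: "exa-pro" };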
package/dist/index.d.ts CHANGED
@@ -3255,14 +3255,14 @@ type Status = {
  * @typedef {Object} AnswerOptions
  * @property {boolean} [stream] - Whether to stream the response. Default false.
  * @property {boolean} [text] - Whether to include text in the source results. Default false.
- * @property {"exa"} [model] - The model to use for generating the answer. Default "exa".
+ * @property {"exa" | "exa-pro"} [model] - The model to use for generating the answer. Default "exa".
  * @property {string} [systemPrompt] - A system prompt to guide the LLM's behavior when generating the answer.
  * @property {Object} [outputSchema] - A JSON Schema specification for the structure you expect the output to take
  */
 type AnswerOptions = {
     stream?: boolean;
     text?: boolean;
-    model?: "exa";
+    model?: "exa" | "exa-pro";
     systemPrompt?: string;
     outputSchema?: Record<string, unknown>;
 };
@@ -3389,7 +3389,7 @@ declare class Exa {
  * ```ts
  * const answer = await exa.answer("What is quantum computing?", {
  *   text: true,
- *   model: "exa",
+ *   model: "exa-pro",
  *   systemPrompt: "Answer in a technical manner suitable for experts."
  * });
  * ```
package/dist/index.js CHANGED
@@ -1436,7 +1436,7 @@ var Exa2 = class {
  * ```ts
  * const answer = await exa.answer("What is quantum computing?", {
  *   text: true,
- *   model: "exa",
+ *   model: "exa-pro",
  *   systemPrompt: "Answer in a technical manner suitable for experts."
  * });
  * ```
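
Taken together, the release widens AnswerOptions.model from "exa" to "exa" | "exa-pro" in both declaration files and updates the JSDoc examples to match. A minimal sketch of how a consumer would exercise the new option after upgrading, following the client setup shown in the package README (the EXA_API_KEY environment variable name is an assumption, not part of this diff):

import Exa from "exa-js";

// Construct the client as in the exa-js README; the env var name is assumed.
const exa = new Exa(process.env.EXA_API_KEY);

async function main() {
  // New in 1.8.16: "exa-pro" is accepted for model. Per the README note,
  // it passes 2 extra queries to increase coverage of the search space.
  const answer = await exa.answer("What is the population of New York City?", {
    text: true,       // include source text with citations
    model: "exa-pro", // only "exa" type-checked in 1.8.15
  });
  console.log(answer);
}

main().catch(console.error);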