@mastra/core 0.2.0-alpha.93 → 0.2.0-alpha.95

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (137)
  1. package/README.md +11 -8
  2. package/dist/action/index.d.ts +1 -3
  3. package/dist/agent/index.d.ts +2 -4
  4. package/dist/agent/index.js +3 -5
  5. package/dist/{chunk-SDKEPBBH.js → chunk-4ZUSEHLH.js} +109 -12
  6. package/dist/{chunk-F7ILHRX5.js → chunk-5DZIXJRV.js} +1 -1
  7. package/dist/chunk-AE3H2QEY.js +1 -0
  8. package/dist/{chunk-IE37CBXB.js → chunk-G4LP2IJU.js} +1 -3
  9. package/dist/{chunk-JWVCENG2.js → chunk-JY322VLW.js} +445 -32
  10. package/dist/{chunk-ECUVL2G5.js → chunk-K3N7KJHH.js} +1 -1
  11. package/dist/chunk-MBOUQZQT.js +16 -0
  12. package/dist/{chunk-GGYXCZUW.js → chunk-N44BCOWP.js} +4 -12
  13. package/dist/{chunk-WBPDZBUT.js → chunk-TYIBRZOY.js} +2 -17
  14. package/dist/eval/index.d.ts +1 -3
  15. package/dist/index-CBxGSZZE.d.ts +808 -0
  16. package/dist/index.d.ts +6 -12
  17. package/dist/index.js +12 -26
  18. package/dist/integration/index.d.ts +5 -7
  19. package/dist/llm/index.d.ts +4 -6
  20. package/dist/llm/index.js +1 -6
  21. package/dist/mastra/index.d.ts +2 -5
  22. package/dist/mastra/index.js +2 -6
  23. package/dist/memory/index.d.ts +1 -3
  24. package/dist/memory/index.js +1 -1
  25. package/dist/relevance/index.d.ts +2 -19
  26. package/dist/relevance/index.js +4 -6
  27. package/dist/storage/index.d.ts +2 -4
  28. package/dist/storage/index.js +2 -3
  29. package/dist/telemetry/index.js +1 -2
  30. package/dist/tools/index.d.ts +2 -4
  31. package/dist/tts/index.js +2 -3
  32. package/dist/vector/index.d.ts +0 -7
  33. package/dist/vector/index.js +1 -2
  34. package/dist/vector/libsql/index.d.ts +0 -1
  35. package/dist/vector/libsql/index.js +2 -3
  36. package/dist/{workflow-CSwxE-4q.d.ts → workflow-BA2Pnq90.d.ts} +1 -1
  37. package/dist/workflows/index.d.ts +3 -5
  38. package/package.json +20 -56
  39. package/dist/chunk-5DYJZVB7.js +0 -66
  40. package/dist/chunk-6ZVFVYLE.js +0 -101
  41. package/dist/chunk-AD6IIOCI.js +0 -11
  42. package/dist/chunk-BOS3IA23.js +0 -76
  43. package/dist/chunk-CQU72KBX.js +0 -54
  44. package/dist/chunk-HH5JIATB.js +0 -36
  45. package/dist/chunk-IEFQZ3IL.js +0 -78
  46. package/dist/chunk-IJ55HGH4.js +0 -24
  47. package/dist/chunk-NNNAWW2H.js +0 -66
  48. package/dist/chunk-NYBGZL6Z.js +0 -30
  49. package/dist/chunk-QX2CHXA7.js +0 -408
  50. package/dist/chunk-RAR7V3E3.js +0 -66
  51. package/dist/chunk-RI3ECMVF.js +0 -636
  52. package/dist/chunk-VPSYTVWP.js +0 -66
  53. package/dist/chunk-ZA7MIXNW.js +0 -66
  54. package/dist/embeddings/index.d.ts +0 -8
  55. package/dist/embeddings/index.js +0 -2
  56. package/dist/embeddings/model/providers/bedrock.d.ts +0 -42
  57. package/dist/embeddings/model/providers/bedrock.js +0 -3
  58. package/dist/embeddings/model/providers/bedrock.test.d.ts +0 -2
  59. package/dist/embeddings/model/providers/bedrock.test.js +0 -37
  60. package/dist/embeddings/model/providers/cohere.d.ts +0 -34
  61. package/dist/embeddings/model/providers/cohere.js +0 -3
  62. package/dist/embeddings/model/providers/cohere.test.d.ts +0 -2
  63. package/dist/embeddings/model/providers/cohere.test.js +0 -37
  64. package/dist/embeddings/model/providers/embedder.d.ts +0 -28
  65. package/dist/embeddings/model/providers/embedder.js +0 -2
  66. package/dist/embeddings/model/providers/google.d.ts +0 -34
  67. package/dist/embeddings/model/providers/google.js +0 -3
  68. package/dist/embeddings/model/providers/google.test.d.ts +0 -2
  69. package/dist/embeddings/model/providers/google.test.js +0 -37
  70. package/dist/embeddings/model/providers/mistral.d.ts +0 -34
  71. package/dist/embeddings/model/providers/mistral.js +0 -3
  72. package/dist/embeddings/model/providers/mistral.test.d.ts +0 -2
  73. package/dist/embeddings/model/providers/mistral.test.js +0 -37
  74. package/dist/embeddings/model/providers/mock.d.ts +0 -20
  75. package/dist/embeddings/model/providers/mock.js +0 -3
  76. package/dist/embeddings/model/providers/mock.test.d.ts +0 -2
  77. package/dist/embeddings/model/providers/mock.test.js +0 -29
  78. package/dist/embeddings/model/providers/openai.d.ts +0 -34
  79. package/dist/embeddings/model/providers/openai.js +0 -3
  80. package/dist/embeddings/model/providers/openai.test.d.ts +0 -2
  81. package/dist/embeddings/model/providers/openai.test.js +0 -37
  82. package/dist/embeddings/model/providers/voyage.d.ts +0 -34
  83. package/dist/embeddings/model/providers/voyage.js +0 -3
  84. package/dist/embeddings/model/providers/voyage.test.d.ts +0 -2
  85. package/dist/embeddings/model/providers/voyage.test.js +0 -37
  86. package/dist/index-B48181D5.d.ts +0 -1070
  87. package/dist/llm/model/index.d.ts +0 -45
  88. package/dist/llm/model/index.js +0 -5
  89. package/dist/llm/model/providers/anthropic-vertex.d.ts +0 -31
  90. package/dist/llm/model/providers/anthropic-vertex.js +0 -23
  91. package/dist/llm/model/providers/anthropic.d.ts +0 -29
  92. package/dist/llm/model/providers/anthropic.js +0 -21
  93. package/dist/llm/model/providers/azure.d.ts +0 -48
  94. package/dist/llm/model/providers/azure.js +0 -50
  95. package/dist/llm/model/providers/baseten.d.ts +0 -33
  96. package/dist/llm/model/providers/baseten.js +0 -29
  97. package/dist/llm/model/providers/bedrock.d.ts +0 -32
  98. package/dist/llm/model/providers/bedrock.js +0 -24
  99. package/dist/llm/model/providers/cerebras.d.ts +0 -30
  100. package/dist/llm/model/providers/cerebras.js +0 -22
  101. package/dist/llm/model/providers/cohere.d.ts +0 -30
  102. package/dist/llm/model/providers/cohere.js +0 -22
  103. package/dist/llm/model/providers/deepinfra.d.ts +0 -30
  104. package/dist/llm/model/providers/deepinfra.js +0 -22
  105. package/dist/llm/model/providers/deepseek.d.ts +0 -30
  106. package/dist/llm/model/providers/deepseek.js +0 -22
  107. package/dist/llm/model/providers/fireworks.d.ts +0 -35
  108. package/dist/llm/model/providers/fireworks.js +0 -40
  109. package/dist/llm/model/providers/google-vertex.d.ts +0 -48
  110. package/dist/llm/model/providers/google-vertex.js +0 -22
  111. package/dist/llm/model/providers/google.d.ts +0 -54
  112. package/dist/llm/model/providers/google.js +0 -23
  113. package/dist/llm/model/providers/grok.d.ts +0 -32
  114. package/dist/llm/model/providers/grok.js +0 -22
  115. package/dist/llm/model/providers/groq.d.ts +0 -37
  116. package/dist/llm/model/providers/groq.js +0 -42
  117. package/dist/llm/model/providers/lmstudio.d.ts +0 -29
  118. package/dist/llm/model/providers/lmstudio.js +0 -22
  119. package/dist/llm/model/providers/mistral.d.ts +0 -30
  120. package/dist/llm/model/providers/mistral.js +0 -22
  121. package/dist/llm/model/providers/mock.d.ts +0 -30
  122. package/dist/llm/model/providers/mock.js +0 -83
  123. package/dist/llm/model/providers/ollama.d.ts +0 -31
  124. package/dist/llm/model/providers/ollama.js +0 -23
  125. package/dist/llm/model/providers/openai-compat.d.ts +0 -39
  126. package/dist/llm/model/providers/openai-compat.js +0 -6
  127. package/dist/llm/model/providers/openai.d.ts +0 -34
  128. package/dist/llm/model/providers/openai.js +0 -6
  129. package/dist/llm/model/providers/openai.test.d.ts +0 -2
  130. package/dist/llm/model/providers/openai.test.js +0 -220
  131. package/dist/llm/model/providers/perplexity.d.ts +0 -30
  132. package/dist/llm/model/providers/perplexity.js +0 -22
  133. package/dist/llm/model/providers/portkey.d.ts +0 -34
  134. package/dist/llm/model/providers/portkey.js +0 -22
  135. package/dist/llm/model/providers/togetherai.d.ts +0 -30
  136. package/dist/llm/model/providers/togetherai.js +0 -22
  137. package/dist/types-M16hSruO.d.ts +0 -40
@@ -1,636 +0,0 @@
- import { InstrumentClass } from './chunk-6ZVFVYLE.js';
- import { delay } from './chunk-KNPBNSJ7.js';
- import { MastraBase } from './chunk-G4MCO7XF.js';
- import { RegisteredLogger, LogLevel } from './chunk-ICMEXHKD.js';
- import { __name, __privateAdd, __privateSet, __privateGet } from './chunk-AJJZUHB4.js';
- import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
- import { createAnthropic } from '@ai-sdk/anthropic';
- import { createAzure } from '@ai-sdk/azure';
- import { createCohere } from '@ai-sdk/cohere';
- import { createGoogleGenerativeAI } from '@ai-sdk/google';
- import { createMistral } from '@ai-sdk/mistral';
- import { createOpenAI } from '@ai-sdk/openai';
- import { createXai } from '@ai-sdk/xai';
- import { tool, generateText, jsonSchema, generateObject, streamText, streamObject } from 'ai';
- import { createAnthropicVertex } from 'anthropic-vertex-ai';
- import { z } from 'zod';
-
- function _ts_decorate(decorators, target, key, desc) {
-   var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
-   if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
-   else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
-   return c > 3 && r && Object.defineProperty(target, key, r), r;
- }
- __name(_ts_decorate, "_ts_decorate");
- function _ts_metadata(k, v) {
-   if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
- }
- __name(_ts_metadata, "_ts_metadata");
- var _model, _mastra;
- var _LLM = class _LLM extends MastraBase {
-   constructor({ model }) {
-     super({
-       component: RegisteredLogger.LLM
-     });
-     __privateAdd(this, _model);
-     __privateAdd(this, _mastra);
-     __privateSet(this, _model, model);
-     __privateSet(this, _mastra, {});
-   }
-   __registerPrimitives(p) {
-     if (p.telemetry) {
-       this.__setTelemetry(p.telemetry);
-     }
-     if (p.logger) {
-       this.__setLogger(p.logger);
-     }
-     __privateSet(this, _mastra, p);
-   }
-   getModelType() {
-     const model = __privateGet(this, _model);
-     if (!("provider" in model)) {
-       throw new Error("Model provider is required");
-     }
-     const providerToType = {
-       OPEN_AI: "openai",
-       ANTHROPIC: "anthropic",
-       GROQ: "groq",
-       PERPLEXITY: "perplexity",
-       FIREWORKS: "fireworks",
-       TOGETHER_AI: "togetherai",
-       LM_STUDIO: "lmstuido",
-       BASETEN: "baseten",
-       GOOGLE: "google",
-       MISTRAL: "mistral",
-       X_GROK: "grok",
-       COHERE: "cohere",
-       AZURE: "azure",
-       AMAZON: "amazon",
-       ANTHROPIC_VERTEX: "anthropic-vertex",
-       DEEPSEEK: "deepseek"
-     };
-     const type = providerToType[model.provider] ?? model.provider;
-     this.logger.debug(`[LLM] - Model resolved to provider ${model.provider}`, {
-       sdk_type: type,
-       provider: model.provider
-     });
-     return type;
-   }
-   createOpenAICompatibleModel({ baseURL, apiKey, defaultModelName, modelName, fetch }) {
-     this.log(LogLevel.DEBUG, `Creating OpenAI compatible model with baseURL: ${baseURL}`);
-     const client = createOpenAI({
-       baseURL,
-       apiKey,
-       fetch
-     });
-     return client(modelName || defaultModelName);
-   }
-   createModelDef({ model }) {
-     let modelDef;
-     if (model.type === "openai") {
-       this.logger.debug(`[LLM] - Initializing OpenAI model ${model.name || "gpt-4o-2024-08-06"}`);
-       const openai = createOpenAI({
-         apiKey: model?.apiKey || process.env.OPENAI_API_KEY
-       });
-       modelDef = openai(model.name || "gpt-4o-2024-08-06", {
-         structuredOutputs: true
-       });
-     } else if (model.type === "anthropic") {
-       this.log(LogLevel.DEBUG, `Initializing Anthropic model ${model.name || "claude-3-5-sonnet-20240620"}`);
-       const anthropic = createAnthropic({
-         apiKey: model?.apiKey || process.env.ANTHROPIC_API_KEY
-       });
-       modelDef = anthropic(model.name || "claude-3-5-sonnet-20240620");
-     } else if (model.type === "google") {
-       this.log(LogLevel.DEBUG, `Initializing Google model ${model.name || "gemini-1.5-pro-latest"}`);
-       const google = createGoogleGenerativeAI({
-         baseURL: "https://generativelanguage.googleapis.com/v1beta",
-         apiKey: model?.apiKey || process.env.GOOGLE_GENERATIVE_AI_API_KEY || ""
-       });
-       modelDef = google(model.name || "gemini-1.5-pro-latest");
-     } else if (model.type === "groq") {
-       this.log(LogLevel.DEBUG, `Initializing Groq model ${model.name || "llama-3.2-90b-text-preview"}`);
-       modelDef = this.createOpenAICompatibleModel({
-         baseURL: "https://api.groq.com/openai/v1",
-         apiKey: model?.apiKey || process.env.GROQ_API_KEY || "",
-         defaultModelName: "llama-3.2-90b-text-preview",
-         modelName: model.name
-       });
-     } else if (model.type === "perplexity") {
-       this.log(LogLevel.DEBUG, `Initializing Perplexity model ${model.name || "llama-3.1-sonar-large-128k-chat"}`);
-       modelDef = this.createOpenAICompatibleModel({
-         baseURL: "https://api.perplexity.ai/",
-         apiKey: model?.apiKey || process.env.PERPLEXITY_API_KEY || "",
-         defaultModelName: "llama-3.1-sonar-large-128k-chat",
-         modelName: model.name
-       });
-     } else if (model.type === "fireworks") {
-       this.log(LogLevel.DEBUG, `Initializing Fireworks model ${model.name || "llama-v3p1-70b-instruct"}`);
-       modelDef = this.createOpenAICompatibleModel({
-         baseURL: "https://api.fireworks.ai/inference/v1",
-         apiKey: model?.apiKey || process.env.FIREWORKS_API_KEY || "",
-         defaultModelName: "llama-v3p1-70b-instruct",
-         modelName: model.name
-       });
-     } else if (model.type === "togetherai") {
-       this.log(LogLevel.DEBUG, `Initializing TogetherAI model ${model.name || "google/gemma-2-9b-it"}`);
-       modelDef = this.createOpenAICompatibleModel({
-         baseURL: "https://api.together.xyz/v1/",
-         apiKey: model?.apiKey || process.env.TOGETHER_AI_API_KEY || "",
-         defaultModelName: "google/gemma-2-9b-it",
-         modelName: model.name
-       });
-     } else if (model.type === "lmstudio") {
-       this.log(LogLevel.DEBUG, `Initializing LMStudio model ${model.name || "llama-3.2-1b"}`);
-       if (!model?.baseURL) {
-         const error = `LMStudio model requires a baseURL`;
-         this.logger.error(error);
-         throw new Error(error);
-       }
-       modelDef = this.createOpenAICompatibleModel({
-         baseURL: model.baseURL,
-         apiKey: "not-needed",
-         defaultModelName: "llama-3.2-1b",
-         modelName: model.name
-       });
-     } else if (model.type === "baseten") {
-       this.log(LogLevel.DEBUG, `Initializing BaseTen model ${model.name || "llama-3.1-70b-instruct"}`);
-       if (model?.fetch) {
-         const error = `Custom fetch is required to use ${model.type}. see https://docs.baseten.co/api-reference/openai for more information`;
-         this.logger.error(error);
-         throw new Error(error);
-       }
-       modelDef = this.createOpenAICompatibleModel({
-         baseURL: "https://bridge.baseten.co/v1/direct",
-         apiKey: model?.apiKey || process.env.BASETEN_API_KEY || "",
-         defaultModelName: "llama-3.1-70b-instruct",
-         modelName: model.name
-       });
-     } else if (model.type === "mistral") {
-       this.log(LogLevel.DEBUG, `Initializing Mistral model ${model.name || "pixtral-large-latest"}`);
-       const mistral = createMistral({
-         baseURL: "https://api.mistral.ai/v1",
-         apiKey: model?.apiKey || process.env.MISTRAL_API_KEY || ""
-       });
-       modelDef = mistral(model.name || "pixtral-large-latest");
-     } else if (model.type === "grok") {
-       this.log(LogLevel.DEBUG, `Initializing X Grok model ${model.name || "grok-beta"}`);
-       const xAi = createXai({
-         baseURL: "https://api.x.ai/v1",
-         apiKey: process.env.XAI_API_KEY ?? ""
-       });
-       modelDef = xAi(model.name || "grok-beta");
-     } else if (model.type === "cohere") {
-       this.log(LogLevel.DEBUG, `Initializing Cohere model ${model.name || "command-r-plus"}`);
-       const cohere = createCohere({
-         baseURL: "https://api.cohere.com/v2",
-         apiKey: model?.apiKey || process.env.COHERE_API_KEY || ""
-       });
-       modelDef = cohere(model.name || "command-r-plus");
-     } else if (model.type === "azure") {
-       this.log(LogLevel.DEBUG, `Initializing Azure model ${model.name || "gpt-35-turbo-instruct"}`);
-       const azure = createAzure({
-         resourceName: process.env.AZURE_RESOURCE_NAME || "",
-         apiKey: model?.apiKey || process.env.AZURE_API_KEY || "",
-         apiVersion: model?.apiVersion,
-         baseURL: model?.baseURL,
-         headers: model?.headers,
-         fetch: model?.fetch
-       });
-       modelDef = azure(model.name || "gpt-35-turbo-instruct");
-     } else if (model.type === "amazon") {
-       this.log(LogLevel.DEBUG, `Initializing Amazon model ${model.name || "amazon-titan-tg1-large"}`);
-       const amazon = createAmazonBedrock({
-         region: process.env.AWS_REGION || "",
-         accessKeyId: process.env.AWS_ACCESS_KEY_ID || "",
-         secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY || "",
-         sessionToken: process.env.AWS_SESSION_TOKEN || ""
-       });
-       modelDef = amazon(model.name || "amazon-titan-tg1-large");
-     } else if (model.type === "anthropic-vertex") {
-       this.log(LogLevel.DEBUG, `Initializing Anthropic Vertex model ${model.name || "claude-3-5-sonnet@20240620"}`);
-       const anthropicVertex = createAnthropicVertex({
-         region: process.env.GOOGLE_VERTEX_REGION,
-         projectId: process.env.GOOGLE_VERTEX_PROJECT_ID,
-         apiKey: process.env.ANTHROPIC_API_KEY ?? ""
-       });
-       modelDef = anthropicVertex(model.name || "claude-3-5-sonnet@20240620");
-     } else if (model.type === "deepseek") {
-       this.log(LogLevel.DEBUG, `Initializing Deepseek model ${model.name || "deepseek-chat"}`);
-       modelDef = this.createOpenAICompatibleModel({
-         baseURL: "https://api.deepseek.com/v1/",
-         apiKey: model?.apiKey || process.env.DEEPSEEK_API_KEY || "",
-         defaultModelName: "deepseek-chat",
-         modelName: model.name
-       });
-     } else {
-       const error = `Invalid model type: ${model.type}`;
-       this.logger.error(error);
-       throw new Error(error);
-     }
-     return modelDef;
-   }
-   async getParams({ tools, resultTool, model }) {
-     const toolsConverted = Object.entries(tools).reduce((memo, [key, val]) => {
-       memo[key] = tool(val);
-       return memo;
-     }, {});
-     let answerTool = {};
-     if (resultTool) {
-       answerTool = {
-         answer: tool(resultTool)
-       };
-     }
-     let modelDef;
-     if ("type" in model) {
-       modelDef = this.createModelDef({
-         model
-       });
-     } else {
-       if (model.model instanceof Function) {
-         modelDef = await model.model();
-       } else {
-         modelDef = model.model;
-       }
-     }
-     return {
-       toolsConverted,
-       modelDef,
-       answerTool,
-       toolChoice: model.toolChoice || "auto"
-     };
-   }
-   convertTools(tools) {
-     const converted = Object.entries(tools || {}).reduce((memo, value) => {
-       const k = value[0];
-       const tool2 = value[1];
-       if (tool2) {
-         memo[k] = {
-           description: tool2.description,
-           parameters: tool2.inputSchema,
-           execute: /* @__PURE__ */ __name(async (props) => {
-             try {
-               this.logger.debug(`Executing tool ${k}`, {
-                 tool: k,
-                 props
-               });
-               return tool2.execute({
-                 context: props,
-                 mastra: __privateGet(this, _mastra)
-               });
-             } catch (error) {
-               this.logger.error(`Error executing tool ${k}`, {
-                 tool: k,
-                 props,
-                 error
-               });
-               throw error;
-             }
-           }, "execute")
-         };
-       }
-       return memo;
-     }, {});
-     return converted;
-   }
-   convertToMessages(messages) {
-     if (Array.isArray(messages)) {
-       return messages.map((m) => {
-         if (typeof m === "string") {
-           return {
-             role: "user",
-             content: m
-           };
-         }
-         return m;
-       });
-     }
-     return [
-       {
-         role: "user",
-         content: messages
-       }
-     ];
-   }
-   async generate(messages, { maxSteps = 5, onStepFinish, tools, convertedTools, runId, output = "text", temperature } = {}) {
-     const msgs = this.convertToMessages(messages);
-     if (output === "text") {
-       return await this.__text({
-         messages: msgs,
-         onStepFinish,
-         maxSteps,
-         tools,
-         convertedTools,
-         runId,
-         temperature
-       });
-     }
-     return await this.__textObject({
-       messages: msgs,
-       structuredOutput: output,
-       onStepFinish,
-       maxSteps,
-       tools,
-       convertedTools,
-       runId
-     });
-   }
-   async stream(messages, { maxSteps = 5, onFinish, onStepFinish, tools, convertedTools, runId, output = "text", temperature } = {}) {
-     const msgs = this.convertToMessages(messages);
-     if (output === "text") {
-       return await this.__stream({
-         messages: msgs,
-         onStepFinish,
-         onFinish,
-         maxSteps,
-         tools,
-         convertedTools,
-         runId,
-         temperature
-       });
-     }
-     return await this.__streamObject({
-       messages: msgs,
-       structuredOutput: output,
-       onStepFinish,
-       onFinish,
-       maxSteps,
-       tools,
-       convertedTools,
-       runId,
-       temperature
-     });
-   }
-   async __text({ messages, onStepFinish, maxSteps = 5, tools, runId, convertedTools, temperature }) {
-     const model = __privateGet(this, _model);
-     this.logger.debug(`[LLM] - Generating text`, {
-       runId,
-       messages,
-       maxSteps,
-       tools: Object.keys(tools || convertedTools || {})
-     });
-     let modelToPass;
-     if ("name" in model) {
-       modelToPass = this.__getNamedModel();
-     } else {
-       modelToPass = model;
-     }
-     const params = await this.getParams({
-       tools: convertedTools || this.convertTools(tools),
-       model: modelToPass
-     });
-     const argsForExecute = {
-       model: params.modelDef,
-       temperature,
-       tools: {
-         ...params.toolsConverted,
-         ...params.answerTool
-       },
-       toolChoice: params.toolChoice,
-       maxSteps,
-       onStepFinish: /* @__PURE__ */ __name(async (props) => {
-         onStepFinish?.(JSON.stringify(props, null, 2));
-         this.logger.debug("[LLM] - Step Change:", {
-           text: props?.text,
-           toolCalls: props?.toolCalls,
-           toolResults: props?.toolResults,
-           finishReason: props?.finishReason,
-           usage: props?.usage,
-           runId
-         });
-         if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
-           this.logger.warn("Rate limit approaching, waiting 10 seconds");
-           await delay(10 * 1e3);
-         }
-       }, "onStepFinish")
-     };
-     return await generateText({
-       messages,
-       ...argsForExecute,
-       experimental_telemetry: this.experimental_telemetry
-     });
-   }
-   __getNamedModel() {
-     const model = __privateGet(this, _model);
-     if (!("name" in model)) {
-       throw new Error("Model name is required");
-     }
-     return {
-       type: this.getModelType(),
-       name: model.name,
-       toolChoice: model.toolChoice,
-       apiKey: model?.apiKey,
-       baseURL: model?.baseURL,
-       headers: model?.headers,
-       fetch: model?.fetch
-     };
-   }
-   async __textObject({ messages, onStepFinish, maxSteps = 5, tools, convertedTools, structuredOutput, runId, temperature }) {
-     const model = __privateGet(this, _model);
-     this.logger.debug(`[LLM] - Generating a text object`, {
-       runId
-     });
-     let modelToPass;
-     if ("name" in model) {
-       modelToPass = this.__getNamedModel();
-     } else {
-       modelToPass = model;
-     }
-     const params = await this.getParams({
-       tools: convertedTools || this.convertTools(tools),
-       model: modelToPass
-     });
-     const argsForExecute = {
-       model: params.modelDef,
-       temperature,
-       tools: {
-         ...params.toolsConverted,
-         ...params.answerTool
-       },
-       toolChoice: params.toolChoice,
-       maxSteps,
-       onStepFinish: /* @__PURE__ */ __name(async (props) => {
-         this.logger.debug("[LLM] - Step Change:", {
-           text: props?.text,
-           toolCalls: props?.toolCalls,
-           toolResults: props?.toolResults,
-           finishReason: props?.finishReason,
-           usage: props?.usage
-         });
-         onStepFinish?.(JSON.stringify(props, null, 2));
-         if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
-           this.logger.warn("Rate limit approaching, waiting 10 seconds", {
-             runId
-           });
-           await delay(10 * 1e3);
-         }
-       }, "onStepFinish")
-     };
-     let schema;
-     let output = "object";
-     if (typeof structuredOutput.parse === "function") {
-       schema = structuredOutput;
-       if (schema instanceof z.ZodArray) {
-         output = "array";
-         schema = schema._def.type;
-       }
-     } else {
-       schema = jsonSchema(structuredOutput);
-     }
-     return await generateObject({
-       messages,
-       ...argsForExecute,
-       output,
-       schema,
-       experimental_telemetry: this.experimental_telemetry
-     });
-   }
-   async __stream({ messages, onStepFinish, onFinish, maxSteps = 5, tools, runId, convertedTools, temperature }) {
-     const model = __privateGet(this, _model);
-     this.log(LogLevel.DEBUG, `Streaming text with ${messages.length} messages`, {
-       runId
-     });
-     let modelToPass;
-     if ("name" in model) {
-       modelToPass = this.__getNamedModel();
-     } else {
-       modelToPass = model;
-     }
-     const params = await this.getParams({
-       tools: convertedTools || this.convertTools(tools),
-       model: modelToPass
-     });
-     const argsForExecute = {
-       temperature,
-       model: params.modelDef,
-       tools: {
-         ...params.toolsConverted,
-         ...params.answerTool
-       },
-       toolChoice: params.toolChoice,
-       maxSteps,
-       onStepFinish: /* @__PURE__ */ __name(async (props) => {
-         this.logger.debug("[LLM] - Step Change:", {
-           text: props?.text,
-           toolCalls: props?.toolCalls,
-           toolResults: props?.toolResults,
-           finishReason: props?.finishReason,
-           usage: props?.usage
-         });
-         onStepFinish?.(JSON.stringify(props, null, 2));
-         if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
-           this.logger.warn("Rate limit approaching, waiting 10 seconds", {
-             runId
-           });
-           await delay(10 * 1e3);
-         }
-       }, "onStepFinish"),
-       onFinish: /* @__PURE__ */ __name(async (props) => {
-         this.logger.debug("[LLM] - On Finish:", {
-           text: props?.text,
-           toolCalls: props?.toolCalls,
-           toolResults: props?.toolResults,
-           finishReason: props?.finishReason,
-           usage: props?.usage
-         });
-         onFinish?.(JSON.stringify(props, null, 2));
-       }, "onFinish")
-     };
-     return await streamText({
-       messages,
-       ...argsForExecute,
-       experimental_telemetry: this.experimental_telemetry
-     });
-   }
-   async __streamObject({ messages, onStepFinish, onFinish, maxSteps = 5, tools, convertedTools, structuredOutput, runId, temperature }) {
-     const model = __privateGet(this, _model);
-     this.log(LogLevel.DEBUG, `Streaming text with ${messages.length} messages`, {
-       runId
-     });
-     let modelToPass;
-     if ("name" in model) {
-       modelToPass = this.__getNamedModel();
-     } else {
-       modelToPass = model;
-     }
-     const params = await this.getParams({
-       tools: convertedTools || this.convertTools(tools),
-       model: modelToPass
-     });
-     const argsForExecute = {
-       temperature,
-       model: params.modelDef,
-       tools: {
-         ...params.toolsConverted,
-         ...params.answerTool
-       },
-       toolChoice: params.toolChoice,
-       maxSteps,
-       onStepFinish: /* @__PURE__ */ __name(async (props) => {
-         this.logger.debug("[LLM] - Step Change:", {
-           text: props?.text,
-           toolCalls: props?.toolCalls,
-           toolResults: props?.toolResults,
-           finishReason: props?.finishReason,
-           usage: props?.usage
-         });
-         onStepFinish?.(JSON.stringify(props, null, 2));
-         if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
-           this.logger.warn("Rate limit approaching, waiting 10 seconds", {
-             runId
-           });
-           await delay(10 * 1e3);
-         }
-       }, "onStepFinish"),
-       onFinish: /* @__PURE__ */ __name(async (props) => {
-         this.logger.debug("[LLM] - On Finish:", {
-           text: props?.text,
-           toolCalls: props?.toolCalls,
-           toolResults: props?.toolResults,
-           finishReason: props?.finishReason,
-           usage: props?.usage
-         });
-         onFinish?.(JSON.stringify(props, null, 2));
-       }, "onFinish")
-     };
-     let schema;
-     let output = "object";
-     if (typeof structuredOutput.parse === "function") {
-       schema = structuredOutput;
-       if (schema instanceof z.ZodArray) {
-         output = "array";
-         schema = schema._def.type;
-       }
-     } else {
-       schema = jsonSchema(structuredOutput);
-     }
-     return await streamObject({
-       messages,
-       ...argsForExecute,
-       output,
-       schema,
-       experimental_telemetry: this.experimental_telemetry
-     });
-   }
- };
- _model = new WeakMap();
- _mastra = new WeakMap();
- __name(_LLM, "LLM");
- var LLM = _LLM;
- LLM = _ts_decorate([
-   InstrumentClass({
-     prefix: "llm",
-     excludeMethods: [
-       "__setTools",
-       "__setLogger",
-       "__setTelemetry",
-       "#log"
-     ]
-   }),
-   _ts_metadata("design:type", Function),
-   _ts_metadata("design:paramtypes", [
-     Object
-   ])
- ], LLM);
-
- export { LLM };
@@ -1,66 +0,0 @@
- import { MastraEmbedder } from './chunk-AD6IIOCI.js';
- import { __name, __publicField } from './chunk-AJJZUHB4.js';
- import { createCohere } from '@ai-sdk/cohere';
- import { embed as embed$1, embedMany as embedMany$1 } from 'ai';
-
- async function embed(value, { apiKey = process.env.COHERE_API_KEY || "", model = "embed-english-v3.0", baseURL, maxRetries = 3 }) {
-   const cohere = createCohere({
-     baseURL,
-     apiKey
-   });
-   const eModel = cohere.textEmbeddingModel(model);
-   return await embed$1({
-     model: eModel,
-     value,
-     maxRetries
-   });
- }
- __name(embed, "embed");
- async function embedMany(values, { apiKey = process.env.COHERE_API_KEY || "", model = "embed-english-v3.0", baseURL, maxRetries = 3 }) {
-   const cohere = createCohere({
-     baseURL,
-     apiKey
-   });
-   const eModel = cohere.textEmbeddingModel(model);
-   return await embedMany$1({
-     model: eModel,
-     values,
-     maxRetries
-   });
- }
- __name(embedMany, "embedMany");
- var _CohereEmbedder = class _CohereEmbedder extends MastraEmbedder {
-   constructor({ apiKey = process.env.COHERE_API_KEY || "", model = "embed-english-v3.0", baseURL }) {
-     super();
-     __publicField(this, "apiKey");
-     __publicField(this, "model");
-     __publicField(this, "baseURL");
-     this.apiKey = apiKey;
-     this.model = model;
-     this.baseURL = baseURL;
-   }
-   async embed(value, { maxRetries } = {
-     maxRetries: 3
-   }) {
-     return embed(value, {
-       apiKey: this.apiKey,
-       model: this.model,
-       baseURL: this.baseURL,
-       maxRetries
-     });
-   }
-   async embedMany(values, { maxRetries } = {
-     maxRetries: 3
-   }) {
-     return embedMany(values, {
-       apiKey: this.apiKey,
-       model: this.model,
-       baseURL: this.baseURL,
-       maxRetries
-     });
-   }
- };
- __name(_CohereEmbedder, "CohereEmbedder");
- var CohereEmbedder = _CohereEmbedder;
-
- export { CohereEmbedder, embed, embedMany };