@mastra/core 0.2.0-alpha.84 → 0.2.0-alpha.87

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/README.md +1 -1
  2. package/dist/action/index.d.ts +2 -2
  3. package/dist/agent/index.d.ts +6 -6
  4. package/dist/agent/index.js +1 -1
  5. package/dist/chunk-2ISN3AA7.js +392 -0
  6. package/dist/chunk-2J5OHBUG.js +24 -0
  7. package/dist/{chunk-Y7TKCKRI.js → chunk-5NQ3MEZM.js} +8 -8
  8. package/dist/{chunk-SAXFXAKK.js → chunk-73XDWPXJ.js} +41 -24
  9. package/dist/{chunk-3IV6WDJY.js → chunk-D66E7L7R.js} +1 -1
  10. package/dist/{chunk-3THCTISX.js → chunk-EVYBUFXB.js} +12 -8
  11. package/dist/{chunk-PRYZIZXD.js → chunk-I3MJB67Z.js} +8 -8
  12. package/dist/{chunk-42THOFKJ.js → chunk-RCS7AVH6.js} +1 -1
  13. package/dist/chunk-RLPH6TDJ.js +30 -0
  14. package/dist/embeddings/index.d.ts +2 -2
  15. package/dist/embeddings/index.js +1 -1
  16. package/dist/eval/index.d.ts +2 -2
  17. package/dist/filter/index.d.ts +6 -6
  18. package/dist/{index-62DyKJRU.d.ts → index-Duqv1Yom.d.ts} +340 -322
  19. package/dist/index.d.ts +6 -6
  20. package/dist/index.js +11 -11
  21. package/dist/integration/index.d.ts +7 -7
  22. package/dist/llm/index.d.ts +3 -3
  23. package/dist/llm/model/providers/anthropic-vertex.d.ts +31 -0
  24. package/dist/llm/model/providers/anthropic-vertex.js +23 -0
  25. package/dist/llm/model/providers/anthropic.d.ts +29 -0
  26. package/dist/llm/model/providers/anthropic.js +21 -0
  27. package/dist/llm/model/providers/azure.d.ts +48 -0
  28. package/dist/llm/model/providers/azure.js +50 -0
  29. package/dist/llm/model/providers/baseten.d.ts +33 -0
  30. package/dist/llm/model/providers/baseten.js +29 -0
  31. package/dist/llm/model/providers/bedrock.d.ts +32 -0
  32. package/dist/llm/model/providers/bedrock.js +24 -0
  33. package/dist/llm/model/providers/cerebras.d.ts +30 -0
  34. package/dist/llm/model/providers/cerebras.js +22 -0
  35. package/dist/llm/model/providers/cohere.d.ts +30 -0
  36. package/dist/llm/model/providers/cohere.js +22 -0
  37. package/dist/llm/model/providers/deepinfra.d.ts +30 -0
  38. package/dist/llm/model/providers/deepinfra.js +22 -0
  39. package/dist/llm/model/providers/deepseek.d.ts +30 -0
  40. package/dist/llm/model/providers/deepseek.js +22 -0
  41. package/dist/llm/model/providers/fireworks.d.ts +35 -0
  42. package/dist/llm/model/providers/fireworks.js +40 -0
  43. package/dist/llm/model/providers/google-vertex.d.ts +48 -0
  44. package/dist/llm/model/providers/google-vertex.js +22 -0
  45. package/dist/llm/model/providers/google.d.ts +54 -0
  46. package/dist/llm/model/providers/google.js +23 -0
  47. package/dist/llm/model/providers/grok.d.ts +32 -0
  48. package/dist/llm/model/providers/grok.js +22 -0
  49. package/dist/llm/model/providers/groq.d.ts +37 -0
  50. package/dist/llm/model/providers/groq.js +42 -0
  51. package/dist/llm/model/providers/lmstudio.d.ts +29 -0
  52. package/dist/llm/model/providers/lmstudio.js +22 -0
  53. package/dist/llm/model/providers/mistral.d.ts +30 -0
  54. package/dist/llm/model/providers/mistral.js +22 -0
  55. package/dist/llm/model/providers/mock.d.ts +30 -0
  56. package/dist/llm/model/providers/mock.js +83 -0
  57. package/dist/llm/model/providers/ollama.d.ts +31 -0
  58. package/dist/llm/model/providers/ollama.js +23 -0
  59. package/dist/llm/model/providers/openai-compat.d.ts +39 -0
  60. package/dist/llm/model/providers/openai-compat.js +6 -0
  61. package/dist/llm/model/providers/openai.d.ts +34 -0
  62. package/dist/llm/model/providers/openai.js +6 -0
  63. package/dist/llm/model/providers/openai.test.d.ts +2 -0
  64. package/dist/llm/model/providers/openai.test.js +220 -0
  65. package/dist/llm/model/providers/perplexity.d.ts +30 -0
  66. package/dist/llm/model/providers/perplexity.js +22 -0
  67. package/dist/llm/model/providers/portkey.d.ts +34 -0
  68. package/dist/llm/model/providers/portkey.js +22 -0
  69. package/dist/llm/model/providers/togetherai.d.ts +30 -0
  70. package/dist/llm/model/providers/togetherai.js +22 -0
  71. package/dist/mastra/index.d.ts +3 -3
  72. package/dist/memory/index.d.ts +1 -1
  73. package/dist/memory/index.js +1 -1
  74. package/dist/model-QGWIMOSx.d.ts +31 -0
  75. package/dist/relevance/index.d.ts +2 -2
  76. package/dist/relevance/index.js +2 -2
  77. package/dist/storage/index.d.ts +3 -3
  78. package/dist/storage/index.js +1 -1
  79. package/dist/tools/index.d.ts +3 -3
  80. package/dist/vector/index.js +2 -2
  81. package/dist/{workflow-DGktrYAL.d.ts → workflow-DQ8CtzzU.d.ts} +1 -1
  82. package/dist/workflows/index.d.ts +4 -4
  83. package/package.json +30 -14
package/README.md CHANGED
@@ -53,7 +53,7 @@ import { embed, EmbeddingOptions } from '@mastra/core';
 
 const embeddings = await embed('text to embed', {
   provider: 'OPEN_AI',
-  model: 'text-embedding-ada-002',
+  model: 'text-embedding-3-small',
 });
 ```
 
package/dist/action/index.d.ts CHANGED
@@ -1,5 +1,5 @@
 import 'zod';
-export { I as IAction, aR as IExecutionContext, d as MastraPrimitives } from '../index-62DyKJRU.js';
+export { I as IAction, aT as IExecutionContext, d as MastraPrimitives } from '../index-Duqv1Yom.js';
 import '../engine-EwEG-4Iv.js';
 import '../index-B0-NXUYv.js';
 import '../telemetry-oCUM52DG.js';
@@ -14,5 +14,5 @@ import 'stream';
 import '@opentelemetry/sdk-node';
 import '@opentelemetry/sdk-trace-base';
 import '../metric-D2V4CR8D.js';
-import '../types-M16hSruO.js';
 import 'sift';
+import '../types-M16hSruO.js';
package/dist/agent/index.d.ts CHANGED
@@ -1,18 +1,18 @@
 import 'ai';
 import 'json-schema';
 import 'zod';
-export { A as Agent } from '../index-62DyKJRU.js';
+export { A as Agent } from '../index-Duqv1Yom.js';
 import '../base.js';
 import '../metric-D2V4CR8D.js';
-import '../types-M16hSruO.js';
 import '../telemetry-oCUM52DG.js';
-import '@opentelemetry/api';
-import '@opentelemetry/sdk-node';
-import '@opentelemetry/sdk-trace-base';
-import 'sift';
 import '../index-B0-NXUYv.js';
 import 'pino';
 import 'stream';
+import 'sift';
+import '../types-M16hSruO.js';
 import '../vector/index.js';
+import '@opentelemetry/api';
+import '@opentelemetry/sdk-node';
+import '@opentelemetry/sdk-trace-base';
 import '../engine-EwEG-4Iv.js';
 import '../tts/index.js';
package/dist/agent/index.js CHANGED
@@ -1,4 +1,4 @@
-export { Agent } from '../chunk-SAXFXAKK.js';
+export { Agent } from '../chunk-73XDWPXJ.js';
 import '../chunk-HBTQNIAX.js';
 import '../chunk-SDKEPBBH.js';
 import '../chunk-6WJREZ5F.js';
package/dist/chunk-2ISN3AA7.js ADDED
@@ -0,0 +1,392 @@
+import { delay } from './chunk-LUULSM4U.js';
+import { MastraBase } from './chunk-JCRGAEY6.js';
+import { RegisteredLogger } from './chunk-TJK6TGSR.js';
+import { __name, __privateAdd, __privateSet, __privateGet } from './chunk-AJJZUHB4.js';
+import { generateText, jsonSchema, generateObject, streamText, streamObject } from 'ai';
+import { z } from 'zod';
+
+// src/llm/model/base.ts
+var _mastra;
+var _MastraLLMBase = class _MastraLLMBase extends MastraBase {
+  constructor({ name }) {
+    super({
+      component: RegisteredLogger.LLM,
+      name
+    });
+    // @ts-ignore
+    __privateAdd(this, _mastra);
+  }
+  convertToMessages(messages) {
+    if (Array.isArray(messages)) {
+      return messages.map((m) => {
+        if (typeof m === "string") {
+          return {
+            role: "user",
+            content: m
+          };
+        }
+        return m;
+      });
+    }
+    return [
+      {
+        role: "user",
+        content: messages
+      }
+    ];
+  }
+  __registerPrimitives(p) {
+    if (p.telemetry) {
+      this.__setTelemetry(p.telemetry);
+    }
+    if (p.logger) {
+      this.__setLogger(p.logger);
+    }
+    __privateSet(this, _mastra, p);
+  }
+  async __text(input) {
+    this.logger.debug(`[LLMs:${this.name}] Generating text.`, {
+      input
+    });
+    throw new Error("Method not implemented.");
+  }
+  async __textObject(input) {
+    this.logger.debug(`[LLMs:${this.name}] Generating object.`, {
+      input
+    });
+    throw new Error("Method not implemented.");
+  }
+  async generate(messages, options = {}) {
+    this.logger.debug(`[LLMs:${this.name}] Generating text.`, {
+      messages,
+      options
+    });
+    throw new Error("Method not implemented.");
+  }
+  async __stream(input) {
+    this.logger.debug(`[LLMs:${this.name}] Streaming text.`, {
+      input
+    });
+    throw new Error("Method not implemented.");
+  }
+  async __streamObject(input) {
+    this.logger.debug(`[LLMs:${this.name}] Streaming object.`, {
+      input
+    });
+    throw new Error("Method not implemented.");
+  }
+  async stream(messages, options = {}) {
+    this.logger.debug(`[LLMs:${this.name}] Streaming text.`, {
+      messages,
+      options
+    });
+    throw new Error("Method not implemented.");
+  }
+};
+_mastra = new WeakMap();
+__name(_MastraLLMBase, "MastraLLMBase");
+var MastraLLMBase = _MastraLLMBase;
+
+// src/llm/model/model.ts
+var _model, _mastra2;
+var _MastraLLM = class _MastraLLM extends MastraLLMBase {
+  constructor({ model, mastra }) {
+    super({
+      name: "aisdk"
+    });
+    __privateAdd(this, _model);
+    __privateAdd(this, _mastra2);
+    __privateSet(this, _model, model);
+    if (mastra) {
+      __privateSet(this, _mastra2, mastra);
+      if (mastra.logger) {
+        this.__setLogger(mastra.logger);
+      }
+    }
+  }
+  convertTools(tools) {
+    this.logger.debug("Starting tool conversion for LLM");
+    const converted = Object.entries(tools || {}).reduce((memo, value) => {
+      const k = value[0];
+      const tool = value[1];
+      if (tool) {
+        memo[k] = {
+          description: tool.description,
+          parameters: tool.inputSchema,
+          execute: /* @__PURE__ */ __name(async (props) => {
+            this.logger.debug("Executing tool", {
+              tool: k,
+              props
+            });
+            return tool.execute({
+              context: props,
+              mastra: __privateGet(this, _mastra2)
+            });
+          }, "execute")
+        };
+      }
+      return memo;
+    }, {});
+    this.logger.debug(`Converted tools for LLM`);
+    return converted;
+  }
+  async __text({ runId, messages, maxSteps, tools, convertedTools, temperature, toolChoice = "auto", onStepFinish }) {
+    const model = __privateGet(this, _model);
+    this.logger.debug(`[LLM] - Generating text`, {
+      runId,
+      messages,
+      maxSteps,
+      tools: Object.keys(tools || convertedTools || {})
+    });
+    const finalTools = convertedTools || this.convertTools(tools);
+    const argsForExecute = {
+      model,
+      temperature,
+      tools: {
+        ...finalTools
+      },
+      toolChoice,
+      maxSteps,
+      onStepFinish: /* @__PURE__ */ __name(async (props) => {
+        onStepFinish?.(JSON.stringify(props, null, 2));
+        this.logger.debug("[LLM] - Step Change:", {
+          text: props?.text,
+          toolCalls: props?.toolCalls,
+          toolResults: props?.toolResults,
+          finishReason: props?.finishReason,
+          usage: props?.usage,
+          runId
+        });
+        if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
+          this.logger.warn("Rate limit approaching, waiting 10 seconds");
+          await delay(10 * 1e3);
+        }
+      }, "onStepFinish")
+    };
+    return await generateText({
+      messages,
+      ...argsForExecute,
+      experimental_telemetry: this.experimental_telemetry
+    });
+  }
+  async __textObject({ messages, onStepFinish, maxSteps = 5, tools, convertedTools, structuredOutput, runId, temperature }) {
+    const model = __privateGet(this, _model);
+    this.logger.debug(`[LLM] - Generating a text object`, {
+      runId
+    });
+    const finalTools = convertedTools || this.convertTools(tools);
+    const argsForExecute = {
+      model,
+      temperature,
+      tools: {
+        ...finalTools
+      },
+      maxSteps,
+      onStepFinish: /* @__PURE__ */ __name(async (props) => {
+        onStepFinish?.(JSON.stringify(props, null, 2));
+        this.logger.debug("[LLM] - Step Change:", {
+          text: props?.text,
+          toolCalls: props?.toolCalls,
+          toolResults: props?.toolResults,
+          finishReason: props?.finishReason,
+          usage: props?.usage,
+          runId
+        });
+        if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
+          this.logger.warn("Rate limit approaching, waiting 10 seconds", {
+            runId
+          });
+          await delay(10 * 1e3);
+        }
+      }, "onStepFinish")
+    };
+    let schema;
+    let output = "object";
+    if (typeof structuredOutput.parse === "function") {
+      schema = structuredOutput;
+      if (schema instanceof z.ZodArray) {
+        output = "array";
+        schema = schema._def.type;
+      }
+    } else {
+      schema = jsonSchema(structuredOutput);
+    }
+    return await generateObject({
+      messages,
+      ...argsForExecute,
+      output,
+      schema,
+      experimental_telemetry: this.experimental_telemetry
+    });
+  }
+  async __stream({ messages, onStepFinish, onFinish, maxSteps = 5, tools, convertedTools, runId, temperature }) {
+    const model = __privateGet(this, _model);
+    this.logger.debug(`[LLM] - Streaming text`, {
+      runId,
+      messages,
+      maxSteps,
+      tools: Object.keys(tools || convertedTools || {})
+    });
+    const finalTools = convertedTools || this.convertTools(tools);
+    const argsForExecute = {
+      model,
+      temperature,
+      tools: {
+        ...finalTools
+      },
+      maxSteps,
+      onStepFinish: /* @__PURE__ */ __name(async (props) => {
+        onStepFinish?.(JSON.stringify(props, null, 2));
+        this.logger.debug("[LLM] - Stream Step Change:", {
+          text: props?.text,
+          toolCalls: props?.toolCalls,
+          toolResults: props?.toolResults,
+          finishReason: props?.finishReason,
+          usage: props?.usage,
+          runId
+        });
+        if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
+          this.logger.warn("Rate limit approaching, waiting 10 seconds", {
+            runId
+          });
+          await delay(10 * 1e3);
+        }
+      }, "onStepFinish"),
+      onFinish: /* @__PURE__ */ __name(async (props) => {
+        onFinish?.(JSON.stringify(props, null, 2));
+        this.logger.debug("[LLM] - Stream Finished:", {
+          text: props?.text,
+          toolCalls: props?.toolCalls,
+          toolResults: props?.toolResults,
+          finishReason: props?.finishReason,
+          usage: props?.usage,
+          runId
+        });
+      }, "onFinish")
+    };
+    return await streamText({
+      messages,
+      ...argsForExecute,
+      experimental_telemetry: this.experimental_telemetry
+    });
+  }
+  async __streamObject({ messages, onStepFinish, onFinish, maxSteps = 5, tools, convertedTools, structuredOutput, runId, temperature }) {
+    const model = __privateGet(this, _model);
+    this.logger.debug(`[LLM] - Streaming structured output`, {
+      runId,
+      messages,
+      maxSteps,
+      tools: Object.keys(tools || convertedTools || {})
+    });
+    const finalTools = convertedTools || this.convertTools(tools);
+    const argsForExecute = {
+      model,
+      temperature,
+      tools: {
+        ...finalTools
+      },
+      maxSteps,
+      onStepFinish: /* @__PURE__ */ __name(async (props) => {
+        onStepFinish?.(JSON.stringify(props, null, 2));
+        this.logger.debug("[LLM] - Stream Step Change:", {
+          text: props?.text,
+          toolCalls: props?.toolCalls,
+          toolResults: props?.toolResults,
+          finishReason: props?.finishReason,
+          usage: props?.usage,
+          runId
+        });
+        if (props?.response?.headers?.["x-ratelimit-remaining-tokens"] && parseInt(props?.response?.headers?.["x-ratelimit-remaining-tokens"], 10) < 2e3) {
+          this.logger.warn("Rate limit approaching, waiting 10 seconds", {
+            runId
+          });
+          await delay(10 * 1e3);
+        }
+      }, "onStepFinish"),
+      onFinish: /* @__PURE__ */ __name(async (props) => {
+        onFinish?.(JSON.stringify(props, null, 2));
+        this.logger.debug("[LLM] - Stream Finished:", {
+          text: props?.text,
+          toolCalls: props?.toolCalls,
+          toolResults: props?.toolResults,
+          finishReason: props?.finishReason,
+          usage: props?.usage,
+          runId
+        });
+      }, "onFinish")
+    };
+    let schema;
+    let output = "object";
+    if (typeof structuredOutput.parse === "function") {
+      schema = structuredOutput;
+      if (schema instanceof z.ZodArray) {
+        output = "array";
+        schema = schema._def.type;
+      }
+    } else {
+      schema = jsonSchema(structuredOutput);
+    }
+    return streamObject({
+      messages,
+      ...argsForExecute,
+      output,
+      schema,
+      experimental_telemetry: this.experimental_telemetry
+    });
+  }
+  async generate(messages, { maxSteps = 5, onStepFinish, tools, convertedTools, runId, output = "text", temperature } = {}) {
+    const msgs = this.convertToMessages(messages);
+    if (output === "text") {
+      return await this.__text({
+        messages: msgs,
+        onStepFinish,
+        maxSteps,
+        tools,
+        convertedTools,
+        runId,
+        temperature
+      });
+    }
+    return await this.__textObject({
+      messages: msgs,
+      structuredOutput: output,
+      onStepFinish,
+      maxSteps,
+      tools,
+      convertedTools,
+      runId
+    });
+  }
+  async stream(messages, { maxSteps = 5, onFinish, onStepFinish, tools, convertedTools, runId, output = "text", temperature } = {}) {
+    const msgs = this.convertToMessages(messages);
+    if (output === "text") {
+      return await this.__stream({
+        messages: msgs,
+        onStepFinish,
+        onFinish,
+        maxSteps,
+        tools,
+        convertedTools,
+        runId,
+        temperature
+      });
+    }
+    return await this.__streamObject({
+      messages: msgs,
+      structuredOutput: output,
+      onStepFinish,
+      onFinish,
+      maxSteps,
+      tools,
+      convertedTools,
+      runId,
+      temperature
+    });
+  }
+};
+_model = new WeakMap();
+_mastra2 = new WeakMap();
+__name(_MastraLLM, "MastraLLM");
+var MastraLLM = _MastraLLM;
+
+export { MastraLLM };
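
Note: chunk-2ISN3AA7.js is the compiled `MastraLLM` wrapper around the Vercel AI SDK. `generate` and `stream` accept a string or a message array, and the `output` option decides whether a call routes to generateText/streamText or to generateObject/streamObject. A minimal usage sketch, assuming `MastraLLM` is re-exported from the package's llm entry point (the import path is an assumption; this diff only shows the dist chunk):

```ts
import { z } from 'zod';
import { openai } from '@ai-sdk/openai';
// Assumed re-export path; the diff only shows dist/chunk-2ISN3AA7.js.
import { MastraLLM } from '@mastra/core/llm';

const llm = new MastraLLM({ model: openai('gpt-4o-mini') });

// Plain strings are wrapped as user messages by convertToMessages.
const res = await llm.generate('Summarize Mastra in one line.');
console.log(res.text);

// A Zod schema as `output` routes to __textObject/generateObject;
// a ZodArray flips the AI SDK output mode to 'array'.
const list = await llm.generate('Name three embedding models.', {
  output: z.array(z.object({ name: z.string() })),
});
console.log(list.object);
```
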
package/dist/chunk-2J5OHBUG.js ADDED
@@ -0,0 +1,24 @@
+import { MastraLLM } from './chunk-2ISN3AA7.js';
+import { __name } from './chunk-AJJZUHB4.js';
+import { createOpenAI } from '@ai-sdk/openai';
+
+var _OpenAI = class _OpenAI extends MastraLLM {
+  constructor({ name, apiKey, headers, fetch, baseURL, settings }) {
+    const openai = createOpenAI({
+      apiKey: apiKey || process.env.OPENAI_API_KEY,
+      baseURL,
+      headers,
+      fetch
+    });
+    super({
+      model: openai(name || "gpt-4o-mini", {
+        structuredOutputs: true,
+        ...settings
+      })
+    });
+  }
+};
+__name(_OpenAI, "OpenAI");
+var OpenAI = _OpenAI;
+
+export { OpenAI };
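
chunk-2J5OHBUG.js is the compiled OpenAI provider class behind the new dist/llm/model/providers files: it builds an AI SDK client with createOpenAI and hands the selected model to MastraLLM. A minimal sketch, assuming the class is exported from the same llm entry point (import path assumed):

```ts
// Assumed import path; the diff only shows the compiled chunk.
import { OpenAI } from '@mastra/core/llm';

// apiKey falls back to process.env.OPENAI_API_KEY, name to 'gpt-4o-mini';
// structuredOutputs is switched on by the constructor.
const llm = new OpenAI({ name: 'gpt-4o' });

const res = await llm.generate('Hello!');
console.log(res.text);
```
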
@@ -8,9 +8,9 @@ import { embed as embed$1, embedMany as embedMany$1 } from 'ai';
 import { createVoyage } from 'voyage-ai-provider';
 import 'dotenv/config';
 
-function getEmbeddingModel(embeddingOptions) {
+function getEmbeddingModel(embedding) {
   let embeddingModel;
-  const { provider, model, apiKey } = embeddingOptions;
+  const { provider, model, apiKey } = embedding;
   if (provider === "OPEN_AI") {
     const openai = createOpenAI({
       apiKey: apiKey || process.env.OPENAI_API_KEY
@@ -53,9 +53,9 @@ function getEmbeddingModel(embeddingOptions) {
   return embeddingModel;
 }
 __name(getEmbeddingModel, "getEmbeddingModel");
-async function embed(value, embeddingOptions) {
-  const embeddingModel = await getEmbeddingModel(embeddingOptions);
-  const { maxRetries } = embeddingOptions;
+async function embed(value, embedding) {
+  const embeddingModel = await getEmbeddingModel(embedding);
+  const { maxRetries } = embedding;
   return await embed$1({
     model: embeddingModel,
     value,
@@ -63,9 +63,9 @@ async function embed(value, embeddingOptions) {
   });
 }
 __name(embed, "embed");
-async function embedMany(value, embeddingOptions) {
-  const embeddingModel = await getEmbeddingModel(embeddingOptions);
-  const { maxRetries } = embeddingOptions;
+async function embedMany(value, embedding) {
+  const embeddingModel = await getEmbeddingModel(embedding);
+  const { maxRetries } = embedding;
   return await embedMany$1({
     model: embeddingModel,
     values: value,
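
These hunks only rename the options parameter (embeddingOptions to embedding); the call shape is unchanged. A minimal sketch of both helpers, consistent with the README example above:

```ts
import { embed, embedMany } from '@mastra/core';

// Single value: getEmbeddingModel resolves the provider model,
// then the call delegates to the AI SDK's embed().
const single = await embed('hello world', {
  provider: 'OPEN_AI',
  model: 'text-embedding-3-small',
  maxRetries: 3,
});

// Many values: same options object; the inputs land in embedMany's `values` array.
const many = await embedMany(['alpha', 'beta', 'gamma'], {
  provider: 'OPEN_AI',
  model: 'text-embedding-3-small',
  maxRetries: 3,
});
```
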
package/dist/{chunk-SAXFXAKK.js → chunk-73XDWPXJ.js} RENAMED
@@ -26,6 +26,7 @@ var _Agent = class _Agent extends MastraBase {
       component: RegisteredLogger.AGENT
     });
     __publicField(this, "name");
+    // @ts-ignore
     __publicField(this, "llm");
     __publicField(this, "instructions");
     __publicField(this, "model");
@@ -35,10 +36,17 @@ var _Agent = class _Agent extends MastraBase {
     __publicField(this, "metrics");
     this.name = config.name;
     this.instructions = config.instructions;
-    this.llm = new LLM({
-      model: config.model
-    });
-    this.model = config.model;
+    if (!config.model && !config.llm) {
+      throw new Error("Either model or llm is required");
+    }
+    if (config.llm) {
+      this.llm = config.llm;
+    } else if (config.model) {
+      this.model = config.model;
+      this.llm = new LLM({
+        model: config.model
+      });
+    }
     this.tools = {};
     this.metrics = {};
     if (config.tools) {
@@ -244,7 +252,7 @@
       messages: userMessages
     };
   }
-  async saveResponse({ result, threadId, runId, memoryConfig }) {
+  async saveResponse({ result, threadId, resourceId, runId, memoryConfig }) {
     const { response } = result;
     try {
       if (response.messages) {
@@ -254,9 +262,11 @@
         const responseMessagesWithoutIncompleteToolCalls = this.sanitizeResponseMessages(ms);
         const memory = this.getMemory();
         if (memory) {
-          this.log(LogLevel.DEBUG, "Saving response to memory", {
+          this.logger.debug(`[Agent:${this.name}] - Memory persistence: store=${__privateGet(this, _mastra)?.memory?.constructor.name} threadId=${threadId}`, {
+            runId,
+            resourceId,
             threadId,
-            runId
+            memoryStore: __privateGet(this, _mastra)?.memory?.constructor.name
           });
           await memory.saveMessages({
             memoryConfig,
@@ -415,6 +425,12 @@
     let coreMessages = messages;
     let threadIdToUse = threadId;
     if (this.getMemory() && resourceId) {
+      this.logger.debug(`[Agent:${this.name}] - Memory persistence enabled: store=${__privateGet(this, _mastra)?.memory?.constructor.name}, resourceId=${resourceId}`, {
+        runId,
+        resourceId,
+        threadId: threadIdToUse,
+        memoryStore: __privateGet(this, _mastra)?.memory?.constructor.name
+      });
       const preExecuteResult = await this.preExecute({
         resourceId,
         runId,
@@ -424,22 +440,31 @@
       });
       coreMessages = preExecuteResult.coreMessages;
       threadIdToUse = preExecuteResult.threadIdToUse;
-    } else {
-      this.logger.debug(`[Agents:${this.name}] - No memory store or resourceid identifier found. Skipping memory persistence.`, {
-        runId
-      });
     }
     let convertedTools;
     if (toolsets && Object.keys(toolsets || {}).length > 0 || this.getMemory() && resourceId || __privateGet(this, _mastra)?.engine) {
+      const reasons = [];
+      if (toolsets && Object.keys(toolsets || {}).length > 0) {
+        reasons.push(`toolsets present (${Object.keys(toolsets || {}).length} tools)`);
+      }
+      if (this.getMemory() && resourceId) {
+        reasons.push("memory and resourceId available");
+      }
+      if (__privateGet(this, _mastra)?.engine) {
+        reasons.push("mastra engine enabled");
+      }
+      this.logger.debug(`[Agent:${this.name}] - Enhancing tools: ${reasons.join(", ")}`, {
+        runId,
+        toolsets: toolsets ? Object.keys(toolsets) : undefined,
+        hasMemory: !!this.getMemory(),
+        hasResourceId: !!resourceId,
+        hasEngine: !!__privateGet(this, _mastra)?.engine
+      });
       convertedTools = this.convertTools({
         toolsets,
         threadId: threadIdToUse,
         runId
       });
-    } else {
-      this.logger.debug(`Skipping tool conversion for agent ${this.name}`, {
-        runId
-      });
     }
     const messageObjects = [
       systemMessage,
@@ -477,13 +502,10 @@
       });
       if (this.getMemory() && resourceId) {
         try {
-          this.logger.debug(`Saving assistant message in memory for agent ${this.name}`, {
-            runId: runId2,
-            threadId: threadId2
-          });
           await this.saveResponse({
             result,
             threadId: threadId2,
+            resourceId,
             memoryConfig: memoryConfig2,
             runId: runId2
           });
@@ -495,11 +517,6 @@
             threadId: threadId2
           });
         }
-      } else {
-        this.logger.debug(`[Agents:${this.name}] - No memory store or resourceid identifier found. Skipping memory persistence.`, {
-          runId: runId2,
-          threadId: threadId2
-        });
       }
       if (Object.keys(this.metrics || {}).length > 0) {
         const input = messages.map((message) => message.content).join("\n");
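
The constructor hunks above make `llm` an alternative to `model`: the Agent now throws "Either model or llm is required" when neither is given, and only wraps `config.model` in `new LLM(...)` when no `llm` instance is supplied. A minimal sketch of both construction paths (import paths and the `model` config shape are assumptions, not confirmed by this diff):

```ts
import { Agent } from '@mastra/core';
// Assumed provider export; the diff only shows the compiled chunks.
import { OpenAI } from '@mastra/core/llm';

// Path 1: a model config; the Agent wraps it in `new LLM({ model })` itself.
const withModel = new Agent({
  name: 'helper',
  instructions: 'Answer briefly.',
  model: { provider: 'OPEN_AI', name: 'gpt-4o-mini' }, // config shape assumed
});

// Path 2 (new in this range): a pre-built MastraLLM instance passed directly.
const withLlm = new Agent({
  name: 'helper-llm',
  instructions: 'Answer briefly.',
  llm: new OpenAI({ name: 'gpt-4o-mini' }),
});
```
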
@@ -1,4 +1,4 @@
-import { Agent } from './chunk-SAXFXAKK.js';
+import { Agent } from './chunk-73XDWPXJ.js';
 import { __name, __publicField } from './chunk-AJJZUHB4.js';
 import { CohereClient } from 'cohere-ai';