@upstash/workflow 0.2.5-agents → 0.2.5-agents-2

@@ -2,7 +2,6 @@ import { PublishRequest, Client, Receiver, HTTPMethods as HTTPMethods$1 } from '
  import * as ai from 'ai';
  import { CoreTool, generateText } from 'ai';
  import * as _ai_sdk_openai from '@ai-sdk/openai';
- import { Tool } from 'langchain/tools';

  /**
  * Base class outlining steps. Basically, each step kind (run/sleep/sleepUntil)
@@ -391,53 +390,155 @@ declare class WorkflowApi extends BaseWorkflowApi {
  get anthropic(): AnthropicAPI;
  }

+ /**
+ * An Agent which utilizes the model and tools available to it
+ * to achieve a given task
+ *
+ * @param name Name of the agent
+ * @param background Background of the agent
+ * @param model LLM model to use
+ * @param tools tools available to the agent
+ * @param maxSteps number of times the agent can call the LLM at most. If
+ * the agent abruptly stops execution after calling tools, you may need
+ * to increase maxSteps
+ * @param temparature temparature used when calling the LLM
+ */
  declare class Agent {
  readonly name: AgentParameters["name"];
  readonly tools: AgentParameters["tools"];
  readonly maxSteps: AgentParameters["maxSteps"];
  readonly background: AgentParameters["background"];
  readonly model: AgentParameters["model"];
- constructor({ tools, maxSteps, background, name, model }: AgentParameters);
+ readonly temparature: AgentParameters["temparature"];
+ constructor({ tools, maxSteps, background, name, model, temparature }: AgentParameters);
+ /**
+ * Trigger the agent by passing a prompt
+ *
+ * @param prompt task to assign to the agent
+ * @returns Response as `{ text: string }`
+ */
  call({ prompt }: {
  prompt: string;
- }): Promise<ai.GenerateTextResult<Record<string, AISDKTool>, never>>;
+ }): Promise<{
+ text: string;
+ }>;
+ /**
+ * Convert the agent to a tool which can be used by other agents.
+ *
+ * @returns the agent as a tool
+ */
  asTool(): AISDKTool;
  }
- type ManagerAgentParameters = {
- agents: Agent[];
- model: Model;
- } & Pick<Partial<AgentParameters>, "name" | "background"> & Pick<AgentParameters, "maxSteps">;
- declare class ManagerAgent extends Agent {
- agents: ManagerAgentParameters["agents"];
- constructor({ maxSteps, background, agents, model, name, }: ManagerAgentParameters);
- }

  type AISDKTool = CoreTool;
- type LangchainTool = Tool;
+ type LangchainTool = {
+ description: string;
+ schema: AISDKTool["parameters"];
+ invoke: (...params: any[]) => any;
+ };
  type GenerateTextParams = Parameters<typeof generateText>[0];
  type Model = GenerateTextParams["model"];
  type AgentParameters<TTool extends AISDKTool | LangchainTool = AISDKTool> = {
+ /**
+ * number of times the agent can call the LLM at most. If
+ * the agent abruptly stops execution after calling tools, you may need
+ * to increase maxSteps
+ */
  maxSteps: number;
+ /**
+ * Background of the agent
+ */
  background: string;
+ /**
+ * tools available to the agent
+ */
  tools: Record<string, TTool>;
+ /**
+ * Name of the agent
+ */
  name: string;
+ /**
+ * LLM model to use
+ */
  model: Model;
+ /**
+ * temparature used when calling the LLM
+ *
+ * @default 0.1
+ */
+ temparature?: number;
  };
  type TaskParams = {
+ /**
+ * task assigned to the agent
+ */
  prompt: string;
  };
  type SingleAgentTaskParams = TaskParams & {
+ /**
+ * agent to perform the task
+ */
  agent: Agent;
  };
  type MultiAgentTaskParams = TaskParams & {
+ /**
+ * Agents which will collaborate to achieve the task
+ */
  agents: Agent[];
+ /**
+ * number of times the manager agent can call the LLM at most.
+ * If the agent abruptly stops execution after calling other agents, you may
+ * need to increase maxSteps
+ */
  maxSteps: number;
+ /**
+ * LLM model to use
+ */
  model: Model;
+ /**
+ * Background of the agent. If not passed, default will be used.
+ */
  background?: string;
  };

+ /**
+ * creates an AI SDK openai client with a custom
+ * fetch implementation which uses context.call.
+ *
+ * @param context workflow context
+ * @returns ai sdk openai
+ */
  declare const createWorkflowOpenAI: (context: WorkflowContext) => _ai_sdk_openai.OpenAIProvider;

+ /**
+ * An Agent Task
+ *
+ * Can be run to make the agent(s) complete it using the tools available to them
+ *
+ * Can consist of a single agent or multiple agents.
+ *
+ * Single agent:
+ *
+ * ```ts
+ * const task = context.agents.task({
+ * agent: researcherAgent,
+ * prompt: "Tell me about 5 topics in advanced physics.",
+ * });
+ * const { text } = await task.run();
+ * ```
+ *
+ * Multi Agent:
+ *
+ * ```ts
+ * const task = context.agents.task({
+ * model,
+ * maxSteps: 3,
+ * agents: [researcherAgent, mathAgent],
+ * prompt: "Tell me about 3 cities in Japan and calculate the sum of their populations",
+ * });
+ * const { text } = await task.run();
+ * ```
+ */
  declare class Task {
  private readonly context;
  private readonly taskParameters;
@@ -445,19 +546,80 @@ declare class Task {
  context: WorkflowContext;
  taskParameters: SingleAgentTaskParams | MultiAgentTaskParams;
  });
+ /**
+ * Run the agents to complete the task
+ *
+ * @returns Result of the task as { text: string }
+ */
  run(): Promise<{
  text: string;
  }>;
  }

+ /**
+ * Workflow Agents API
+ *
+ * https://upstash.com/docs/workflow/agents/overview
+ *
+ * Allows defining agents which can complete a given task
+ * using tools available to them.
+ */
  declare class WorkflowAgents {
  private context;
  constructor({ context }: {
  context: WorkflowContext;
  });
+ /**
+ * Defines an agent
+ *
+ * ```ts
+ * const researcherAgent = context.agents.agent({
+ * model,
+ * name: 'academic',
+ * maxSteps: 2,
+ * tools: {
+ * wikiTool: new WikipediaQueryRun({
+ * topKResults: 1,
+ * maxDocContentLength: 500,
+ * })
+ * },
+ * background:
+ * 'You are researcher agent with access to Wikipedia. ' +
+ * 'Utilize Wikipedia as much as possible for correct information',
+ * });
+ * ```
+ *
+ * @param params agent parameters
+ * @returns
+ */
  agent(params: AgentParameters<AISDKTool | LangchainTool>): Agent;
+ /**
+ * Defines a task to be executed by a single agent
+ *
+ * ```ts
+ * const task = context.agents.task({
+ * agent: researcherAgent,
+ * prompt: "Tell me about 5 topics in advanced physics.",
+ * });
+ * ```
+ */
  task(taskParameters: SingleAgentTaskParams): Task;
+ /**
+ * Defines a task to be executed by multiple collaborating agents
+ *
+ * ```ts
+ * const task = context.agents.task({
+ * model,
+ * maxSteps: 3,
+ * agents: [researcherAgent, mathAgent],
+ * prompt: "Tell me about 3 cities in Japan and calculate the sum of their populations",
+ * });
+ * ```
+ */
  task(taskParameters: MultiAgentTaskParams): Task;
+ /**
+ * creates an openai model for agents
+ */
  openai(...params: Parameters<ReturnType<typeof createWorkflowOpenAI>>): ai.LanguageModelV1;
  }

@@ -1132,4 +1294,4 @@ type HeaderParams = {
  callTimeout?: never;
  });

- export { type AsyncStepFunction as A, type CallResponse as C, type Duration as D, type FinishCondition as F, type HeaderParams as H, type LogLevel as L, ManagerAgent as M, type NotifyResponse as N, type ParallelCallState as P, type RouteFunction as R, type Step as S, type Telemetry as T, type WorkflowServeOptions as W, type Waiter as a, WorkflowContext as b, type WorkflowClient as c, type WorkflowReceiver as d, StepTypes as e, type StepType as f, type RawStep as g, type SyncStepFunction as h, type StepFunction as i, type PublicServeOptions as j, type FailureFunctionPayload as k, type RequiredExceptFields as l, type WaitRequest as m, type WaitStepResponse as n, type NotifyStepResponse as o, type WaitEventOptions as p, type CallSettings as q, type WorkflowLoggerOptions as r, WorkflowLogger as s, WorkflowAgents as t, createWorkflowOpenAI as u, Agent as v };
+ export { type AsyncStepFunction as A, type CallResponse as C, type Duration as D, type FinishCondition as F, type HeaderParams as H, type LogLevel as L, type NotifyResponse as N, type ParallelCallState as P, type RouteFunction as R, type Step as S, type Telemetry as T, type WorkflowServeOptions as W, type Waiter as a, WorkflowContext as b, type WorkflowClient as c, type WorkflowReceiver as d, StepTypes as e, type StepType as f, type RawStep as g, type SyncStepFunction as h, type StepFunction as i, type PublicServeOptions as j, type FailureFunctionPayload as k, type RequiredExceptFields as l, type WaitRequest as m, type WaitStepResponse as n, type NotifyStepResponse as o, type WaitEventOptions as p, type CallSettings as q, type WorkflowLoggerOptions as r, WorkflowLogger as s };
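Read together, these declarations describe the reshaped agents surface in 0.2.5-agents-2: `AgentParameters` gains an optional `temparature` (default 0.1, spelled as in the source), `Agent.call` and `Task.run` now resolve to a plain `{ text: string }`, and the removed `ManagerAgent` is replaced by passing `agents`, `maxSteps`, and `model` directly to `context.agents.task`. The sketch below shows how that surface might be exercised inside a workflow step; only the `context.agents.*` calls and their parameter shapes come from the declarations above, while the helper name, the `gpt-4o-mini` model id, the zod schema, and the `tool()` helper from the `ai` package are illustrative assumptions.

```ts
import { z } from "zod";
import { tool } from "ai";
import { WorkflowContext } from "@upstash/workflow";

// Hypothetical step inside a workflow route handler; `context` is the
// WorkflowContext that @upstash/workflow passes to the route function.
async function runAgentsStep(context: WorkflowContext) {
  // openai() builds a model whose fetch goes through context.call (see createWorkflowOpenAI).
  const model = context.agents.openai("gpt-4o-mini"); // model id is an assumption

  const mathAgent = context.agents.agent({
    model,
    name: "math",
    maxSteps: 2,
    temparature: 0.1, // new optional field, spelled as in the declarations
    background: "You are a math agent. Use the sum tool for any addition.",
    tools: {
      // Illustrative AI SDK tool; tools is Record<string, AISDKTool | LangchainTool>.
      sumTool: tool({
        description: "Adds a list of numbers",
        parameters: z.object({ numbers: z.array(z.number()) }),
        execute: async ({ numbers }) => numbers.reduce((a, b) => a + b, 0),
      }),
    },
  });

  const researcherAgent = context.agents.agent({
    model,
    name: "researcher",
    maxSteps: 2,
    background: "You research topics and report population figures.",
    tools: {},
  });

  // Multi-agent task: the role ManagerAgent used to play is now configured here.
  const task = context.agents.task({
    model,
    maxSteps: 3,
    agents: [researcherAgent, mathAgent],
    prompt: "Tell me about 3 cities in Japan and calculate the sum of their populations",
  });

  // Task.run() (like Agent.call()) now resolves to { text: string }.
  const { text } = await task.run();
  return text;
}
```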
package/agents.d.mts DELETED
@@ -1,5 +0,0 @@
- import 'ai';
- export { v as Agent, M as ManagerAgent, t as WorkflowAgents, u as createWorkflowOpenAI } from './types-BEyIoCRe.mjs';
- import '@upstash/qstash';
- import '@ai-sdk/openai';
- import 'langchain/tools';

package/agents.d.ts DELETED
@@ -1,5 +0,0 @@
- import 'ai';
- export { v as Agent, M as ManagerAgent, t as WorkflowAgents, u as createWorkflowOpenAI } from './types-BEyIoCRe.js';
- import '@upstash/qstash';
- import '@ai-sdk/openai';
- import 'langchain/tools';
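The deleted `agents.d.mts` / `agents.d.ts` entrypoints and the dropped `langchain/tools` import mean LangChain is no longer a type-level dependency: `LangchainTool` is now the structural shape `{ description, schema, invoke }`, so anything matching it (a LangChain community tool such as the `WikipediaQueryRun` in the JSDoc example, or a hand-rolled object) can be supplied in `tools`. Below is a minimal sketch of such a hand-rolled tool, assuming zod for the schema; the tool itself and the `gpt-4o-mini` model id are invented for illustration, and how the runtime adapts `invoke` is not visible in this diff.

```ts
import { z } from "zod";
import { WorkflowContext } from "@upstash/workflow";

// Any object with { description, schema, invoke } satisfies the new structural
// LangchainTool type; no import from 'langchain/tools' is required anymore.
const echoTool = {
  description: "Echoes the input string back to the caller", // hypothetical tool
  schema: z.object({ input: z.string() }),
  invoke: async ({ input }: { input: string }) => `echo: ${input}`,
};

async function useStructuralTool(context: WorkflowContext) {
  const agent = context.agents.agent({
    model: context.agents.openai("gpt-4o-mini"), // model id is an assumption
    name: "echoer",
    maxSteps: 2,
    background: "You repeat user input using the echo tool.",
    tools: { echoTool }, // accepted via AgentParameters<AISDKTool | LangchainTool>
  });

  // Agent.call now resolves to { text: string } per the declarations above.
  const { text } = await agent.call({ prompt: "Echo 'hello'" });
  return text;
}
```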