illuma-agents 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/dist/cjs/common/enum.cjs +163 -0
- package/dist/cjs/common/enum.cjs.map +1 -0
- package/dist/cjs/events.cjs +143 -0
- package/dist/cjs/events.cjs.map +1 -0
- package/dist/cjs/graphs/Graph.cjs +581 -0
- package/dist/cjs/graphs/Graph.cjs.map +1 -0
- package/dist/cjs/instrumentation.cjs +21 -0
- package/dist/cjs/instrumentation.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/index.cjs +292 -0
- package/dist/cjs/llm/anthropic/index.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/types.cjs +50 -0
- package/dist/cjs/llm/anthropic/types.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +553 -0
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +218 -0
- package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -0
- package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
- package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
- package/dist/cjs/llm/fake.cjs +97 -0
- package/dist/cjs/llm/fake.cjs.map +1 -0
- package/dist/cjs/llm/google/index.cjs +147 -0
- package/dist/cjs/llm/google/index.cjs.map +1 -0
- package/dist/cjs/llm/google/utils/common.cjs +490 -0
- package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
- package/dist/cjs/llm/ollama/index.cjs +70 -0
- package/dist/cjs/llm/ollama/index.cjs.map +1 -0
- package/dist/cjs/llm/ollama/utils.cjs +158 -0
- package/dist/cjs/llm/ollama/utils.cjs.map +1 -0
- package/dist/cjs/llm/openai/index.cjs +613 -0
- package/dist/cjs/llm/openai/index.cjs.map +1 -0
- package/dist/cjs/llm/openai/utils/index.cjs +677 -0
- package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
- package/dist/cjs/llm/openrouter/index.cjs +29 -0
- package/dist/cjs/llm/openrouter/index.cjs.map +1 -0
- package/dist/cjs/llm/providers.cjs +47 -0
- package/dist/cjs/llm/providers.cjs.map +1 -0
- package/dist/cjs/llm/text.cjs +69 -0
- package/dist/cjs/llm/text.cjs.map +1 -0
- package/dist/cjs/llm/vertexai/index.cjs +330 -0
- package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
- package/dist/cjs/main.cjs +127 -0
- package/dist/cjs/main.cjs.map +1 -0
- package/dist/cjs/messages/core.cjs +359 -0
- package/dist/cjs/messages/core.cjs.map +1 -0
- package/dist/cjs/messages/format.cjs +455 -0
- package/dist/cjs/messages/format.cjs.map +1 -0
- package/dist/cjs/messages/ids.cjs +23 -0
- package/dist/cjs/messages/ids.cjs.map +1 -0
- package/dist/cjs/messages/prune.cjs +398 -0
- package/dist/cjs/messages/prune.cjs.map +1 -0
- package/dist/cjs/run.cjs +264 -0
- package/dist/cjs/run.cjs.map +1 -0
- package/dist/cjs/splitStream.cjs +210 -0
- package/dist/cjs/splitStream.cjs.map +1 -0
- package/dist/cjs/stream.cjs +504 -0
- package/dist/cjs/stream.cjs.map +1 -0
- package/dist/cjs/tools/CodeExecutor.cjs +192 -0
- package/dist/cjs/tools/CodeExecutor.cjs.map +1 -0
- package/dist/cjs/tools/ToolNode.cjs +125 -0
- package/dist/cjs/tools/ToolNode.cjs.map +1 -0
- package/dist/cjs/tools/handlers.cjs +250 -0
- package/dist/cjs/tools/handlers.cjs.map +1 -0
- package/dist/cjs/tools/search/anthropic.cjs +40 -0
- package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
- package/dist/cjs/tools/search/content.cjs +140 -0
- package/dist/cjs/tools/search/content.cjs.map +1 -0
- package/dist/cjs/tools/search/firecrawl.cjs +179 -0
- package/dist/cjs/tools/search/firecrawl.cjs.map +1 -0
- package/dist/cjs/tools/search/format.cjs +203 -0
- package/dist/cjs/tools/search/format.cjs.map +1 -0
- package/dist/cjs/tools/search/highlights.cjs +245 -0
- package/dist/cjs/tools/search/highlights.cjs.map +1 -0
- package/dist/cjs/tools/search/rerankers.cjs +174 -0
- package/dist/cjs/tools/search/rerankers.cjs.map +1 -0
- package/dist/cjs/tools/search/schema.cjs +70 -0
- package/dist/cjs/tools/search/schema.cjs.map +1 -0
- package/dist/cjs/tools/search/search.cjs +561 -0
- package/dist/cjs/tools/search/search.cjs.map +1 -0
- package/dist/cjs/tools/search/serper-scraper.cjs +132 -0
- package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -0
- package/dist/cjs/tools/search/tool.cjs +331 -0
- package/dist/cjs/tools/search/tool.cjs.map +1 -0
- package/dist/cjs/tools/search/utils.cjs +66 -0
- package/dist/cjs/tools/search/utils.cjs.map +1 -0
- package/dist/cjs/utils/graph.cjs +16 -0
- package/dist/cjs/utils/graph.cjs.map +1 -0
- package/dist/cjs/utils/llm.cjs +28 -0
- package/dist/cjs/utils/llm.cjs.map +1 -0
- package/dist/cjs/utils/misc.cjs +56 -0
- package/dist/cjs/utils/misc.cjs.map +1 -0
- package/dist/cjs/utils/run.cjs +69 -0
- package/dist/cjs/utils/run.cjs.map +1 -0
- package/dist/cjs/utils/title.cjs +111 -0
- package/dist/cjs/utils/title.cjs.map +1 -0
- package/dist/cjs/utils/tokens.cjs +65 -0
- package/dist/cjs/utils/tokens.cjs.map +1 -0
- package/dist/esm/common/enum.mjs +163 -0
- package/dist/esm/common/enum.mjs.map +1 -0
- package/dist/esm/events.mjs +135 -0
- package/dist/esm/events.mjs.map +1 -0
- package/dist/esm/graphs/Graph.mjs +578 -0
- package/dist/esm/graphs/Graph.mjs.map +1 -0
- package/dist/esm/instrumentation.mjs +19 -0
- package/dist/esm/instrumentation.mjs.map +1 -0
- package/dist/esm/llm/anthropic/index.mjs +290 -0
- package/dist/esm/llm/anthropic/index.mjs.map +1 -0
- package/dist/esm/llm/anthropic/types.mjs +48 -0
- package/dist/esm/llm/anthropic/types.mjs.map +1 -0
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs +550 -0
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -0
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs +216 -0
- package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -0
- package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
- package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
- package/dist/esm/llm/fake.mjs +94 -0
- package/dist/esm/llm/fake.mjs.map +1 -0
- package/dist/esm/llm/google/index.mjs +145 -0
- package/dist/esm/llm/google/index.mjs.map +1 -0
- package/dist/esm/llm/google/utils/common.mjs +484 -0
- package/dist/esm/llm/google/utils/common.mjs.map +1 -0
- package/dist/esm/llm/ollama/index.mjs +68 -0
- package/dist/esm/llm/ollama/index.mjs.map +1 -0
- package/dist/esm/llm/ollama/utils.mjs +155 -0
- package/dist/esm/llm/ollama/utils.mjs.map +1 -0
- package/dist/esm/llm/openai/index.mjs +604 -0
- package/dist/esm/llm/openai/index.mjs.map +1 -0
- package/dist/esm/llm/openai/utils/index.mjs +671 -0
- package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
- package/dist/esm/llm/openrouter/index.mjs +27 -0
- package/dist/esm/llm/openrouter/index.mjs.map +1 -0
- package/dist/esm/llm/providers.mjs +43 -0
- package/dist/esm/llm/providers.mjs.map +1 -0
- package/dist/esm/llm/text.mjs +67 -0
- package/dist/esm/llm/text.mjs.map +1 -0
- package/dist/esm/llm/vertexai/index.mjs +328 -0
- package/dist/esm/llm/vertexai/index.mjs.map +1 -0
- package/dist/esm/main.mjs +20 -0
- package/dist/esm/main.mjs.map +1 -0
- package/dist/esm/messages/core.mjs +351 -0
- package/dist/esm/messages/core.mjs.map +1 -0
- package/dist/esm/messages/format.mjs +447 -0
- package/dist/esm/messages/format.mjs.map +1 -0
- package/dist/esm/messages/ids.mjs +21 -0
- package/dist/esm/messages/ids.mjs.map +1 -0
- package/dist/esm/messages/prune.mjs +393 -0
- package/dist/esm/messages/prune.mjs.map +1 -0
- package/dist/esm/run.mjs +261 -0
- package/dist/esm/run.mjs.map +1 -0
- package/dist/esm/splitStream.mjs +207 -0
- package/dist/esm/splitStream.mjs.map +1 -0
- package/dist/esm/stream.mjs +500 -0
- package/dist/esm/stream.mjs.map +1 -0
- package/dist/esm/tools/CodeExecutor.mjs +188 -0
- package/dist/esm/tools/CodeExecutor.mjs.map +1 -0
- package/dist/esm/tools/ToolNode.mjs +122 -0
- package/dist/esm/tools/ToolNode.mjs.map +1 -0
- package/dist/esm/tools/handlers.mjs +245 -0
- package/dist/esm/tools/handlers.mjs.map +1 -0
- package/dist/esm/tools/search/anthropic.mjs +37 -0
- package/dist/esm/tools/search/anthropic.mjs.map +1 -0
- package/dist/esm/tools/search/content.mjs +119 -0
- package/dist/esm/tools/search/content.mjs.map +1 -0
- package/dist/esm/tools/search/firecrawl.mjs +176 -0
- package/dist/esm/tools/search/firecrawl.mjs.map +1 -0
- package/dist/esm/tools/search/format.mjs +201 -0
- package/dist/esm/tools/search/format.mjs.map +1 -0
- package/dist/esm/tools/search/highlights.mjs +243 -0
- package/dist/esm/tools/search/highlights.mjs.map +1 -0
- package/dist/esm/tools/search/rerankers.mjs +168 -0
- package/dist/esm/tools/search/rerankers.mjs.map +1 -0
- package/dist/esm/tools/search/schema.mjs +61 -0
- package/dist/esm/tools/search/schema.mjs.map +1 -0
- package/dist/esm/tools/search/search.mjs +558 -0
- package/dist/esm/tools/search/search.mjs.map +1 -0
- package/dist/esm/tools/search/serper-scraper.mjs +129 -0
- package/dist/esm/tools/search/serper-scraper.mjs.map +1 -0
- package/dist/esm/tools/search/tool.mjs +329 -0
- package/dist/esm/tools/search/tool.mjs.map +1 -0
- package/dist/esm/tools/search/utils.mjs +61 -0
- package/dist/esm/tools/search/utils.mjs.map +1 -0
- package/dist/esm/utils/graph.mjs +13 -0
- package/dist/esm/utils/graph.mjs.map +1 -0
- package/dist/esm/utils/llm.mjs +25 -0
- package/dist/esm/utils/llm.mjs.map +1 -0
- package/dist/esm/utils/misc.mjs +53 -0
- package/dist/esm/utils/misc.mjs.map +1 -0
- package/dist/esm/utils/run.mjs +66 -0
- package/dist/esm/utils/run.mjs.map +1 -0
- package/dist/esm/utils/title.mjs +108 -0
- package/dist/esm/utils/title.mjs.map +1 -0
- package/dist/esm/utils/tokens.mjs +62 -0
- package/dist/esm/utils/tokens.mjs.map +1 -0
- package/dist/types/common/enum.d.ts +128 -0
- package/dist/types/common/index.d.ts +1 -0
- package/dist/types/events.d.ts +29 -0
- package/dist/types/graphs/Graph.d.ts +122 -0
- package/dist/types/graphs/index.d.ts +1 -0
- package/dist/types/index.d.ts +13 -0
- package/dist/types/instrumentation.d.ts +1 -0
- package/dist/types/llm/anthropic/index.d.ts +39 -0
- package/dist/types/llm/anthropic/types.d.ts +37 -0
- package/dist/types/llm/anthropic/utils/message_inputs.d.ts +14 -0
- package/dist/types/llm/anthropic/utils/message_outputs.d.ts +14 -0
- package/dist/types/llm/anthropic/utils/output_parsers.d.ts +22 -0
- package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
- package/dist/types/llm/fake.d.ts +31 -0
- package/dist/types/llm/google/index.d.ts +14 -0
- package/dist/types/llm/google/types.d.ts +32 -0
- package/dist/types/llm/google/utils/common.d.ts +19 -0
- package/dist/types/llm/google/utils/tools.d.ts +10 -0
- package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
- package/dist/types/llm/ollama/index.d.ts +8 -0
- package/dist/types/llm/ollama/utils.d.ts +7 -0
- package/dist/types/llm/openai/index.d.ts +103 -0
- package/dist/types/llm/openai/types.d.ts +10 -0
- package/dist/types/llm/openai/utils/index.d.ts +20 -0
- package/dist/types/llm/openrouter/index.d.ts +12 -0
- package/dist/types/llm/providers.d.ts +5 -0
- package/dist/types/llm/text.d.ts +21 -0
- package/dist/types/llm/vertexai/index.d.ts +293 -0
- package/dist/types/messages/core.d.ts +14 -0
- package/dist/types/messages/format.d.ts +113 -0
- package/dist/types/messages/ids.d.ts +3 -0
- package/dist/types/messages/index.d.ts +4 -0
- package/dist/types/messages/prune.d.ts +51 -0
- package/dist/types/mockStream.d.ts +32 -0
- package/dist/types/prompts/collab.d.ts +1 -0
- package/dist/types/prompts/index.d.ts +2 -0
- package/dist/types/prompts/taskmanager.d.ts +41 -0
- package/dist/types/run.d.ts +30 -0
- package/dist/types/scripts/abort.d.ts +1 -0
- package/dist/types/scripts/ant_web_search.d.ts +1 -0
- package/dist/types/scripts/args.d.ts +7 -0
- package/dist/types/scripts/caching.d.ts +1 -0
- package/dist/types/scripts/cli.d.ts +1 -0
- package/dist/types/scripts/cli2.d.ts +1 -0
- package/dist/types/scripts/cli3.d.ts +1 -0
- package/dist/types/scripts/cli4.d.ts +1 -0
- package/dist/types/scripts/cli5.d.ts +1 -0
- package/dist/types/scripts/code_exec.d.ts +1 -0
- package/dist/types/scripts/code_exec_files.d.ts +1 -0
- package/dist/types/scripts/code_exec_simple.d.ts +1 -0
- package/dist/types/scripts/content.d.ts +1 -0
- package/dist/types/scripts/empty_input.d.ts +1 -0
- package/dist/types/scripts/image.d.ts +1 -0
- package/dist/types/scripts/memory.d.ts +1 -0
- package/dist/types/scripts/search.d.ts +1 -0
- package/dist/types/scripts/simple.d.ts +1 -0
- package/dist/types/scripts/stream.d.ts +1 -0
- package/dist/types/scripts/thinking.d.ts +1 -0
- package/dist/types/scripts/tools.d.ts +1 -0
- package/dist/types/specs/spec.utils.d.ts +1 -0
- package/dist/types/splitStream.d.ts +37 -0
- package/dist/types/stream.d.ts +14 -0
- package/dist/types/tools/CodeExecutor.d.ts +23 -0
- package/dist/types/tools/ToolNode.d.ts +22 -0
- package/dist/types/tools/example.d.ts +78 -0
- package/dist/types/tools/handlers.d.ts +19 -0
- package/dist/types/tools/search/anthropic.d.ts +16 -0
- package/dist/types/tools/search/content.d.ts +4 -0
- package/dist/types/tools/search/firecrawl.d.ts +54 -0
- package/dist/types/tools/search/format.d.ts +5 -0
- package/dist/types/tools/search/highlights.d.ts +13 -0
- package/dist/types/tools/search/index.d.ts +2 -0
- package/dist/types/tools/search/rerankers.d.ts +38 -0
- package/dist/types/tools/search/schema.d.ts +16 -0
- package/dist/types/tools/search/search.d.ts +8 -0
- package/dist/types/tools/search/serper-scraper.d.ts +59 -0
- package/dist/types/tools/search/test.d.ts +1 -0
- package/dist/types/tools/search/tool.d.ts +54 -0
- package/dist/types/tools/search/types.d.ts +591 -0
- package/dist/types/tools/search/utils.d.ts +10 -0
- package/dist/types/types/graph.d.ts +138 -0
- package/dist/types/types/index.d.ts +5 -0
- package/dist/types/types/llm.d.ts +102 -0
- package/dist/types/types/run.d.ts +74 -0
- package/dist/types/types/stream.d.ts +293 -0
- package/dist/types/types/tools.d.ts +61 -0
- package/dist/types/utils/graph.d.ts +2 -0
- package/dist/types/utils/index.d.ts +5 -0
- package/dist/types/utils/llm.d.ts +3 -0
- package/dist/types/utils/llmConfig.d.ts +3 -0
- package/dist/types/utils/logging.d.ts +1 -0
- package/dist/types/utils/misc.d.ts +7 -0
- package/dist/types/utils/run.d.ts +27 -0
- package/dist/types/utils/title.d.ts +4 -0
- package/dist/types/utils/tokens.d.ts +3 -0
- package/package.json +145 -0
- package/src/common/enum.ts +176 -0
- package/src/common/index.ts +2 -0
- package/src/events.ts +191 -0
- package/src/graphs/Graph.ts +846 -0
- package/src/graphs/index.ts +1 -0
- package/src/index.ts +24 -0
- package/src/instrumentation.ts +22 -0
- package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
- package/src/llm/anthropic/index.ts +413 -0
- package/src/llm/anthropic/llm.spec.ts +1442 -0
- package/src/llm/anthropic/types.ts +140 -0
- package/src/llm/anthropic/utils/message_inputs.ts +660 -0
- package/src/llm/anthropic/utils/message_outputs.ts +289 -0
- package/src/llm/anthropic/utils/output_parsers.ts +133 -0
- package/src/llm/anthropic/utils/tools.ts +29 -0
- package/src/llm/fake.ts +133 -0
- package/src/llm/google/index.ts +222 -0
- package/src/llm/google/types.ts +43 -0
- package/src/llm/google/utils/common.ts +660 -0
- package/src/llm/google/utils/tools.ts +160 -0
- package/src/llm/google/utils/zod_to_genai_parameters.ts +88 -0
- package/src/llm/ollama/index.ts +92 -0
- package/src/llm/ollama/utils.ts +193 -0
- package/src/llm/openai/index.ts +853 -0
- package/src/llm/openai/types.ts +24 -0
- package/src/llm/openai/utils/index.ts +918 -0
- package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
- package/src/llm/openrouter/index.ts +60 -0
- package/src/llm/providers.ts +57 -0
- package/src/llm/text.ts +94 -0
- package/src/llm/vertexai/index.ts +360 -0
- package/src/messages/core.ts +463 -0
- package/src/messages/format.ts +625 -0
- package/src/messages/formatAgentMessages.test.ts +917 -0
- package/src/messages/formatAgentMessages.tools.test.ts +400 -0
- package/src/messages/formatMessage.test.ts +693 -0
- package/src/messages/ids.ts +26 -0
- package/src/messages/index.ts +4 -0
- package/src/messages/prune.ts +567 -0
- package/src/messages/shiftIndexTokenCountMap.test.ts +81 -0
- package/src/mockStream.ts +99 -0
- package/src/prompts/collab.ts +6 -0
- package/src/prompts/index.ts +2 -0
- package/src/prompts/taskmanager.ts +61 -0
- package/src/proto/CollabGraph.ts +269 -0
- package/src/proto/TaskManager.ts +243 -0
- package/src/proto/collab.ts +200 -0
- package/src/proto/collab_design.ts +184 -0
- package/src/proto/collab_design_v2.ts +224 -0
- package/src/proto/collab_design_v3.ts +255 -0
- package/src/proto/collab_design_v4.ts +220 -0
- package/src/proto/collab_design_v5.ts +251 -0
- package/src/proto/collab_graph.ts +181 -0
- package/src/proto/collab_original.ts +123 -0
- package/src/proto/example.ts +93 -0
- package/src/proto/example_new.ts +68 -0
- package/src/proto/example_old.ts +201 -0
- package/src/proto/example_test.ts +152 -0
- package/src/proto/example_test_anthropic.ts +100 -0
- package/src/proto/log_stream.ts +202 -0
- package/src/proto/main_collab_community_event.ts +133 -0
- package/src/proto/main_collab_design_v2.ts +96 -0
- package/src/proto/main_collab_design_v4.ts +100 -0
- package/src/proto/main_collab_design_v5.ts +135 -0
- package/src/proto/main_collab_global_analysis.ts +122 -0
- package/src/proto/main_collab_hackathon_event.ts +153 -0
- package/src/proto/main_collab_space_mission.ts +153 -0
- package/src/proto/main_philosophy.ts +210 -0
- package/src/proto/original_script.ts +126 -0
- package/src/proto/standard.ts +100 -0
- package/src/proto/stream.ts +56 -0
- package/src/proto/tasks.ts +118 -0
- package/src/proto/tools/global_analysis_tools.ts +86 -0
- package/src/proto/tools/space_mission_tools.ts +60 -0
- package/src/proto/vertexai.ts +54 -0
- package/src/run.ts +381 -0
- package/src/scripts/abort.ts +138 -0
- package/src/scripts/ant_web_search.ts +158 -0
- package/src/scripts/args.ts +48 -0
- package/src/scripts/caching.ts +124 -0
- package/src/scripts/cli.ts +167 -0
- package/src/scripts/cli2.ts +125 -0
- package/src/scripts/cli3.ts +178 -0
- package/src/scripts/cli4.ts +184 -0
- package/src/scripts/cli5.ts +184 -0
- package/src/scripts/code_exec.ts +214 -0
- package/src/scripts/code_exec_files.ts +193 -0
- package/src/scripts/code_exec_simple.ts +129 -0
- package/src/scripts/content.ts +120 -0
- package/src/scripts/empty_input.ts +137 -0
- package/src/scripts/image.ts +178 -0
- package/src/scripts/memory.ts +97 -0
- package/src/scripts/search.ts +150 -0
- package/src/scripts/simple.ts +225 -0
- package/src/scripts/stream.ts +122 -0
- package/src/scripts/thinking.ts +150 -0
- package/src/scripts/tools.ts +155 -0
- package/src/specs/anthropic.simple.test.ts +317 -0
- package/src/specs/azure.simple.test.ts +316 -0
- package/src/specs/openai.simple.test.ts +316 -0
- package/src/specs/prune.test.ts +763 -0
- package/src/specs/reasoning.test.ts +165 -0
- package/src/specs/spec.utils.ts +3 -0
- package/src/specs/thinking-prune.test.ts +703 -0
- package/src/specs/token-distribution-edge-case.test.ts +316 -0
- package/src/specs/tool-error.test.ts +193 -0
- package/src/splitStream.test.ts +691 -0
- package/src/splitStream.ts +234 -0
- package/src/stream.test.ts +94 -0
- package/src/stream.ts +651 -0
- package/src/tools/CodeExecutor.ts +220 -0
- package/src/tools/ToolNode.ts +170 -0
- package/src/tools/example.ts +129 -0
- package/src/tools/handlers.ts +336 -0
- package/src/tools/search/anthropic.ts +51 -0
- package/src/tools/search/content.test.ts +173 -0
- package/src/tools/search/content.ts +147 -0
- package/src/tools/search/firecrawl.ts +210 -0
- package/src/tools/search/format.ts +250 -0
- package/src/tools/search/highlights.ts +320 -0
- package/src/tools/search/index.ts +2 -0
- package/src/tools/search/jina-reranker.test.ts +126 -0
- package/src/tools/search/output.md +2775 -0
- package/src/tools/search/rerankers.ts +242 -0
- package/src/tools/search/schema.ts +63 -0
- package/src/tools/search/search.ts +759 -0
- package/src/tools/search/serper-scraper.ts +155 -0
- package/src/tools/search/test.html +884 -0
- package/src/tools/search/test.md +643 -0
- package/src/tools/search/test.ts +159 -0
- package/src/tools/search/tool.ts +471 -0
- package/src/tools/search/types.ts +687 -0
- package/src/tools/search/utils.ts +79 -0
- package/src/types/graph.ts +185 -0
- package/src/types/index.ts +6 -0
- package/src/types/llm.ts +140 -0
- package/src/types/run.ts +89 -0
- package/src/types/stream.ts +400 -0
- package/src/types/tools.ts +80 -0
- package/src/utils/graph.ts +11 -0
- package/src/utils/index.ts +5 -0
- package/src/utils/llm.ts +27 -0
- package/src/utils/llmConfig.ts +183 -0
- package/src/utils/logging.ts +48 -0
- package/src/utils/misc.ts +57 -0
- package/src/utils/run.ts +101 -0
- package/src/utils/title.ts +165 -0
- package/src/utils/tokens.ts +70 -0

@@ -0,0 +1,99 @@
+
+// import { nanoid } from 'nanoid';
+import type OpenAITypes from 'openai';
+import type * as t from '@/types';
+// import { SplitStreamHandler } from '@/splitStream';
+// import { GraphEvents } from '@/common';
+import { sleep } from '@/utils';
+
+const choiceProps: OpenAITypes.Chat.Completions.ChatCompletionChunk.Choice = { finish_reason: null, index: 0, delta: {} };
+const reasoningSplitRegex = /(?<=\s+)|(?=\s+)/;
+const contentSplitRegex = /(?<=<\/?think>)|(?=<\/?think>)|(?<=\s+)|(?=\s+)/;
+export const createMockStream = (options: {
+  text?: string;
+  reasoningText?: string;
+  streamRate?: number;
+  reasoningKey?: 'reasoning' | 'reasoning_content';
+} = {}) => {
+  const {
+    text,
+    reasoningText,
+    streamRate = 25,
+    reasoningKey = 'reasoning_content'
+  } = options;
+
+  return async function* mockOpenAIStream(): AsyncGenerator<t.CustomChunk> {
+    const content = text ?? `Here's a sample message that includes code:
+```python
+def hello_world():
+    print("Hello, World!")
+    # This is a long code block
+    # That shouldn't be split
+    return True
+```
+Now we're back to regular text. This is a very long sentence that should probably be split at some point because it exceeds our threshold and contains multiple natural breaking points. Let's see how it handles this case properly.
+
+Here's another code block:
+```javascript
+console.log("Another test");
+// More code here
+```
+And finally some more regular text to test our splitting logic.`;
+
+    if (reasoningText != null && reasoningText) {
+      // Split reasoning text into "token-like" chunks
+      const reasoningTokens = reasoningText.split(reasoningSplitRegex);
+      for (const token of reasoningTokens) {
+        yield {
+          choices: [{
+            ...choiceProps,
+            delta: {
+              [reasoningKey]: token,
+            },
+          }]
+        };
+        await sleep(streamRate);
+      }
+    }
+
+    // Split main content into "token-like" chunks
+    const tokens = content.split(contentSplitRegex);
+    for (const token of tokens) {
+      yield {
+        choices: [{
+          ...choiceProps,
+          delta: {
+            content: token
+          }
+        }]
+      };
+      await sleep(streamRate);
+    }
+  };
+};
+
+/**
+(async function testStream(): Promise<void> {
+  const runId = nanoid();
+
+  const streamHandler = new SplitStreamHandler({
+    runId,
+    handlers: {
+      [GraphEvents.ON_RUN_STEP]: (data): void => {
+        console.dir(data, { depth: null });
+      },
+      [GraphEvents.ON_MESSAGE_DELTA]: (): void => {
+        // console.dir(data, { depth: null });
+      },
+    },
+  });
+  const stream = createMockStream({
+    reasoningText: 'This is a test reasoning text.',
+    streamRate: 5,
+  })();
+
+  for await (const chunk of stream) {
+    streamHandler.handle(chunk);
+  }
+})();
+*/
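The 99-line hunk above matches package/src/mockStream.ts in the file listing and defines the package's mock OpenAI-style stream. As a hedged illustration, a consumer could drain the generator and reassemble the deltas like the sketch below; the relative import path and the chunk field access are assumptions based on this hunk alone, since the published entry points are not shown in this diff.

```ts
// Hypothetical usage sketch (not part of the package): drain the mock stream
// and reassemble the streamed deltas into a single string.
import { createMockStream } from './mockStream';

async function collectMockOutput(): Promise<string> {
  // A streamRate of 1 ms keeps the example fast; the default shown above is 25 ms per token.
  const stream = createMockStream({ text: 'Hello from the mock stream.', streamRate: 1 })();
  let output = '';
  for await (const chunk of stream) {
    // Each chunk mimics an OpenAI ChatCompletionChunk: content deltas arrive on
    // choices[0].delta.content; reasoning deltas use the configured reasoningKey.
    output += chunk.choices[0]?.delta?.content ?? '';
  }
  return output;
}

void collectMockOutput().then((text) => console.log(text));
```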

@@ -0,0 +1,6 @@
+// src/prompts/collab.ts
+export const supervisorPrompt = `You are a supervisor tasked with managing a conversation between the
+following workers: {members}. Given the following user request,
+respond with the worker to act next. Each worker will perform a
+task and respond with their results and status. Multiple workers can work at once, and they can use multiple tools at once. Each worker can run their tools multiple times per task. When finished,
+respond with FINISH.`;
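The {members} placeholder in supervisorPrompt is filled through a LangChain prompt-template partial; the CollabGraph hunk later in this diff does exactly that. A minimal standalone sketch, with invented worker names:

```ts
// Sketch only: fill the {members} placeholder the same way CollabGraph does below.
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts';
import { supervisorPrompt } from './collab';

async function buildSupervisorPrompt() {
  const prompt = ChatPromptTemplate.fromMessages([
    ['system', supervisorPrompt],
    new MessagesPlaceholder('messages'),
  ]);
  // 'researcher' and 'coder' are placeholder worker names for illustration.
  return prompt.partial({ members: 'researcher, coder' });
}
```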

@@ -0,0 +1,61 @@
+export const taskManagerPrompt = `You are a Task Manager responsible for efficiently coordinating a team of specialized workers: {members}. Your PRIMARY and SOLE OBJECTIVE is to fulfill the user's specific request as quickly and effectively as possible.
+
+CRITICAL GUIDELINES:
+1. The user's request is your CHIEF CONCERN. Every action must directly contribute to fulfilling this request.
+2. Aim to complete the entire task in NO MORE THAN 2-3 TURNS, unless explicitly instructed otherwise.
+3. Eliminate all superfluous activity. Each task must be essential to achieving the user's goal.
+4. Assign no more than 5 tasks per turn, and only if absolutely necessary.
+5. Be concise and direct in your task assignments.
+6. End the process IMMEDIATELY once the user's request is fulfilled by setting 'end' to true and assigning no new tasks.
+
+Your responsibilities:
+1. Analyze the user's request and break it down into the minimum necessary subtasks.
+2. Assign these essential tasks to the most appropriate team members based on their skills and tools.
+3. Prioritize tasks to ensure the most efficient path to completion.
+4. Continuously evaluate if the user's request has been fully addressed.
+5. End the process IMMEDIATELY once the user's request is fulfilled.
+
+Task Assignment Guidelines:
+- Assign only the most crucial tasks required to meet the user's needs.
+- Multiple tasks can be assigned to the same team member if it improves efficiency.
+- Always specify the tool to use if applicable.
+- Consider task dependencies to minimize the number of turns.
+
+After each round:
+- Critically assess if the user's request has been fully addressed.
+- If more work is genuinely needed, assign only the most essential remaining tasks.
+- If the user's request has been fulfilled or can be fulfilled with the results at hand, set 'end' to true and assign no new tasks.
+
+REMEMBER: Your success is measured by how quickly and effectively you fulfill the user's request, not by the number of tasks assigned or turns taken. Excessive deliberation or unnecessary tasks are counterproductive. Focus solely on the user's needs and conclude the process as soon as those needs are met.`;
+
+export const assignTasksFunctionDescription = 'Assign the minimum necessary tasks to team members to fulfill the user\'s request as quickly as possible. Assign up to 5 tasks maximum per turn, only if absolutely necessary. Each task must specify the team member, a concise description, and the tool to use if applicable.';
+
+export const assignTasksFunctionParameters = {
+  type: 'object',
+  properties: {
+    tasks: {
+      type: 'array',
+      items: {
+        type: 'object',
+        properties: {
+          member: { type: 'string', description: 'Name of the team member assigned to the task' },
+          description: { type: 'string', description: 'Concise description of the essential task to be performed' },
+          tool: { type: 'string', description: 'Specific tool to be used for the task, if applicable' },
+        },
+        required: ['member', 'description'],
+      },
+      description: 'List of essential tasks to be assigned, maximum 5 tasks per turn.',
+    },
+  },
+  required: ['tasks'],
+};
+
+export const endProcessFunctionDescription = 'End the process when the user\'s request has been fulfilled.';
+
+export const endProcessFunctionParameters = {
+  type: 'object',
+  properties: {
+    reason: { type: 'string', description: 'Brief explanation of why the process is ending' },
+  },
+  required: ['reason'],
+};
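To make the function schemas above concrete, here is a hedged example of tool-call arguments conforming to assignTasksFunctionParameters and endProcessFunctionParameters; the member names and task text are invented for illustration.

```ts
// Example arguments an LLM might return for the 'assign_tasks' tool, matching
// the schema exported above ('tool' is optional per the schema).
const exampleAssignTasksArgs = {
  tasks: [
    { member: 'researcher', description: 'Gather three recent sources on the topic', tool: 'web_search' },
    { member: 'writer', description: 'Summarize the findings in two paragraphs' },
  ],
};

// A matching 'end_process' payload, per endProcessFunctionParameters.
const exampleEndProcessArgs = { reason: "The user's request has been fulfilled." };
```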

@@ -0,0 +1,269 @@
+// src/graphs/CollabGraph.ts
+import { AIMessageChunk, BaseMessage, HumanMessage } from '@langchain/core/messages';
+import { END, StateGraphArgs, START, StateGraph, MemorySaver } from '@langchain/langgraph';
+import { AgentExecutor, createOpenAIToolsAgent } from 'langchain/agents';
+import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts';
+import type { StructuredTool } from '@langchain/core/tools';
+import { Runnable, RunnableConfig } from '@langchain/core/runnables';
+import { JsonOutputToolsParser } from 'langchain/output_parsers';
+import { Providers } from '@/common';
+import { getChatModelClass } from '@/llm/providers';
+import { Graph } from '../graphs/Graph';
+import type * as t from '@/types';
+import { supervisorPrompt } from '@/prompts/collab';
+
+export interface CollabAgentStateChannels {
+  messages: BaseMessage[];
+  next: string;
+  [key: string]: any;
+}
+
+export interface CollabMember {
+  name: string;
+  systemPrompt: string;
+  tools: any[];
+  llmConfig: t.LLMConfig;
+}
+
+interface SupervisorConfig {
+  systemPrompt?: string;
+  llmConfig: t.LLMConfig;
+}
+
+export class CollabGraph extends Graph<CollabAgentStateChannels, string> {
+  resetValues(): void {
+    throw new Error('Method not implemented.');
+  }
+  getFinalMessage(): AIMessageChunk | undefined {
+    throw new Error('Method not implemented.');
+  }
+  generateStepId(stepKey: string): [string, number] {
+    throw new Error('Method not implemented.');
+  }
+  getKeyList(metadata: Record<string, unknown> | undefined): (string | number | undefined)[] {
+    throw new Error('Method not implemented.');
+  }
+  getStepKey(metadata: Record<string, unknown> | undefined): string {
+    throw new Error('Method not implemented.');
+  }
+  checkKeyList(keyList: (string | number | undefined)[]): boolean {
+    throw new Error('Method not implemented.');
+  }
+  getStepIdByKey(stepKey: string, index?: number): string {
+    throw new Error('Method not implemented.');
+  }
+  getRunStep(stepId: string): t.RunStep | undefined {
+    throw new Error('Method not implemented.');
+  }
+  dispatchRunStep(stepKey: string, stepDetails: t.StepDetails): void {
+    throw new Error('Method not implemented.');
+  }
+  dispatchRunStepDelta(id: string, delta: t.ToolCallDelta): void {
+    throw new Error('Method not implemented.');
+  }
+  dispatchMessageDelta(id: string, delta: t.MessageDelta): void {
+    throw new Error('Method not implemented.');
+  }
+  private graph: t.CompiledWorkflow<CollabAgentStateChannels, Partial<CollabAgentStateChannels>, string> | null = null;
+  private members: t.Member[];
+  private supervisorConfig: SupervisorConfig;
+  private supervisorChain: Runnable | null = null;
+
+  constructor(members: t.Member[], supervisorConfig: SupervisorConfig) {
+    super();
+    this.members = members;
+    this.supervisorConfig = supervisorConfig;
+  }
+
+  async initialize(): Promise<void> {
+    const memberNames = this.members.map(member => member.name);
+    const systemPrompt = this.supervisorConfig.systemPrompt || supervisorPrompt;
+    const options = [END, ...memberNames];
+    this.supervisorChain = await this.createSupervisorChain(systemPrompt, options);
+  }
+
+  createGraphState(): StateGraphArgs<CollabAgentStateChannels>['channels'] {
+    return {
+      messages: {
+        value: (x?: BaseMessage[], y?: BaseMessage[]) => (x ?? []).concat(y ?? []),
+        default: () => [],
+      },
+      next: {
+        value: (x?: string, y?: string) => y ?? x ?? END,
+        default: () => END,
+      },
+    };
+  }
+
+  initializeTools(tools: StructuredTool[]): any {
+    // This method is not used in the collaborative graph
+    return null;
+  }
+
+  initializeModel(provider: Providers, clientOptions: Record<string, unknown>, tools: any[]) {
+    const LLMClass = getChatModelClass(provider);
+    if (!LLMClass) {
+      throw new Error(`Unsupported LLM provider: ${provider}`);
+    }
+    return new LLMClass(clientOptions);
+  }
+
+  createCallModel(boundModel: any) {
+    // This method is not directly used in the collaborative graph
+    return async (state: CollabAgentStateChannels, config?: RunnableConfig) => {
+      return { messages: [] };
+    };
+  }
+
+  private async createAgent(
+    llmConfig: t.LLMConfig,
+    tools: any[],
+    systemPrompt: string
+  ): Promise<AgentExecutor> {
+    const { provider, ...clientOptions } = llmConfig;
+    const LLMClass = getChatModelClass(provider);
+    if (!LLMClass) {
+      throw new Error(`Unsupported LLM provider: ${provider}`);
+    }
+    const llm = new LLMClass(clientOptions);
+
+    const prompt = await ChatPromptTemplate.fromMessages([
+      ['system', systemPrompt],
+      new MessagesPlaceholder('messages'),
+      new MessagesPlaceholder('agent_scratchpad'),
+    ]);
+    const agent = await createOpenAIToolsAgent({ llm, tools, prompt });
+    return new AgentExecutor({ agent, tools });
+  }
+
+  createWorkflow(
+    graphState: StateGraphArgs<CollabAgentStateChannels>['channels'],
+    callModel?: any,
+    toolNode?: any
+  ): t.CompiledWorkflow<CollabAgentStateChannels, Partial<CollabAgentStateChannels>, string> {
+    if (!this.supervisorChain) {
+      throw new Error('CollabGraph not initialized. Call initialize() first.');
+    }
+
+    const workflow = new StateGraph<CollabAgentStateChannels, Partial<CollabAgentStateChannels>, string>({
+      channels: graphState,
+    });
+
+    // Dynamically create agents and add nodes for each member
+    for (const member of this.members) {
+      const node = async (
+        state: CollabAgentStateChannels,
+        config?: RunnableConfig,
+      ) => {
+        const agent = await this.createAgent(member.llmConfig, member.tools, member.systemPrompt);
+        const agentPromise = agent.invoke(state, config);
+
+        // Store the promise in the state
+        await this.graph?.updateState(config ?? {}, {
+          [`${member.name}Promise`]: agentPromise,
+        });
+
+        const result = await agentPromise;
+        return {
+          messages: [
+            new HumanMessage({ content: result.output, name: member.name }),
+          ],
+        };
+      };
+      workflow.addNode(member.name, node);
+      workflow.addEdge(member.name, 'supervisor');
+    }
+
+    const supervisorNode = async (
+      state: CollabAgentStateChannels,
+      config?: RunnableConfig,
+    ) => {
+      // Get the current state
+      const currentState = await this.graph?.getState(config ?? {});
+
+      // Wait for all member promises to resolve
+      const memberPromises = this.members.map(member => currentState?.[`${member.name}Promise` as keyof typeof currentState]);
+      await Promise.all(memberPromises);
+
+      // Clear the promises for the next iteration
+      for (const member of this.members) {
+        await this.graph?.updateState(config ?? {}, {
+          [`${member.name}Promise`]: undefined,
+        });
+      }
+
+      const result = await this.supervisorChain?.invoke(state, config);
+      return result;
+    };
+
+    workflow.addNode('supervisor', supervisorNode);
+
+    workflow.addConditionalEdges(
+      'supervisor',
+      (x: CollabAgentStateChannels) => x.next,
+    );
+
+    workflow.addEdge(START, 'supervisor');
+
+    const memory = new MemorySaver();
+    this.graph = workflow.compile({ checkpointer: memory });
+    return this.graph;
+  }
+
+  private async createSupervisorChain(systemPrompt: string, options: string[]): Promise<Runnable> {
+    const functionDef = {
+      name: 'route',
+      description: 'Select the next role.',
+      parameters: {
+        title: 'routeSchema',
+        type: 'object',
+        properties: {
+          next: {
+            title: 'Next',
+            anyOf: [
+              { enum: options },
+            ],
+          },
+        },
+        required: ['next'],
+      },
+    };
+
+    const toolDef = {
+      type: 'function',
+      function: functionDef,
+    } as const;
+
+    const prompt = ChatPromptTemplate.fromMessages([
+      ['system', systemPrompt],
+      new MessagesPlaceholder('messages'),
+      [
+        'system',
+        'Given the conversation above, who should act next?' +
+          ' Or should we FINISH? Select one of: {options}',
+      ],
+    ]);
+
+    const formattedPrompt = await prompt.partial({
+      options: options.join(', '),
+      members: this.members.map(m => m.name).join(', '),
+    });
+
+    const { provider, ...clientOptions } = this.supervisorConfig.llmConfig;
+    const LLMClass = getChatModelClass(provider);
+    if (!LLMClass) {
+      throw new Error(`Unsupported LLM provider for supervisor: ${provider}`);
+    }
+    const llm = new LLMClass(clientOptions);
+
+    return formattedPrompt
+      .pipe(llm.bindTools(
+        [toolDef],
+        {
+          tool_choice: { type: 'function', function: { name: 'route' } } as any,
+        },
+      ))
+      .pipe(new JsonOutputToolsParser())
+      .pipe((x: any) => (x[0].args));
+  }
+}
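A rough end-to-end sketch for the CollabGraph class above: the member objects follow the CollabMember interface, but the exact t.LLMConfig shape is not shown in this excerpt, so the config values here are assumptions (hence the casts). The thread_id is required because createWorkflow compiles the graph with a MemorySaver checkpointer.

```ts
// Hedged usage sketch, not from the package: construct, initialize, and invoke
// the collaborative graph once.
import { HumanMessage } from '@langchain/core/messages';
import { CollabGraph } from './CollabGraph';

async function runCollabDemo(): Promise<void> {
  // Placeholder member: a real member would normally carry at least one tool.
  const members = [
    { name: 'researcher', systemPrompt: 'You research topics thoroughly.', tools: [], llmConfig: { provider: 'openai', model: 'gpt-4o-mini' } },
  ] as any;
  const collab = new CollabGraph(members, { llmConfig: { provider: 'openai', model: 'gpt-4o-mini' } as any });

  await collab.initialize();
  const graph = collab.createWorkflow(collab.createGraphState());

  // MemorySaver checkpointing requires a thread_id in the run config.
  const finalState = await graph.invoke(
    { messages: [new HumanMessage('Outline a short research plan.')], next: '' },
    { configurable: { thread_id: 'collab-demo' } },
  );
  console.dir(finalState, { depth: null });
}
```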

@@ -0,0 +1,243 @@
+// src/graphs/TaskManager.ts
+import { JsonOutputToolsParser } from 'langchain/output_parsers';
+import { AgentExecutor, createOpenAIToolsAgent } from 'langchain/agents';
+import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages';
+import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts';
+import { END, START, StateGraph, MemorySaver } from '@langchain/langgraph';
+import type { Runnable, RunnableConfig } from '@langchain/core/runnables';
+import type { ToolNode } from '@langchain/langgraph/prebuilt';
+import type { StructuredTool } from '@langchain/core/tools';
+import type { StateGraphArgs } from '@langchain/langgraph';
+import type * as t from '@/types';
+import {
+  taskManagerPrompt,
+  endProcessFunctionParameters,
+  endProcessFunctionDescription,
+  assignTasksFunctionParameters,
+  assignTasksFunctionDescription,
+} from '@/prompts/taskmanager';
+import { getChatModelClass } from '@/llm/providers';
+import { Providers } from '@/common';
+import { Graph } from '../graphs/Graph';
+
+export interface TaskManagerStateChannels {
+  messages: BaseMessage[];
+  tasks: Task[];
+  completedTasks: string[];
+  next: string;
+}
+
+export interface Task {
+  member: string;
+  description: string;
+  tool?: string;
+}
+
+export interface TaskMember {
+  name: string;
+  systemPrompt: string;
+  tools: StructuredTool[];
+  llmConfig: t.LLMConfig;
+}
+
+interface SupervisorConfig {
+  systemPrompt?: string;
+  llmConfig: t.LLMConfig;
+}
+
+export class TaskManager extends Graph<TaskManagerStateChannels, string> {
+  initializeTools(tools: StructuredTool[]): ToolNode<TaskManagerStateChannels> {
+    throw new Error('Method not implemented.');
+  }
+  initializeModel(provider: Providers, clientOptions: Record<string, unknown>, tools: StructuredTool[]) {
+    throw new Error('Method not implemented.');
+  }
+  createCallModel(boundModel: any): (state: TaskManagerStateChannels, config?: RunnableConfig) => Promise<Partial<TaskManagerStateChannels>> {
+    throw new Error('Method not implemented.');
+  }
+  private graph: t.CompiledWorkflow<TaskManagerStateChannels, Partial<TaskManagerStateChannels>, string> | null = null;
+  private members: TaskMember[];
+  private supervisorConfig: SupervisorConfig;
+  private supervisorChain: Runnable | null = null;
+
+  constructor(members: TaskMember[], supervisorConfig: SupervisorConfig) {
+    super();
+    this.members = members;
+    this.supervisorConfig = supervisorConfig;
+  }
+
+  async initialize(): Promise<void> {
+    const memberNames = this.members.map(member => member.name);
+    const systemPrompt = this.supervisorConfig.systemPrompt || taskManagerPrompt;
+    this.supervisorChain = await this.createSupervisorChain(systemPrompt, memberNames);
+  }
+
+  createGraphState(): StateGraphArgs<TaskManagerStateChannels>['channels'] {
+    return {
+      messages: {
+        value: (x?: BaseMessage[], y?: BaseMessage[]) => (x ?? []).concat(y ?? []),
+        default: () => [],
+      },
+      tasks: {
+        value: (x?: Task[], y?: Task[]) => y ?? x ?? [],
+        default: () => [],
+      },
+      completedTasks: {
+        value: (x?: string[], y?: string[]) => [...new Set([...(x ?? []), ...(y ?? [])])],
+        default: () => [],
+      },
+      next: {
+        value: (x?: string, y?: string) => y ?? x ?? END,
+        default: () => END,
+      },
+    };
+  }
+
+  private async createAgent(
+    llmConfig: t.LLMConfig,
+    tools: StructuredTool[],
+    systemPrompt: string
+  ): Promise<AgentExecutor> {
+    const { provider, ...clientOptions } = llmConfig;
+    const LLMClass = getChatModelClass(provider);
+    if (!LLMClass) {
+      throw new Error(`Unsupported LLM provider: ${provider}`);
+    }
+    const llm = new LLMClass(clientOptions);
+
+    const prompt = ChatPromptTemplate.fromMessages([
+      ['system', systemPrompt],
+      new MessagesPlaceholder('chat_history'),
+      ['human', '{input}'],
+      new MessagesPlaceholder('agent_scratchpad'),
+    ]);
+    const agent = await createOpenAIToolsAgent({ llm, tools, prompt });
+    return new AgentExecutor({ agent, tools });
+  }
+
+  createWorkflow(
+    graphState: StateGraphArgs<TaskManagerStateChannels>['channels'],
+    callModel?: any,
+  ): t.CompiledWorkflow<TaskManagerStateChannels, Partial<TaskManagerStateChannels>, string> {
+    if (!this.supervisorChain) {
+      throw new Error('TaskManager not initialized. Call initialize() first.');
+    }
+
+    const workflow = new StateGraph<TaskManagerStateChannels, Partial<TaskManagerStateChannels>, string>({
+      channels: graphState,
+    });
+
+    const supervisorNode = async (
+      state: TaskManagerStateChannels,
+      config?: RunnableConfig,
+    ) => {
+      const result = await this.supervisorChain?.invoke(state, config) as { tasks?: Task[], reason?: string };
+      console.log('Supervisor Node Output:', result);
+
+      if (result && result.reason) {
+        console.log('Process ending. Reason:', result.reason);
+        return { next: END };
+      }
+
+      const newTasks = (result.tasks || []).filter(task =>
+        !state.completedTasks.includes(`${task.member}:${task.description}`)
+      );
+
+      return {
+        tasks: newTasks,
+        next: newTasks.length > 0 ? 'execute_tasks' : END,
+      };
+    };
+
+    const executeTasksNode = async (
+      state: TaskManagerStateChannels,
+      config?: RunnableConfig,
+    ) => {
+      const results: BaseMessage[] = [];
+      const completedTasks: string[] = [];
+
+      for (const task of state.tasks) {
+        const member = this.members.find(m => m.name === task.member);
+        if (!member) {
+          throw new Error(`TaskMember ${task.member} not found`);
+        }
+
+        const agent = await this.createAgent(member.llmConfig, member.tools, member.systemPrompt);
+        const taskMessage = new HumanMessage(`Task: ${task.description}${task.tool ? ` Use the ${task.tool} tool.` : ''}`);
+        const result = await agent.invoke({
+          input: taskMessage.content,
+          chat_history: state.messages,
+        }, config);
+
+        results.push(new AIMessage({ content: result.output, name: task.member }));
+        completedTasks.push(`${task.member}:${task.description}`);
+      }
+
+      return {
+        messages: state.messages.concat(results),
+        completedTasks: state.completedTasks.concat(completedTasks),
+        tasks: [],
+        next: 'supervisor',
+      };
+    };
+
+    workflow.addNode('supervisor', supervisorNode);
+    workflow.addNode('execute_tasks', executeTasksNode);
+
+    workflow.addEdge(START, 'supervisor');
+    workflow.addConditionalEdges(
+      'supervisor',
+      (x: TaskManagerStateChannels) => x.next,
+    );
+    workflow.addEdge('execute_tasks', 'supervisor');
+
+    const memory = new MemorySaver();
+    this.graph = workflow.compile({ checkpointer: memory });
+    return this.graph;
+  }
+
+  private async createSupervisorChain(systemPrompt: string, memberNames: string[]): Promise<Runnable> {
+    const assignTasksDef = {
+      name: 'assign_tasks',
+      description: assignTasksFunctionDescription,
+      parameters: assignTasksFunctionParameters,
+    };
+
+    const endProcessDef = {
+      name: 'end_process',
+      description: endProcessFunctionDescription,
+      parameters: endProcessFunctionParameters,
+    };
+
+    const toolDefs = [
+      { type: 'function', function: assignTasksDef },
+      { type: 'function', function: endProcessDef },
+    ];
+
+    const prompt = ChatPromptTemplate.fromMessages([
+      ['system', systemPrompt],
+      new MessagesPlaceholder('messages'),
+      new MessagesPlaceholder('completedTasks'),
+      [
+        'human',
+        'Based on the conversation above and the completed tasks, either assign new tasks using the \'assign_tasks\' function or end the process using the \'end_process\' function if the user\'s request is fulfilled. Assign only the most essential tasks to minimize the number of turns. Do not repeat tasks that have already been completed.',
+      ],
+    ]);
+
+    const formattedPrompt = await prompt.partial({
+      members: memberNames.join(', '),
+    });
+
+    const { provider, ...clientOptions } = this.supervisorConfig.llmConfig;
+    const LLMClass = getChatModelClass(provider);
+    if (!LLMClass) {
+      throw new Error(`Unsupported LLM provider for supervisor: ${provider}`);
+    }
+    const llm = new LLMClass(clientOptions);
+
+    return formattedPrompt
+      .pipe(llm.bindTools(toolDefs))
+      .pipe(new JsonOutputToolsParser())
+      .pipe((x: any) => x[0].args);
+  }
+}
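Similarly, a hedged sketch of driving the TaskManager class above; the member and llmConfig values are placeholders, and the initial state mirrors TaskManagerStateChannels.

```ts
// Hedged usage sketch, not from the package: run the compiled TaskManager
// workflow once over an initial state.
import { HumanMessage } from '@langchain/core/messages';
import { TaskManager } from './TaskManager';

async function runTaskManagerDemo(): Promise<void> {
  // Placeholder member: a real TaskMember would normally carry StructuredTool instances.
  const members = [
    { name: 'analyst', systemPrompt: 'You analyze information and report findings.', tools: [], llmConfig: { provider: 'openai', model: 'gpt-4o-mini' } },
  ] as any;
  const manager = new TaskManager(members, { llmConfig: { provider: 'openai', model: 'gpt-4o-mini' } as any });

  await manager.initialize();
  const graph = manager.createWorkflow(manager.createGraphState());

  // MemorySaver checkpointing requires a thread_id in the run config.
  const finalState = await graph.invoke(
    { messages: [new HumanMessage('Summarize the key risks in one paragraph.')], tasks: [], completedTasks: [], next: '' },
    { configurable: { thread_id: 'taskmanager-demo' } },
  );
  console.dir(finalState, { depth: null });
}
```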