@jterrazz/intelligence 1.1.1 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +40 -35
- package/dist/adapters/agents/chat-agent.adapter.js +9 -6
- package/dist/adapters/agents/chat-agent.adapter.js.map +1 -1
- package/dist/adapters/models/openrouter-model.adapter.js +6 -0
- package/dist/adapters/models/openrouter-model.adapter.js.map +1 -1
- package/dist/adapters/prompts/__tests__/presets.test.js +4 -4
- package/dist/adapters/prompts/__tests__/presets.test.js.map +1 -1
- package/dist/adapters/prompts/library/categories/response.d.ts +1 -1
- package/dist/adapters/prompts/library/categories/response.js +1 -1
- package/dist/adapters/prompts/library/categories/response.js.map +1 -1
- package/dist/adapters/prompts/library/index.d.ts +3 -3
- package/dist/adapters/prompts/library/index.js +4 -4
- package/dist/adapters/prompts/library/index.js.map +1 -1
- package/dist/adapters/prompts/library/presets.js +4 -4
- package/dist/adapters/prompts/library/presets.js.map +1 -1
- package/dist/index.cjs +23 -14
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/README.md
CHANGED
@@ -31,13 +31,13 @@ Get your first agent running in under a minute. This example uses a preset to cr
```typescript
import {
    ChatAgentAdapter,
-
+    OpenRouterAdapter,
    SystemPromptAdapter,
-
+    PROMPT_LIBRARY,
} from '@jterrazz/intelligence';

// 1. Set up the model provider
- const model = new
+ const model = new OpenRouterAdapter({
    apiKey: process.env.OPENROUTER_API_KEY!, // Make sure to set this environment variable
    modelName: 'anthropic/claude-3.5-sonnet',
});
@@ -45,7 +45,7 @@ const model = new OpenRouterModelAdapter({
// 2. Create an agent using a preset prompt
const agent = new ChatAgentAdapter('discord-bot', {
    model,
-   systemPrompt: new SystemPromptAdapter(
+   systemPrompt: new SystemPromptAdapter(PROMPT_LIBRARY.PRESETS.COMMUNITY_ANIMATOR),
});

// 3. Run the agent
@@ -63,10 +63,14 @@ console.log(response);

Instead of writing monolithic prompts, the library provides a collection of composable string constants. Mix and match them to build a precise, fine-grained system prompt that defines your agent's behavior.

- - **`
- - **`
- - **`
- - **`
+ - **`FOUNDATIONS`**: Core, non-negotiable rules (e.g., `PROMPT_LIBRARY.FOUNDATIONS.ETHICAL_CONDUCT`).
+ - **`PERSONAS`**: The agent's identity and purpose (e.g., `PROMPT_LIBRARY.PERSONAS.EXPERT_ADVISOR`).
+ - **`DOMAINS`**: The agent's area of expertise (e.g., `PROMPT_LIBRARY.DOMAINS.SOFTWARE_ENGINEERING`).
+ - **`TONES`**: The emotional flavor of communication (e.g., `PROMPT_LIBRARY.TONES.PROFESSIONAL`).
+ - **`FORMATS`**: The structural format of the output (e.g., `PROMPT_LIBRARY.FORMATS.JSON`).
+ - **`LANGUAGES`**: The natural language for the response (e.g., `PROMPT_LIBRARY.LANGUAGES.ENGLISH_NATIVE`).
+ - **`VERBOSITY`**: The level of detail in the response (e.g., `PROMPT_LIBRARY.VERBOSITY.DETAILED`).
+ - **`RESPONSES`**: The strategic approach to responding (e.g., `PROMPT_LIBRARY.RESPONSES.ALWAYS_ENGAGE`).

This approach makes agent behavior more predictable and easier to modify.
@@ -100,7 +104,7 @@ The adapter handles errors gracefully and integrates seamlessly with the agent,
The library is built on a hexagonal architecture.

- **Ports (`/ports`)**: Define the contracts (interfaces) for core components like `Agent`, `Model`, and `Tool`.
- - **Adapters (`/adapters`)**: Provide concrete implementations. For example, `ChatAgentAdapter` is an adapter that uses LangChain, and `
+ - **Adapters (`/adapters`)**: Provide concrete implementations. For example, `ChatAgentAdapter` is an adapter that uses LangChain, and `OpenRouterAdapter` is an adapter for the OpenRouter API.

This separation of concerns means you can easily create your own adapters to support different models or services without changing the application's core logic.
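That extension point is small. Judging from the bundled sources later in this diff, a model adapter only has to satisfy the `ModelPort` contract, i.e. expose `getModel()` returning a LangChain `BaseLanguageModel`. A rough, illustrative sketch (it assumes the port types are re-exported from the package entry point; adjust the import if they are not):

```typescript
import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { ChatOpenAI } from '@langchain/openai';
// Assumption: ModelPort is importable from the package root.
import type { ModelPort } from '@jterrazz/intelligence';

// Minimal hand-rolled adapter: anything that can hand back a LangChain model
// satisfies the contract the agents consume.
export class PlainOpenAIAdapter implements ModelPort {
    private readonly model: BaseLanguageModel;

    constructor(apiKey: string, modelName = 'gpt-4o-mini') {
        // 'gpt-4o-mini' is only a placeholder model name for this sketch.
        this.model = new ChatOpenAI({ modelName, openAIApiKey: apiKey });
    }

    getModel(): BaseLanguageModel {
        return this.model;
    }
}
```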
@@ -121,25 +125,25 @@ This recipe creates an agent that acts as an expert software engineer, providing
```typescript
import {
    ChatAgentAdapter,
-
+    OpenRouterAdapter,
    SystemPromptAdapter,
    UserPromptAdapter,
-
+    PROMPT_LIBRARY,
} from '@jterrazz/intelligence';

- const model = new
+ const model = new OpenRouterAdapter({
    apiKey: process.env.OPENROUTER_API_KEY!,
    modelName: 'anthropic/claude-3.5-sonnet',
});

// 1. Compose the system prompt from multiple parts (using rest arguments)
const systemPrompt = new SystemPromptAdapter(
-
-
-
-
-
-
+    PROMPT_LIBRARY.PERSONAS.EXPERT_ADVISOR,
+    PROMPT_LIBRARY.DOMAINS.SOFTWARE_ENGINEERING,
+    PROMPT_LIBRARY.TONES.PROFESSIONAL,
+    PROMPT_LIBRARY.VERBOSITY.DETAILED,
+    PROMPT_LIBRARY.FORMATS.MARKDOWN,
+    PROMPT_LIBRARY.FOUNDATIONS.FACTUAL_ACCURACY,
);

// 2. Create the user request (using a single array)
@@ -166,22 +170,22 @@ This example shows how to use the simpler `QueryAgentAdapter` for one-shot respo
```typescript
import {
    QueryAgentAdapter,
-
+    OpenRouterAdapter,
    SystemPromptAdapter,
    UserPromptAdapter,
-
+    PROMPT_LIBRARY,
} from '@jterrazz/intelligence';

- const model = new
+ const model = new OpenRouterAdapter({
    apiKey: process.env.OPENROUTER_API_KEY!,
    modelName: 'anthropic/claude-3.5-sonnet',
});

// 1. Create a simple system prompt for text processing
const systemPrompt = new SystemPromptAdapter(
-
-
-
+    PROMPT_LIBRARY.PERSONAS.EXPERT_ADVISOR,
+    PROMPT_LIBRARY.TONES.PROFESSIONAL,
+    PROMPT_LIBRARY.FORMATS.MARKDOWN,
    'You are a helpful assistant that improves text clarity and grammar.',
);
@@ -208,14 +212,14 @@ This example shows how to use `QueryAgentAdapter` with schema parsing for struct
```typescript
import {
    QueryAgentAdapter,
-
+    OpenRouterAdapter,
    SystemPromptAdapter,
    UserPromptAdapter,
-
+    PROMPT_LIBRARY,
} from '@jterrazz/intelligence';
import { z } from 'zod/v4';

- const model = new
+ const model = new OpenRouterAdapter({
    apiKey: process.env.OPENROUTER_API_KEY!,
    modelName: 'anthropic/claude-3.5-sonnet',
});
@@ -230,9 +234,9 @@ const extractionSchema = z.object({

// 2. Create a system prompt for data extraction
const systemPrompt = new SystemPromptAdapter(
-
-
-
+    PROMPT_LIBRARY.PERSONAS.EXPERT_ADVISOR,
+    PROMPT_LIBRARY.TONES.PROFESSIONAL,
+    PROMPT_LIBRARY.FORMATS.JSON,
    'You extract contact information from text and return it as JSON.',
);
@@ -263,10 +267,11 @@ This example shows how to give an agent a tool and have it respond to a user que
```typescript
import {
    ChatAgentAdapter,
-
+    OpenRouterAdapter,
    SafeToolAdapter,
    SystemPromptAdapter,
-
+    UserPromptAdapter,
+    PROMPT_LIBRARY,
} from '@jterrazz/intelligence';
import { z } from 'zod/v4';
@@ -288,12 +293,12 @@ const weatherTool = new SafeToolAdapter(
// 2. Create an agent that knows how to use tools
const agent = new ChatAgentAdapter('weather-bot', {
    model,
-   systemPrompt: new SystemPromptAdapter(
+   systemPrompt: new SystemPromptAdapter(PROMPT_LIBRARY.PRESETS.EMPATHETIC_SUPPORT_AGENT), // A good general-purpose preset
    tools: [weatherTool], // Pass the tool instance directly
});

// 3. Run the agent with a user query that requires the tool
- const response = await agent.run(
+ const response = await agent.run(new UserPromptAdapter("What's the weather like in Boston?"));

console.log(response);
// Expected output: "The weather in Boston is 75°F and sunny."
@@ -314,7 +319,7 @@ console.log(response);
| `SystemPromptAdapter` | A simple adapter to generate a system prompt string from a prompt array. |
| `UserPromptAdapter` | A simple adapter to generate a user prompt string from a prompt array. |
| `AIResponseParser` | A utility to parse a model's string output into a typed object using Zod. |
- | `
+ | `PROMPT_LIBRARY` | A frozen object containing the entire composable prompt library. |

## Contributing
package/dist/adapters/agents/chat-agent.adapter.js
CHANGED
@@ -160,7 +160,7 @@ function _ts_generator(thisArg, body) {
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { AgentExecutor, createStructuredChatAgent } from 'langchain/agents';
import { AIResponseParser } from '../utils/ai-response-parser.js';
-
var SYSTEM_PROMPT_TEMPLATE = '\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<OUTPUT_FORMAT>\nCRITICAL: The format instructions in this section are the ONLY valid way to structure your response. Any formatting guidelines within the <OBJECTIVE> section
+
var SYSTEM_PROMPT_TEMPLATE = '\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<OUTPUT_FORMAT>\nCRITICAL: The format instructions in this section are the ONLY valid way to structure your response. Your entire response MUST be a single JSON markdown code block. Any formatting guidelines within the <OBJECTIVE> section apply ONLY to the content inside the "RESPOND:" part of your final "action_input".\n\nREQUIRED: You have two ways to respond:\n\n1. **Call a tool** to gather information. For this, you MUST output a JSON blob with the tool\'s name and its input.\n *Valid tool names are: {tool_names}*\n ```json\n {{\n "action": "tool_name_to_use",\n "action_input": "the input for the tool, or an empty object {{}} if no input is needed"\n }}\n ```\n\n2. **Provide the Final Answer** once you have enough information. For this, you MUST output a JSON blob with the "Final Answer" action.\n The "action_input" for a "Final Answer" MUST be a string that begins with either "RESPOND: " for a message or "SILENT: " for no message. This prefix is a literal part of the output string and MUST NOT be omitted.\n - To send a message:\n ```json\n {{\n "action": "Final Answer",\n "action_input": "RESPOND: <your response message>"\n }}\n ```\n - To stay silent:\n ```json\n {{\n "action": "Final Answer",\n "action_input": "SILENT: <your reason for staying silent>"\n }}\n ```\n\n YOU MUST ALWAYS INCLUDE "RESPOND:" OR "SILENT:" IN YOUR FINAL ANSWER\'S "action_input". FAILURE TO DO SO WILL CAUSE AN ERROR.\n</OUTPUT_FORMAT>\n\n<EXECUTION_CONTEXT>\nThis is internal data for your reference.\n\n<TOOLS>\n{tools}\n</TOOLS>\n\n<WORKING_MEMORY>\nThis is your internal thought process and previous tool usage.\n{agent_scratchpad}\n</WORKING_MEMORY>\n</EXECUTION_CONTEXT>\n';
/**
 * An advanced agent that uses tools and a structured prompt to engage in conversational chat.
 * It can decide whether to respond or remain silent and supports schema-validated responses.
@@ -213,6 +213,12 @@ var SYSTEM_PROMPT_TEMPLATE = '\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<
    throw new Error('Agent returned an invalid result structure.');
}
agentResponse = this.parseAgentOutput(result.output);
+ if (!agentResponse) {
+     return [
+         2,
+         null
+     ];
+ }
if (!agentResponse.shouldRespond) {
    ;
    (_this_options_logger2 = this.options.logger) === null || _this_options_logger2 === void 0 ? void 0 : _this_options_logger2.info("[".concat(this.name, "] Agent chose to remain silent."), {
@@ -318,13 +324,10 @@ var SYSTEM_PROMPT_TEMPLATE = '\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<
        shouldRespond: false
    };
}
- (_this_options_logger = this.options.logger) === null || _this_options_logger === void 0 ? void 0 : _this_options_logger.
+ (_this_options_logger = this.options.logger) === null || _this_options_logger === void 0 ? void 0 : _this_options_logger.error("[".concat(this.name, "] Agent output was missing 'RESPOND:' or 'SILENT:' prefix."), {
    rawOutput: output
});
- return
-     message: text,
-     shouldRespond: true
- };
+ return null;
}
},
{
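The two hunks above change what happens when the model's final answer carries neither the `RESPOND:` nor the `SILENT:` prefix: 1.1.1 logged a warning and passed the raw text through as a response, while 1.2.0 logs an error and makes `run()` resolve to `null`. A self-contained paraphrase of the new parsing contract, based on the updated sources in the map below (the standalone function shape is illustrative; the real logic is a private method of `ChatAgentAdapter`):

```typescript
type AgentDecision = { message?: string; reason?: string; shouldRespond: boolean };

// Paraphrased from the 1.2.0 sources; not an exported API.
function parseAgentOutput(output: string): AgentDecision | null {
    const text = output.trim();

    const respondMatch = text.match(/^RESPOND:\s*([\s\S]+)$/i);
    if (respondMatch) {
        return { message: respondMatch[1].trim(), shouldRespond: true };
    }

    const silentMatch = text.match(/^SILENT:\s*([\s\S]+)$/i);
    if (silentMatch) {
        return { reason: silentMatch[1].trim(), shouldRespond: false };
    }

    // 1.1.1 fell back to { message: text, shouldRespond: true } here;
    // 1.2.0 gives up, and the calling run() resolves to null.
    return null;
}
```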
package/dist/adapters/agents/chat-agent.adapter.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../../../src/adapters/agents/chat-agent.adapter.ts"],"sourcesContent":["import { type LoggerPort } from '@jterrazz/logger';\nimport { ChatPromptTemplate } from '@langchain/core/prompts';\nimport { AgentExecutor, createStructuredChatAgent } from 'langchain/agents';\nimport { type z } from 'zod/v4';\n\nimport { type AgentPort } from '../../ports/agent.port.js';\nimport type { ModelPort } from '../../ports/model.port.js';\nimport type { PromptPort } from '../../ports/prompt.port.js';\nimport type { ToolPort } from '../../ports/tool.port.js';\n\nimport { AIResponseParser } from '../utils/ai-response-parser.js';\n\nimport type { SystemPromptAdapter } from '../prompts/system-prompt.adapter.js';\n\nexport interface ChatAgentOptions<T = unknown> {\n logger?: LoggerPort;\n model: ModelPort;\n schema?: z.ZodSchema<T>;\n systemPrompt: SystemPromptAdapter;\n tools: ToolPort[];\n verbose?: boolean;\n}\n\nconst SYSTEM_PROMPT_TEMPLATE = `\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<OUTPUT_FORMAT>\nCRITICAL: The format instructions in this section are the ONLY valid way to structure your response. Any formatting guidelines within the <OBJECTIVE> section (like message templates) apply ONLY to the content that goes inside the \"RESPOND: \" part of your final answer.\n\nYou have two ways to respond:\n\n1. **Call a tool** to gather information. For this, you MUST output a JSON blob with the tool's name and its input.\n *Valid tool names are: {tool_names}*\n \\`\\`\\`json\n {{\n \"action\": \"tool_name_to_use\",\n \"action_input\": \"the input for the tool, or an empty object {{}} if no input is needed\"\n }}\n \\`\\`\\`\n\n2. **Provide the Final Answer** once you have enough information. For this, you MUST output a JSON blob with the \"Final Answer\" action. 
The input must start with \"RESPOND: \" or \"SILENT: \".\n - To send a message:\n \\`\\`\\`json\n {{\n \"action\": \"Final Answer\",\n \"action_input\": \"RESPOND: <your response message>\"\n }}\n \\`\\`\\`\n - To stay silent:\n \\`\\`\\`json\n {{\n \"action\": \"Final Answer\",\n \"action_input\": \"SILENT: <your reason for staying silent>\"\n }}\n \\`\\`\\`\n</OUTPUT_FORMAT>\n\n<EXECUTION_CONTEXT>\nThis is internal data for your reference.\n\n<TOOLS>\n{tools}\n</TOOLS>\n\n<WORKING_MEMORY>\nThis is your internal thought process and previous tool usage.\n{agent_scratchpad}\n</WORKING_MEMORY>\n</EXECUTION_CONTEXT>\n`;\n\n/**\n * An advanced agent that uses tools and a structured prompt to engage in conversational chat.\n * It can decide whether to respond or remain silent and supports schema-validated responses.\n */\nexport class ChatAgentAdapter<T = unknown> implements AgentPort {\n constructor(\n public readonly name: string,\n private readonly options: ChatAgentOptions<T>,\n ) {}\n\n async run(userPrompt?: PromptPort): Promise<null | string> {\n this.options.logger?.debug(`[${this.name}] Starting chat execution.`);\n\n try {\n const executor = await this.createExecutor();\n const userInput = this.resolveUserInput(userPrompt);\n\n const result = await executor.invoke({ input: userInput });\n\n this.options.logger?.debug(`[${this.name}] Agent execution completed.`, {\n hasOutput: 'output' in result,\n });\n\n if (!result || typeof result.output !== 'string') {\n throw new Error('Agent returned an invalid result structure.');\n }\n\n const agentResponse = this.parseAgentOutput(result.output);\n\n if (!agentResponse.shouldRespond) {\n this.options.logger?.info(`[${this.name}] Agent chose to remain silent.`, {\n reason: agentResponse.reason,\n });\n return null;\n }\n\n const message = agentResponse.message ?? '';\n\n if (this.options.schema) {\n this.validateResponseContent(message, this.options.schema);\n this.options.logger?.info(\n `[${this.name}] Execution finished; response content validated.`,\n );\n } else {\n this.options.logger?.info(`[${this.name}] Execution finished.`);\n }\n\n return message;\n } catch (error) {\n this.options.logger?.error(`[${this.name}] Chat execution failed.`, {\n error: error instanceof Error ? error.message : 'Unknown error',\n });\n return null;\n }\n }\n\n private async createExecutor(): Promise<AgentExecutor> {\n const model = this.options.model.getModel();\n const tools = this.options.tools.map((tool) => tool.getDynamicTool());\n\n const prompt = ChatPromptTemplate.fromMessages([\n [\n 'system',\n SYSTEM_PROMPT_TEMPLATE.replace(\n '{mission_prompt}',\n this.options.systemPrompt.generate(),\n ),\n ],\n ['human', '{input}'],\n ]);\n\n const agent = await createStructuredChatAgent({\n llm: model,\n prompt,\n tools,\n });\n\n return AgentExecutor.fromAgentAndTools({\n agent,\n tools,\n verbose: this.options.verbose,\n });\n }\n\n private parseAgentOutput(output: string): {\n message?: string;\n reason?: string;\n shouldRespond: boolean;\n } {\n const text = output.trim();\n\n const respondMatch = text.match(/^RESPOND:\\s*([\\s\\S]+)$/i);\n if (respondMatch) {\n return { message: respondMatch[1].trim(), shouldRespond: true };\n }\n\n const silentMatch = text.match(/^SILENT:\\s*([\\s\\S]+)$/i);\n if (silentMatch) {\n return { reason: silentMatch[1].trim(), shouldRespond: false };\n }\n\n this.options.logger?.warn(\n `[${this.name}] Agent output was missing 'RESPOND:' or 'SILENT:' prefix. 
Treating as a direct response.`,\n { rawOutput: output },\n );\n\n return { message: text, shouldRespond: true };\n }\n\n private resolveUserInput(userPrompt?: PromptPort): string {\n if (userPrompt) {\n return userPrompt.generate();\n }\n return 'Proceed with your instructions.';\n }\n\n private validateResponseContent<TResponse>(\n content: string,\n schema: z.ZodSchema<TResponse>,\n ): void {\n try {\n new AIResponseParser(schema).parse(content);\n } catch (error) {\n this.options.logger?.error(\n `[${this.name}] Failed to validate response content against schema.`,\n {\n error: error instanceof Error ? error.message : 'Unknown error',\n rawContent: content,\n },\n );\n throw new Error('Invalid response content from model.');\n }\n }\n}\n"],"names":["ChatPromptTemplate","AgentExecutor","createStructuredChatAgent","AIResponseParser","SYSTEM_PROMPT_TEMPLATE","ChatAgentAdapter","name","options","run","userPrompt","executor","userInput","result","agentResponse","message","error","logger","debug","createExecutor","resolveUserInput","invoke","input","hasOutput","output","Error","parseAgentOutput","shouldRespond","info","reason","schema","validateResponseContent","model","tools","prompt","agent","getModel","map","tool","getDynamicTool","fromMessages","replace","systemPrompt","generate","llm","fromAgentAndTools","verbose","text","trim","respondMatch","match","silentMatch","warn","rawOutput","content","parse","rawContent"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AACA,SAASA,kBAAkB,QAAQ,0BAA0B;AAC7D,SAASC,aAAa,EAAEC,yBAAyB,QAAQ,mBAAmB;AAQ5E,SAASC,gBAAgB,QAAQ,iCAAiC;AAalE,IAAMC,yBAA0B;AAkDhC;;;CAGC,GACD,OAAO,IAAA,AAAMC,iCAAN;;aAAMA,iBAEL,AAAgBC,IAAY,EAC5B,AAAiBC,OAA4B;gCAHxCF;;;aAEWC,OAAAA;aACCC,UAAAA;;kBAHZF;;YAMHG,KAAAA;mBAAN,SAAMA,IAAIC,UAAuB;;wBAC7B,sBAQI,uBALMC,UACAC,WAEAC,QAUAC,eAGF,uBAMYA,wBAAVC,SAIF,uBAIA,uBAICC,OACL;;;;iCAtCJ,uBAAA,IAAI,CAACR,OAAO,CAACS,MAAM,cAAnB,2CAAA,qBAAqBC,KAAK,CAAC,AAAC,IAAa,OAAV,IAAI,CAACX,IAAI,EAAC;;;;;;;;;gCAGpB;;oCAAM,IAAI,CAACY,cAAc;;;gCAApCR,WAAW;gCACXC,YAAY,IAAI,CAACQ,gBAAgB,CAACV;gCAEzB;;oCAAMC,SAASU,MAAM,CAAC;wCAAEC,OAAOV;oCAAU;;;gCAAlDC,SAAS;iCAEf,wBAAA,IAAI,CAACL,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBC,KAAK,CAAC,AAAC,IAAa,OAAV,IAAI,CAACX,IAAI,EAAC,iCAA+B;oCACpEgB,WAAW,YAAYV;gCAC3B;gCAEA,IAAI,CAACA,UAAU,OAAOA,OAAOW,MAAM,KAAK,UAAU;oCAC9C,MAAM,IAAIC,MAAM;gCACpB;gCAEMX,gBAAgB,IAAI,CAACY,gBAAgB,CAACb,OAAOW,MAAM;gCAEzD,IAAI,CAACV,cAAca,aAAa,EAAE;;qCAC9B,wBAAA,IAAI,CAACnB,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBW,IAAI,CAAC,AAAC,IAAa,OAAV,IAAI,CAACrB,IAAI,EAAC,oCAAkC;wCACtEsB,QAAQf,cAAce,MAAM;oCAChC;oCACA;;wCAAO;;gCACX;gCAEMd,UAAUD,CAAAA,yBAAAA,cAAcC,OAAO,cAArBD,oCAAAA,yBAAyB;gCAEzC,IAAI,IAAI,CAACN,OAAO,CAACsB,MAAM,EAAE;;oCACrB,IAAI,CAACC,uBAAuB,CAAChB,SAAS,IAAI,CAACP,OAAO,CAACsB,MAAM;qCACzD,wBAAA,IAAI,CAACtB,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBW,IAAI,CACrB,AAAC,IAAa,OAAV,IAAI,CAACrB,IAAI,EAAC;gCAEtB,OAAO;;qCACH,wBAAA,IAAI,CAACC,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBW,IAAI,CAAC,AAAC,IAAa,OAAV,IAAI,CAACrB,IAAI,EAAC;gCAC5C;gCAEA;;oCAAOQ;;;gCACFC;iCACL,wBAAA,IAAI,CAACR,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBD,KAAK,CAAC,AAAC,IAAa,OAAV,IAAI,CAACT,IAAI,EAAC,6BAA2B;oCAChES,OAAOA,AAAK,YAALA,OAAiBS,SAAQT,MAAMD,OAAO,GAAG;gCACpD;gCACA;;oCAAO;;;;;;;;gBAEf;;;;YAEcI,KAAAA;mBAAd,SAAcA;;wBACJa,OACAC,OAEAC,QAWAC;;;;gCAdAH,QAAQ,IAAI,CAACxB,OAAO,CAACwB,KAAK,CAACI,QAAQ;gCACnCH,QAAQ,IAAI,CAACzB,OAAO,CAACyB,KAAK,CAACI,GAAG,CAAC,SAACC;2CAASA,KAAKC,cAAc;;gCAE5DL,SAASjC,mBAAmBuC,YA
AY;;wCAEtC;wCACAnC,uBAAuBoC,OAAO,CAC1B,oBACA,IAAI,CAACjC,OAAO,CAACkC,YAAY,CAACC,QAAQ;;;wCAGzC;wCAAS;;;gCAGA;;oCAAMxC,0BAA0B;wCAC1CyC,KAAKZ;wCACLE,QAAAA;wCACAD,OAAAA;oCACJ;;;gCAJME,QAAQ;gCAMd;;oCAAOjC,cAAc2C,iBAAiB,CAAC;wCACnCV,OAAAA;wCACAF,OAAAA;wCACAa,SAAS,IAAI,CAACtC,OAAO,CAACsC,OAAO;oCACjC;;;;gBACJ;;;;YAEQpB,KAAAA;mBAAR,SAAQA,iBAAiBF,MAAc;oBAiBnC;gBAZA,IAAMuB,OAAOvB,OAAOwB,IAAI;gBAExB,IAAMC,eAAeF,KAAKG,KAAK,CAAC;gBAChC,IAAID,cAAc;oBACd,OAAO;wBAAElC,SAASkC,YAAY,CAAC,EAAE,CAACD,IAAI;wBAAIrB,eAAe;oBAAK;gBAClE;gBAEA,IAAMwB,cAAcJ,KAAKG,KAAK,CAAC;gBAC/B,IAAIC,aAAa;oBACb,OAAO;wBAAEtB,QAAQsB,WAAW,CAAC,EAAE,CAACH,IAAI;wBAAIrB,eAAe;oBAAM;gBACjE;iBAEA,uBAAA,IAAI,CAACnB,OAAO,CAACS,MAAM,cAAnB,2CAAA,qBAAqBmC,IAAI,CACrB,AAAC,IAAa,OAAV,IAAI,CAAC7C,IAAI,EAAC,8FACd;oBAAE8C,WAAW7B;gBAAO;gBAGxB,OAAO;oBAAET,SAASgC;oBAAMpB,eAAe;gBAAK;YAChD;;;YAEQP,KAAAA;mBAAR,SAAQA,iBAAiBV,UAAuB;gBAC5C,IAAIA,YAAY;oBACZ,OAAOA,WAAWiC,QAAQ;gBAC9B;gBACA,OAAO;YACX;;;YAEQZ,KAAAA;mBAAR,SAAQA,wBACJuB,OAAe,EACfxB,MAA8B;gBAE9B,IAAI;oBACA,IAAI1B,iBAAiB0B,QAAQyB,KAAK,CAACD;gBACvC,EAAE,OAAOtC,OAAO;wBACZ;qBAAA,uBAAA,IAAI,CAACR,OAAO,CAACS,MAAM,cAAnB,2CAAA,qBAAqBD,KAAK,CACtB,AAAC,IAAa,OAAV,IAAI,CAACT,IAAI,EAAC,0DACd;wBACIS,OAAOA,AAAK,YAALA,OAAiBS,SAAQT,MAAMD,OAAO,GAAG;wBAChDyC,YAAYF;oBAChB;oBAEJ,MAAM,IAAI7B,MAAM;gBACpB;YACJ;;;WAhISnB;IAiIZ"}
+
{"version":3,"sources":["../../../src/adapters/agents/chat-agent.adapter.ts"],"sourcesContent":["import { type LoggerPort } from '@jterrazz/logger';\nimport { ChatPromptTemplate } from '@langchain/core/prompts';\nimport { AgentExecutor, createStructuredChatAgent } from 'langchain/agents';\nimport { type z } from 'zod/v4';\n\nimport { type AgentPort } from '../../ports/agent.port.js';\nimport type { ModelPort } from '../../ports/model.port.js';\nimport type { PromptPort } from '../../ports/prompt.port.js';\nimport type { ToolPort } from '../../ports/tool.port.js';\n\nimport { AIResponseParser } from '../utils/ai-response-parser.js';\n\nimport type { SystemPromptAdapter } from '../prompts/system-prompt.adapter.js';\n\nexport interface ChatAgentOptions<T = unknown> {\n logger?: LoggerPort;\n model: ModelPort;\n schema?: z.ZodSchema<T>;\n systemPrompt: SystemPromptAdapter;\n tools: ToolPort[];\n verbose?: boolean;\n}\n\nconst SYSTEM_PROMPT_TEMPLATE = `\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<OUTPUT_FORMAT>\nCRITICAL: The format instructions in this section are the ONLY valid way to structure your response. Your entire response MUST be a single JSON markdown code block. Any formatting guidelines within the <OBJECTIVE> section apply ONLY to the content inside the \"RESPOND:\" part of your final \"action_input\".\n\nREQUIRED: You have two ways to respond:\n\n1. **Call a tool** to gather information. For this, you MUST output a JSON blob with the tool's name and its input.\n *Valid tool names are: {tool_names}*\n \\`\\`\\`json\n {{\n \"action\": \"tool_name_to_use\",\n \"action_input\": \"the input for the tool, or an empty object {{}} if no input is needed\"\n }}\n \\`\\`\\`\n\n2. **Provide the Final Answer** once you have enough information. For this, you MUST output a JSON blob with the \"Final Answer\" action.\n The \"action_input\" for a \"Final Answer\" MUST be a string that begins with either \"RESPOND: \" for a message or \"SILENT: \" for no message. This prefix is a literal part of the output string and MUST NOT be omitted.\n - To send a message:\n \\`\\`\\`json\n {{\n \"action\": \"Final Answer\",\n \"action_input\": \"RESPOND: <your response message>\"\n }}\n \\`\\`\\`\n - To stay silent:\n \\`\\`\\`json\n {{\n \"action\": \"Final Answer\",\n \"action_input\": \"SILENT: <your reason for staying silent>\"\n }}\n \\`\\`\\`\n\n YOU MUST ALWAYS INCLUDE \"RESPOND:\" OR \"SILENT:\" IN YOUR FINAL ANSWER'S \"action_input\". 
FAILURE TO DO SO WILL CAUSE AN ERROR.\n</OUTPUT_FORMAT>\n\n<EXECUTION_CONTEXT>\nThis is internal data for your reference.\n\n<TOOLS>\n{tools}\n</TOOLS>\n\n<WORKING_MEMORY>\nThis is your internal thought process and previous tool usage.\n{agent_scratchpad}\n</WORKING_MEMORY>\n</EXECUTION_CONTEXT>\n`;\n\n/**\n * An advanced agent that uses tools and a structured prompt to engage in conversational chat.\n * It can decide whether to respond or remain silent and supports schema-validated responses.\n */\nexport class ChatAgentAdapter<T = unknown> implements AgentPort {\n constructor(\n public readonly name: string,\n private readonly options: ChatAgentOptions<T>,\n ) {}\n\n async run(userPrompt?: PromptPort): Promise<null | string> {\n this.options.logger?.debug(`[${this.name}] Starting chat execution.`);\n\n try {\n const executor = await this.createExecutor();\n const userInput = this.resolveUserInput(userPrompt);\n\n const result = await executor.invoke({ input: userInput });\n\n this.options.logger?.debug(`[${this.name}] Agent execution completed.`, {\n hasOutput: 'output' in result,\n });\n\n if (!result || typeof result.output !== 'string') {\n throw new Error('Agent returned an invalid result structure.');\n }\n\n const agentResponse = this.parseAgentOutput(result.output);\n\n if (!agentResponse) {\n return null;\n }\n\n if (!agentResponse.shouldRespond) {\n this.options.logger?.info(`[${this.name}] Agent chose to remain silent.`, {\n reason: agentResponse.reason,\n });\n return null;\n }\n\n const message = agentResponse.message ?? '';\n\n if (this.options.schema) {\n this.validateResponseContent(message, this.options.schema);\n this.options.logger?.info(\n `[${this.name}] Execution finished; response content validated.`,\n );\n } else {\n this.options.logger?.info(`[${this.name}] Execution finished.`);\n }\n\n return message;\n } catch (error) {\n this.options.logger?.error(`[${this.name}] Chat execution failed.`, {\n error: error instanceof Error ? 
error.message : 'Unknown error',\n });\n return null;\n }\n }\n\n private async createExecutor(): Promise<AgentExecutor> {\n const model = this.options.model.getModel();\n const tools = this.options.tools.map((tool) => tool.getDynamicTool());\n\n const prompt = ChatPromptTemplate.fromMessages([\n [\n 'system',\n SYSTEM_PROMPT_TEMPLATE.replace(\n '{mission_prompt}',\n this.options.systemPrompt.generate(),\n ),\n ],\n ['human', '{input}'],\n ]);\n\n const agent = await createStructuredChatAgent({\n llm: model,\n prompt,\n tools,\n });\n\n return AgentExecutor.fromAgentAndTools({\n agent,\n tools,\n verbose: this.options.verbose,\n });\n }\n\n private parseAgentOutput(output: string): null | {\n message?: string;\n reason?: string;\n shouldRespond: boolean;\n } {\n const text = output.trim();\n\n const respondMatch = text.match(/^RESPOND:\\s*([\\s\\S]+)$/i);\n if (respondMatch) {\n return { message: respondMatch[1].trim(), shouldRespond: true };\n }\n\n const silentMatch = text.match(/^SILENT:\\s*([\\s\\S]+)$/i);\n if (silentMatch) {\n return { reason: silentMatch[1].trim(), shouldRespond: false };\n }\n\n this.options.logger?.error(\n `[${this.name}] Agent output was missing 'RESPOND:' or 'SILENT:' prefix.`,\n { rawOutput: output },\n );\n\n return null;\n }\n\n private resolveUserInput(userPrompt?: PromptPort): string {\n if (userPrompt) {\n return userPrompt.generate();\n }\n return 'Proceed with your instructions.';\n }\n\n private validateResponseContent<TResponse>(\n content: string,\n schema: z.ZodSchema<TResponse>,\n ): void {\n try {\n new AIResponseParser(schema).parse(content);\n } catch (error) {\n this.options.logger?.error(\n `[${this.name}] Failed to validate response content against schema.`,\n {\n error: error instanceof Error ? error.message : 'Unknown error',\n rawContent: content,\n },\n );\n throw new Error('Invalid response content from model.');\n }\n 
}\n}\n"],"names":["ChatPromptTemplate","AgentExecutor","createStructuredChatAgent","AIResponseParser","SYSTEM_PROMPT_TEMPLATE","ChatAgentAdapter","name","options","run","userPrompt","executor","userInput","result","agentResponse","message","error","logger","debug","createExecutor","resolveUserInput","invoke","input","hasOutput","output","Error","parseAgentOutput","shouldRespond","info","reason","schema","validateResponseContent","model","tools","prompt","agent","getModel","map","tool","getDynamicTool","fromMessages","replace","systemPrompt","generate","llm","fromAgentAndTools","verbose","text","trim","respondMatch","match","silentMatch","rawOutput","content","parse","rawContent"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AACA,SAASA,kBAAkB,QAAQ,0BAA0B;AAC7D,SAASC,aAAa,EAAEC,yBAAyB,QAAQ,mBAAmB;AAQ5E,SAASC,gBAAgB,QAAQ,iCAAiC;AAalE,IAAMC,yBAA0B;AAqDhC;;;CAGC,GACD,OAAO,IAAA,AAAMC,iCAAN;;aAAMA,iBAEL,AAAgBC,IAAY,EAC5B,AAAiBC,OAA4B;gCAHxCF;;;aAEWC,OAAAA;aACCC,UAAAA;;kBAHZF;;YAMHG,KAAAA;mBAAN,SAAMA,IAAIC,UAAuB;;wBAC7B,sBAQI,uBALMC,UACAC,WAEAC,QAUAC,eAOF,uBAMYA,wBAAVC,SAIF,uBAIA,uBAICC,OACL;;;;iCA1CJ,uBAAA,IAAI,CAACR,OAAO,CAACS,MAAM,cAAnB,2CAAA,qBAAqBC,KAAK,CAAC,AAAC,IAAa,OAAV,IAAI,CAACX,IAAI,EAAC;;;;;;;;;gCAGpB;;oCAAM,IAAI,CAACY,cAAc;;;gCAApCR,WAAW;gCACXC,YAAY,IAAI,CAACQ,gBAAgB,CAACV;gCAEzB;;oCAAMC,SAASU,MAAM,CAAC;wCAAEC,OAAOV;oCAAU;;;gCAAlDC,SAAS;iCAEf,wBAAA,IAAI,CAACL,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBC,KAAK,CAAC,AAAC,IAAa,OAAV,IAAI,CAACX,IAAI,EAAC,iCAA+B;oCACpEgB,WAAW,YAAYV;gCAC3B;gCAEA,IAAI,CAACA,UAAU,OAAOA,OAAOW,MAAM,KAAK,UAAU;oCAC9C,MAAM,IAAIC,MAAM;gCACpB;gCAEMX,gBAAgB,IAAI,CAACY,gBAAgB,CAACb,OAAOW,MAAM;gCAEzD,IAAI,CAACV,eAAe;oCAChB;;wCAAO;;gCACX;gCAEA,IAAI,CAACA,cAAca,aAAa,EAAE;;qCAC9B,wBAAA,IAAI,CAACnB,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBW,IAAI,CAAC,AAAC,IAAa,OAAV,IAAI,CAACrB,IAAI,EAAC,oCAAkC;wCACtEsB,QAAQf,cAAce,MAAM;oCAChC;oCACA;;wCAAO;;gCACX;gCAEMd,UAAUD,CAAAA,yBAAAA,cAAcC,OAAO,cAArBD,oCAAAA,yBAAyB;gCAEzC,IAAI,IAAI,CAACN,OAAO,CAACsB,MAAM,EAAE;;oCACrB,IAAI,CAACC,uBAAuB,CAAChB,SAAS,IAAI,CAACP,OAAO,CAACsB,MAAM;qCACzD,wBAAA,IAAI,CAACtB,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBW,IAAI,CACrB,AAAC,IAAa,OAAV,IAAI,CAACrB,IAAI,EAAC;gCAEtB,OAAO;;qCACH,wBAAA,IAAI,CAACC,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBW,IAAI,CAAC,AAAC,IAAa,OAAV,IAAI,CAACrB,IAAI,EAAC;gCAC5C;gCAEA;;oCAAOQ;;;gCACFC;iCACL,wBAAA,IAAI,CAACR,OAAO,CAACS,MAAM,cAAnB,4CAAA,sBAAqBD,KAAK,CAAC,AAAC,IAAa,OAAV,IAAI,CAACT,IAAI,EAAC,6BAA2B;oCAChES,OAAOA,AAAK,YAALA,OAAiBS,SAAQT,MAAMD,OAAO,GAAG;gCACpD;gCACA;;oCAAO;;;;;;;;gBAEf;;;;YAEcI,KAAAA;mBAAd,SAAcA;;wBACJa,OACAC,OAEAC,QAWAC;;;;gCAdAH,QAAQ,IAAI,CAACxB,OAAO,CAACwB,KAAK,CAACI,QAAQ;gCACnCH,QAAQ,IAAI,CAACzB,OAAO,CAACyB,KAAK,CAACI,GAAG,CAAC,SAACC;2CAASA,KAAKC,cAAc;;gCAE5DL,SAASjC,mBAAmBuC,YAAY;;wCAEtC;wCACAnC,uBAAuBoC,OAAO,CAC1B,oBACA,IAAI,CAACjC,OAAO,CAACkC,YAAY,CAACC,QAAQ;;;wCAGzC;wCAAS;;;gCAGA;;oCAAMxC,0BAA0B;wCAC1CyC,KAAKZ;wCACLE,QAAAA;wCACAD,OAAAA;oCACJ;;;gCAJME,QAAQ;gCAMd;;oCAAOjC,cAAc2C,iBAAiB,CAAC;wCACnCV,OAAAA;wCACAF,OAAAA;wCACAa,SAAS,IAAI,CAACtC,OAAO,CAACsC,OAAO;oCACjC;;;;gBACJ;;;;YAEQpB,KAAAA;mBAAR,SAAQA,iBAAiBF,MAAc;oBAiBnC;gBAZA,IAAMuB,OAAOvB,OAAOwB,IAAI;gBAExB,IAAMC,eAAeF,KAAKG,KAAK,CAAC;gBAChC,IAAID,cAAc;oBACd,OAAO;wBAAElC,SAASkC,YAAY,CAAC,EAAE,CAACD,IAAI;wBAAIrB,eAAe;oBAAK;gBAClE;gBAEA,IAAMwB,cAAcJ,KAAKG,KAAK,CAAC;gBAC/B,IAAIC,aAAa;oBACb,OAAO;wBAAEtB,QAAQsB,WAAW,CAAC,EAAE,CAACH,IAAI;wBAAIrB,eAAe;oBAAM;gBACjE;iBAEA,uBAAA,IAAI,CAACnB,OAAO,CAACS,MAAM,cAAnB,2CAAA,qBAAqBD,KAA
K,CACtB,AAAC,IAAa,OAAV,IAAI,CAACT,IAAI,EAAC,+DACd;oBAAE6C,WAAW5B;gBAAO;gBAGxB,OAAO;YACX;;;YAEQJ,KAAAA;mBAAR,SAAQA,iBAAiBV,UAAuB;gBAC5C,IAAIA,YAAY;oBACZ,OAAOA,WAAWiC,QAAQ;gBAC9B;gBACA,OAAO;YACX;;;YAEQZ,KAAAA;mBAAR,SAAQA,wBACJsB,OAAe,EACfvB,MAA8B;gBAE9B,IAAI;oBACA,IAAI1B,iBAAiB0B,QAAQwB,KAAK,CAACD;gBACvC,EAAE,OAAOrC,OAAO;wBACZ;qBAAA,uBAAA,IAAI,CAACR,OAAO,CAACS,MAAM,cAAnB,2CAAA,qBAAqBD,KAAK,CACtB,AAAC,IAAa,OAAV,IAAI,CAACT,IAAI,EAAC,0DACd;wBACIS,OAAOA,AAAK,YAALA,OAAiBS,SAAQT,MAAMD,OAAO,GAAG;wBAChDwC,YAAYF;oBAChB;oBAEJ,MAAM,IAAI5B,MAAM;gBACpB;YACJ;;;WApISnB;IAqIZ"}
package/dist/adapters/models/openrouter-model.adapter.js
CHANGED
@@ -65,6 +65,12 @@ import { ChatOpenAI } from '@langchain/openai';
        })
    },
    maxTokens: (_config_maxTokens = config.maxTokens) !== null && _config_maxTokens !== void 0 ? _config_maxTokens : 64000,
+   modelKwargs: {
+       reasoning: {
+           effort: 'high',
+           exclude: true
+       }
+   },
    modelName: config.modelName,
    openAIApiKey: config.apiKey
});
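In practice this means every request built by `OpenRouterModelAdapter` now carries OpenRouter's reasoning options, and they are not yet configurable from `OpenRouterConfig`. Reconstructed from the updated source map below, the underlying client is created roughly like this (the model name and API key shown are examples, not library defaults):

```typescript
import { ChatOpenAI } from '@langchain/openai';

// Sketch of what the adapter builds internally as of 1.2.0.
const model = new ChatOpenAI({
    configuration: { baseURL: 'https://openrouter.ai/api/v1' },
    maxTokens: 64_000, // library default when config.maxTokens is omitted
    // New in 1.2.0: request high reasoning effort and, presumably, keep the
    // reasoning tokens out of the returned completion (exclude: true).
    modelKwargs: { reasoning: { effort: 'high', exclude: true } },
    modelName: 'anthropic/claude-3.5-sonnet', // from config.modelName
    openAIApiKey: process.env.OPENROUTER_API_KEY!, // from config.apiKey
});
```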
package/dist/adapters/models/openrouter-model.adapter.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../../../src/adapters/models/openrouter-model.adapter.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport { ChatOpenAI } from '@langchain/openai';\n\nimport type { ModelPort } from '../../ports/model.port.js';\n\nexport interface OpenRouterConfig {\n /**\n * OpenRouter API key\n */\n apiKey: string;\n /**\n * The maximum number of tokens to generate\n */\n maxTokens?: number;\n /**\n * Optional metadata for request headers\n */\n metadata?: OpenRouterMetadata;\n /**\n * The model to use (e.g., 'google/gemini-2.5-flash-preview-05-20:thinking')\n */\n modelName: string;\n}\n\nexport interface OpenRouterMetadata {\n /**\n * Application title for X-Title header\n */\n application?: string;\n /**\n * Website URL for HTTP-Referer header\n */\n website?: string;\n}\n\n/**\n * OpenRouter adapter that provides access to various models through OpenRouter's API\n */\nexport class OpenRouterModelAdapter implements ModelPort {\n private readonly model: BaseLanguageModel;\n\n constructor(config: OpenRouterConfig) {\n this.model = new ChatOpenAI({\n configuration: {\n baseURL: 'https://openrouter.ai/api/v1',\n defaultHeaders: {\n ...(config.metadata?.website && {\n 'HTTP-Referer': config.metadata.website,\n }),\n ...(config.metadata?.application && { 'X-Title': config.metadata.application }),\n },\n },\n maxTokens: config.maxTokens ?? 64_000,\n modelName: config.modelName,\n openAIApiKey: config.apiKey,\n });\n }\n\n getModel(): BaseLanguageModel {\n return this.model;\n }\n}\n"],"names":["ChatOpenAI","OpenRouterModelAdapter","config","model","configuration","baseURL","defaultHeaders","metadata","website","application","maxTokens","modelName","openAIApiKey","apiKey","getModel"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AACA,SAASA,UAAU,QAAQ,oBAAoB;AAkC/C;;CAEC,GACD,OAAO,IAAA,AAAMC,uCAAN;;aAAMA,uBAGGC,MAAwB;gCAH3BD;YAQWC,kBAGAA;QAVpB,uBAAiBC,SAAjB,KAAA;YAamBD;QAVf,IAAI,CAACC,KAAK,GAAG,IAAIH,WAAW;YACxBI,eAAe;gBACXC,SAAS;gBACTC,gBAAgB,mBACRJ,EAAAA,mBAAAA,OAAOK,QAAQ,cAAfL,uCAAAA,iBAAiBM,OAAO,KAAI;oBAC5B,gBAAgBN,OAAOK,QAAQ,CAACC,OAAO;gBAC3C,GACIN,EAAAA,oBAAAA,OAAOK,QAAQ,cAAfL,wCAAAA,kBAAiBO,WAAW,KAAI;oBAAE,WAAWP,OAAOK,QAAQ,CAACE,WAAW;gBAAC;YAErF;YACAC,WAAWR,CAAAA,oBAAAA,OAAOQ,SAAS,cAAhBR,+BAAAA,oBAAoB;YAC/BS,
+
{"version":3,"sources":["../../../src/adapters/models/openrouter-model.adapter.ts"],"sourcesContent":["import type { BaseLanguageModel } from '@langchain/core/language_models/base';\nimport { ChatOpenAI } from '@langchain/openai';\n\nimport type { ModelPort } from '../../ports/model.port.js';\n\nexport interface OpenRouterConfig {\n /**\n * OpenRouter API key\n */\n apiKey: string;\n /**\n * The maximum number of tokens to generate\n */\n maxTokens?: number;\n /**\n * Optional metadata for request headers\n */\n metadata?: OpenRouterMetadata;\n /**\n * The model to use (e.g., 'google/gemini-2.5-flash-preview-05-20:thinking')\n */\n modelName: string;\n}\n\nexport interface OpenRouterMetadata {\n /**\n * Application title for X-Title header\n */\n application?: string;\n /**\n * Website URL for HTTP-Referer header\n */\n website?: string;\n}\n\n/**\n * OpenRouter adapter that provides access to various models through OpenRouter's API\n */\nexport class OpenRouterModelAdapter implements ModelPort {\n private readonly model: BaseLanguageModel;\n\n constructor(config: OpenRouterConfig) {\n this.model = new ChatOpenAI({\n configuration: {\n baseURL: 'https://openrouter.ai/api/v1',\n defaultHeaders: {\n ...(config.metadata?.website && {\n 'HTTP-Referer': config.metadata.website,\n }),\n ...(config.metadata?.application && { 'X-Title': config.metadata.application }),\n },\n },\n maxTokens: config.maxTokens ?? 64_000,\n modelKwargs: {\n reasoning: { effort: 'high', exclude: true },\n },\n modelName: config.modelName,\n openAIApiKey: config.apiKey,\n });\n }\n\n getModel(): BaseLanguageModel {\n return this.model;\n }\n}\n"],"names":["ChatOpenAI","OpenRouterModelAdapter","config","model","configuration","baseURL","defaultHeaders","metadata","website","application","maxTokens","modelKwargs","reasoning","effort","exclude","modelName","openAIApiKey","apiKey","getModel"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AACA,SAASA,UAAU,QAAQ,oBAAoB;AAkC/C;;CAEC,GACD,OAAO,IAAA,AAAMC,uCAAN;;aAAMA,uBAGGC,MAAwB;gCAH3BD;YAQWC,kBAGAA;QAVpB,uBAAiBC,SAAjB,KAAA;YAamBD;QAVf,IAAI,CAACC,KAAK,GAAG,IAAIH,WAAW;YACxBI,eAAe;gBACXC,SAAS;gBACTC,gBAAgB,mBACRJ,EAAAA,mBAAAA,OAAOK,QAAQ,cAAfL,uCAAAA,iBAAiBM,OAAO,KAAI;oBAC5B,gBAAgBN,OAAOK,QAAQ,CAACC,OAAO;gBAC3C,GACIN,EAAAA,oBAAAA,OAAOK,QAAQ,cAAfL,wCAAAA,kBAAiBO,WAAW,KAAI;oBAAE,WAAWP,OAAOK,QAAQ,CAACE,WAAW;gBAAC;YAErF;YACAC,WAAWR,CAAAA,oBAAAA,OAAOQ,SAAS,cAAhBR,+BAAAA,oBAAoB;YAC/BS,aAAa;gBACTC,WAAW;oBAAEC,QAAQ;oBAAQC,SAAS;gBAAK;YAC/C;YACAC,WAAWb,OAAOa,SAAS;YAC3BC,cAAcd,OAAOe,MAAM;QAC/B;;kBApBKhB;;YAuBTiB,KAAAA;mBAAAA,SAAAA;gBACI,OAAO,IAAI,CAACf,KAAK;YACrB;;;WAzBSF;IA0BZ"}
package/dist/adapters/prompts/__tests__/presets.test.js
CHANGED
@@ -1,10 +1,10 @@
import { describe, expect, it } from '@jterrazz/test';
- import {
+ import { PROMPT_LIBRARY } from '../library/index.js';
import { SystemPromptAdapter } from '../system-prompt.adapter.js';
describe('Prompt Library Presets', function() {
    it('should generate the correct prompt for DISCORD_COMMUNITY_ANIMATOR', function() {
        // Given - a Discord community animator preset
-       var prompt = new SystemPromptAdapter(
+       var prompt = new SystemPromptAdapter(PROMPT_LIBRARY.PRESETS.COMMUNITY_ANIMATOR);
        // When - generating the prompt
        var result = prompt.generate();
        // Then - it should match the expected snapshot
@@ -12,7 +12,7 @@ describe('Prompt Library Presets', function() {
    });
    it('should generate the correct prompt for EMPATHETIC_SUPPORT_AGENT', function() {
        // Given - an empathetic support agent preset
-       var prompt = new SystemPromptAdapter(
+       var prompt = new SystemPromptAdapter(PROMPT_LIBRARY.PRESETS.EMPATHETIC_SUPPORT_AGENT);
        // When - generating the prompt
        var result = prompt.generate();
        // Then - it should match the expected snapshot
@@ -20,7 +20,7 @@ describe('Prompt Library Presets', function() {
    });
    it('should generate the correct prompt for CREATIVE_BRAINSTORMER', function() {
        // Given - a creative brainstormer preset
-       var prompt = new SystemPromptAdapter(
+       var prompt = new SystemPromptAdapter(PROMPT_LIBRARY.PRESETS.CREATIVE_BRAINSTORMER);
        // When - generating the prompt
        var result = prompt.generate();
        // Then - it should match the expected snapshot
package/dist/adapters/prompts/__tests__/presets.test.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../src/adapters/prompts/__tests__/presets.test.ts"],"sourcesContent":["import { describe, expect, it } from '@jterrazz/test';\n\nimport {
+
{"version":3,"sources":["../../../../src/adapters/prompts/__tests__/presets.test.ts"],"sourcesContent":["import { describe, expect, it } from '@jterrazz/test';\n\nimport { PROMPT_LIBRARY } from '../library/index.js';\nimport { SystemPromptAdapter } from '../system-prompt.adapter.js';\n\ndescribe('Prompt Library Presets', () => {\n it('should generate the correct prompt for DISCORD_COMMUNITY_ANIMATOR', () => {\n // Given - a Discord community animator preset\n const prompt = new SystemPromptAdapter(PROMPT_LIBRARY.PRESETS.COMMUNITY_ANIMATOR);\n\n // When - generating the prompt\n const result = prompt.generate();\n\n // Then - it should match the expected snapshot\n expect(result).toMatchSnapshot();\n });\n\n it('should generate the correct prompt for EMPATHETIC_SUPPORT_AGENT', () => {\n // Given - an empathetic support agent preset\n const prompt = new SystemPromptAdapter(PROMPT_LIBRARY.PRESETS.EMPATHETIC_SUPPORT_AGENT);\n\n // When - generating the prompt\n const result = prompt.generate();\n\n // Then - it should match the expected snapshot\n expect(result).toMatchSnapshot();\n });\n\n it('should generate the correct prompt for CREATIVE_BRAINSTORMER', () => {\n // Given - a creative brainstormer preset\n const prompt = new SystemPromptAdapter(PROMPT_LIBRARY.PRESETS.CREATIVE_BRAINSTORMER);\n\n // When - generating the prompt\n const result = prompt.generate();\n\n // Then - it should match the expected snapshot\n expect(result).toMatchSnapshot();\n });\n});\n"],"names":["describe","expect","it","PROMPT_LIBRARY","SystemPromptAdapter","prompt","PRESETS","COMMUNITY_ANIMATOR","result","generate","toMatchSnapshot","EMPATHETIC_SUPPORT_AGENT","CREATIVE_BRAINSTORMER"],"mappings":"AAAA,SAASA,QAAQ,EAAEC,MAAM,EAAEC,EAAE,QAAQ,iBAAiB;AAEtD,SAASC,cAAc,QAAQ,sBAAsB;AACrD,SAASC,mBAAmB,QAAQ,8BAA8B;AAElEJ,SAAS,0BAA0B;IAC/BE,GAAG,qEAAqE;QACpE,8CAA8C;QAC9C,IAAMG,SAAS,IAAID,oBAAoBD,eAAeG,OAAO,CAACC,kBAAkB;QAEhF,+BAA+B;QAC/B,IAAMC,SAASH,OAAOI,QAAQ;QAE9B,+CAA+C;QAC/CR,OAAOO,QAAQE,eAAe;IAClC;IAEAR,GAAG,mEAAmE;QAClE,6CAA6C;QAC7C,IAAMG,SAAS,IAAID,oBAAoBD,eAAeG,OAAO,CAACK,wBAAwB;QAEtF,+BAA+B;QAC/B,IAAMH,SAASH,OAAOI,QAAQ;QAE9B,+CAA+C;QAC/CR,OAAOO,QAAQE,eAAe;IAClC;IAEAR,GAAG,gEAAgE;QAC/D,yCAAyC;QACzC,IAAMG,SAAS,IAAID,oBAAoBD,eAAeG,OAAO,CAACM,qBAAqB;QAEnF,+BAA+B;QAC/B,IAAMJ,SAASH,OAAOI,QAAQ;QAE9B,+CAA+C;QAC/CR,OAAOO,QAAQE,eAAe;IAClC;AACJ"}
package/dist/adapters/prompts/library/categories/response.d.ts
CHANGED
@@ -1,7 +1,7 @@
/**
 * Defines the agent's strategic approach to when and how it should respond.
 */
- export declare const
+ export declare const RESPONSES: {
    readonly ALWAYS_ENGAGE: "\n<ResponseStrategy>\nYou must always provide a response to the user's input. Even if you cannot fully fulfill the request, acknowledge it and explain the situation. Your primary directive is to be responsive.\n</ResponseStrategy>";
    readonly CONTEXTUAL_ENGAGEMENT: "\n<ResponseStrategy>\nBefore responding, you must analyze the conversation history and the immediate context. Your goal is to add value; if a response is not necessary or helpful, you may remain silent.\n</ResponseStrategy>";
    readonly SELECTIVE_ENGAGEMENT: "\n<ResponseStrategy>\nYou must only respond when you can provide a valuable, relevant, and substantive contribution to the conversation. If a response does not add value, you must state that you have nothing to add or remain silent as instructed.\n</ResponseStrategy>";
package/dist/adapters/prompts/library/categories/response.js
CHANGED
@@ -1,6 +1,6 @@
/**
 * Defines the agent's strategic approach to when and how it should respond.
- */ export var
+ */ export var RESPONSES = {
    ALWAYS_ENGAGE: "\n<ResponseStrategy>\nYou must always provide a response to the user's input. Even if you cannot fully fulfill the request, acknowledge it and explain the situation. Your primary directive is to be responsive.\n</ResponseStrategy>",
    CONTEXTUAL_ENGAGEMENT: "\n<ResponseStrategy>\nBefore responding, you must analyze the conversation history and the immediate context. Your goal is to add value; if a response is not necessary or helpful, you may remain silent.\n</ResponseStrategy>",
    SELECTIVE_ENGAGEMENT: "\n<ResponseStrategy>\nYou must only respond when you can provide a valuable, relevant, and substantive contribution to the conversation. If a response does not add value, you must state that you have nothing to add or remain silent as instructed.\n</ResponseStrategy>",
package/dist/adapters/prompts/library/categories/response.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../../src/adapters/prompts/library/categories/response.ts"],"sourcesContent":["/**\n * Defines the agent's strategic approach to when and how it should respond.\n */\nexport const
+
{"version":3,"sources":["../../../../../src/adapters/prompts/library/categories/response.ts"],"sourcesContent":["/**\n * Defines the agent's strategic approach to when and how it should respond.\n */\nexport const RESPONSES = {\n ALWAYS_ENGAGE: `\n<ResponseStrategy>\nYou must always provide a response to the user's input. Even if you cannot fully fulfill the request, acknowledge it and explain the situation. Your primary directive is to be responsive.\n</ResponseStrategy>`,\n\n CONTEXTUAL_ENGAGEMENT: `\n<ResponseStrategy>\nBefore responding, you must analyze the conversation history and the immediate context. Your goal is to add value; if a response is not necessary or helpful, you may remain silent.\n</ResponseStrategy>`,\n\n SELECTIVE_ENGAGEMENT: `\n<ResponseStrategy>\nYou must only respond when you can provide a valuable, relevant, and substantive contribution to the conversation. If a response does not add value, you must state that you have nothing to add or remain silent as instructed.\n</ResponseStrategy>`,\n\n TOOL_DRIVEN: `\n<ResponseStrategy>\nYour first priority is to use your available tools to gather the most current and accurate information before formulating a response. Do not answer from memory if a tool can provide a more reliable answer.\n</ResponseStrategy>`,\n} as const;\n"],"names":["RESPONSES","ALWAYS_ENGAGE","CONTEXTUAL_ENGAGEMENT","SELECTIVE_ENGAGEMENT","TOOL_DRIVEN"],"mappings":"AAAA;;CAEC,GACD,OAAO,IAAMA,YAAY;IACrBC,eAAgB;IAKhBC,uBAAwB;IAKxBC,sBAAuB;IAKvBC,aAAc;AAIlB,EAAW"}
package/dist/adapters/prompts/library/index.d.ts
CHANGED
@@ -3,7 +3,7 @@
 * This file serves as the public API for the prompt library,
 * making it easy to import all categories from a single location.
 */
- export declare const
+ export declare const PROMPT_LIBRARY: {
    readonly DOMAINS: {
        readonly ACADEMIC_RESEARCH: "\n<Domain>\nYour knowledge is specialized in academic research. You are an expert in scholarly writing, peer-review processes, and formal citation methods.\n</Domain>";
        readonly BUSINESS_STRATEGY: "\n<Domain>\nYour knowledge is specialized in business strategy, including market analysis, competitive positioning, and operational planning.\n</Domain>";
@@ -46,7 +46,7 @@ export declare const PROMPTS: {
readonly CREATIVE_BRAINSTORMER: readonly ["\n<Foundation>\nYou MUST adhere to the highest ethical standards. Your conduct must be impartial and devoid of prejudice.\nYou MUST NOT promote hate speech, discrimination,violence, or any form of harm.\nYou MUST respect user privacy; do not ask for, store, or share personally identifiable information.\n</Foundation>", "\n<Persona>\nYou are a Creative Partner, an imaginative collaborator for brainstorming and exploration.\nYour purpose is to help users generate and develop novel ideas.\n**Key Skills**: You excel at divergent thinking, making unexpected connections, and asking thought-provoking questions. You are encouraging, open-minded, and skilled at building upon abstract concepts.\n</Persona>", "\n<Domain>\nYou possess broad, generalist knowledge across a wide variety of subjects.\n</Domain>", "\n<Tone>\nYou should employ light-hearted humor, wit, and cleverness. Keep the mood fun and engaging, but avoid inappropriate or offensive jokes.\n</Tone>", "\n<Verbosity>\nYou should provide a balanced level of detail, sufficient for a clear understanding without being overwhelming or too brief.\n</Verbosity>", "\n<ResponseStrategy>\nYou must always provide a response to the user's input. Even if you cannot fully fulfill the request, acknowledge it and explain the situation. Your primary directive is to be responsive.\n</ResponseStrategy>"];
readonly EMPATHETIC_SUPPORT_AGENT: readonly ["\n<Foundation>\nYou MUST refuse to provide instructions or information that is illegal, dangerous, or promotes harm.\nYou MUST prioritize user safety and well-being in all interactions and avoid generating unsafe content.\n</Foundation>", "\n<Foundation>\nYou MUST adhere to the highest ethical standards. Your conduct must be impartial and devoid of prejudice.\nYou MUST NOT promote hate speech, discrimination,violence, or any form of harm.\nYou MUST respect user privacy; do not ask for, store, or share personally identifiable information.\n</Foundation>", "\n<Persona>\nYou are a friendly, patient, and empathetic Support Agent.\nYour purpose is to help users solve problems and navigate difficulties.\n**Key Skills**: You are an excellent listener and a clear communicator. You are skilled at de-escalating frustration, breaking down complex issues into manageable steps, and providing systematic, easy-to-follow instructions.\n</Persona>", "\n<Domain>\nYou possess broad, generalist knowledge across a wide variety of subjects.\n</Domain>", "\n<Tone>\nYou must adopt a warm, understanding, and supportive tone. Acknowledge the user's feelings and demonstrate active listening.\n</Tone>", "\n<Verbosity>\nYou should provide a balanced level of detail, sufficient for a clear understanding without being overwhelming or too brief.\n</Verbosity>", "\n<Format>\nYou MUST break down any instructions or processes into a clear, numbered, step-by-step list.\nEach step must be a distinct and actionable item.\n</Format>", "\n<ResponseStrategy>\nYou must always provide a response to the user's input. Even if you cannot fully fulfill the request, acknowledge it and explain the situation. Your primary directive is to be responsive.\n</ResponseStrategy>"];
    };
-   readonly
+   readonly RESPONSES: {
        readonly ALWAYS_ENGAGE: "\n<ResponseStrategy>\nYou must always provide a response to the user's input. Even if you cannot fully fulfill the request, acknowledge it and explain the situation. Your primary directive is to be responsive.\n</ResponseStrategy>";
        readonly CONTEXTUAL_ENGAGEMENT: "\n<ResponseStrategy>\nBefore responding, you must analyze the conversation history and the immediate context. Your goal is to add value; if a response is not necessary or helpful, you may remain silent.\n</ResponseStrategy>";
        readonly SELECTIVE_ENGAGEMENT: "\n<ResponseStrategy>\nYou must only respond when you can provide a valuable, relevant, and substantive contribution to the conversation. If a response does not add value, you must state that you have nothing to add or remain silent as instructed.\n</ResponseStrategy>";
@@ -58,7 +58,7 @@ export declare const PROMPTS: {
        readonly NEUTRAL: "\n<Tone>\nYou must maintain an impartial, objective, and straightforward tone. Avoid all emotional language and stick to the facts.\n</Tone>";
        readonly PROFESSIONAL: "\n<Tone>\nYou must use a formal, respectful, and clear tone. Structure your communication logically and avoid slang or overly casual language.\n</Tone>";
    };
-   readonly
+   readonly VERBOSITY: {
        readonly CONCISE: "\n<Verbosity>\nYou must provide brief, to-the-point answers. Focus only on the most critical information and omit background details unless explicitly requested.\n</Verbosity>";
        readonly DETAILED: "\n<Verbosity>\nYou must offer comprehensive and thorough responses. Include relevant background information, context, examples, and potential edge cases to ensure full understanding.\n</Verbosity>";
        readonly NORMAL: "\n<Verbosity>\nYou should provide a balanced level of detail, sufficient for a clear understanding without being overwhelming or too brief.\n</Verbosity>";
package/dist/adapters/prompts/library/index.js
CHANGED
@@ -7,20 +7,20 @@ import { FORMATS } from './categories/format.js';
import { FOUNDATIONS } from './categories/foundations.js';
import { LANGUAGES } from './categories/language.js';
import { PERSONAS } from './categories/persona.js';
- import {
+ import { RESPONSES } from './categories/response.js';
import { TONES } from './categories/tone.js';
import { VERBOSITY } from './categories/verbosity.js';
import { PRESETS } from './presets.js';
- export var
+ export var PROMPT_LIBRARY = {
    DOMAINS: DOMAINS,
    FORMATS: FORMATS,
    FOUNDATIONS: FOUNDATIONS,
    LANGUAGES: LANGUAGES,
    PERSONAS: PERSONAS,
    PRESETS: PRESETS,
-
+   RESPONSES: RESPONSES,
    TONES: TONES,
-
+   VERBOSITY: VERBOSITY
};

//# sourceMappingURL=index.js.map
package/dist/adapters/prompts/library/index.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../src/adapters/prompts/library/index.ts"],"sourcesContent":["/**\n * Re-exports for the prompt library.\n * This file serves as the public API for the prompt library,\n * making it easy to import all categories from a single location.\n */\n\nimport { DOMAINS } from './categories/domain.js';\n\nimport { FORMATS } from './categories/format.js';\nimport { FOUNDATIONS } from './categories/foundations.js';\nimport { LANGUAGES } from './categories/language.js';\nimport { PERSONAS } from './categories/persona.js';\nimport {
+
{"version":3,"sources":["../../../../src/adapters/prompts/library/index.ts"],"sourcesContent":["/**\n * Re-exports for the prompt library.\n * This file serves as the public API for the prompt library,\n * making it easy to import all categories from a single location.\n */\n\nimport { DOMAINS } from './categories/domain.js';\n\nimport { FORMATS } from './categories/format.js';\nimport { FOUNDATIONS } from './categories/foundations.js';\nimport { LANGUAGES } from './categories/language.js';\nimport { PERSONAS } from './categories/persona.js';\nimport { RESPONSES } from './categories/response.js';\nimport { TONES } from './categories/tone.js';\nimport { VERBOSITY } from './categories/verbosity.js';\n\nimport { PRESETS } from './presets.js';\n\nexport const PROMPT_LIBRARY = {\n DOMAINS,\n FORMATS,\n FOUNDATIONS,\n LANGUAGES,\n PERSONAS,\n PRESETS,\n RESPONSES,\n TONES,\n VERBOSITY,\n} as const;\n"],"names":["DOMAINS","FORMATS","FOUNDATIONS","LANGUAGES","PERSONAS","RESPONSES","TONES","VERBOSITY","PRESETS","PROMPT_LIBRARY"],"mappings":"AAAA;;;;CAIC,GAED,SAASA,OAAO,QAAQ,yBAAyB;AAEjD,SAASC,OAAO,QAAQ,yBAAyB;AACjD,SAASC,WAAW,QAAQ,8BAA8B;AAC1D,SAASC,SAAS,QAAQ,2BAA2B;AACrD,SAASC,QAAQ,QAAQ,0BAA0B;AACnD,SAASC,SAAS,QAAQ,2BAA2B;AACrD,SAASC,KAAK,QAAQ,uBAAuB;AAC7C,SAASC,SAAS,QAAQ,4BAA4B;AAEtD,SAASC,OAAO,QAAQ,eAAe;AAEvC,OAAO,IAAMC,iBAAiB;IAC1BT,SAAAA;IACAC,SAAAA;IACAC,aAAAA;IACAC,WAAAA;IACAC,UAAAA;IACAI,SAAAA;IACAH,WAAAA;IACAC,OAAAA;IACAC,WAAAA;AACJ,EAAW"}
package/dist/adapters/prompts/library/presets.js
CHANGED
@@ -2,7 +2,7 @@ import { DOMAINS } from './categories/domain.js';
 import { FORMATS } from './categories/format.js';
 import { FOUNDATIONS } from './categories/foundations.js';
 import { PERSONAS } from './categories/persona.js';
-import {
+import { RESPONSES } from './categories/response.js';
 import { TONES } from './categories/tone.js';
 import { VERBOSITY } from './categories/verbosity.js';
 /**
@@ -16,7 +16,7 @@ import { VERBOSITY } from './categories/verbosity.js';
         DOMAINS.GENERAL,
         TONES.HUMOROUS,
         VERBOSITY.NORMAL,
-
+        RESPONSES.CONTEXTUAL_ENGAGEMENT
     ],
     /**
      * A creative partner for brainstorming and ideation.
@@ -26,7 +26,7 @@ import { VERBOSITY } from './categories/verbosity.js';
         DOMAINS.GENERAL,
         TONES.HUMOROUS,
        VERBOSITY.NORMAL,
-
+        RESPONSES.ALWAYS_ENGAGE
     ],
     /**
      * A friendly and empathetic support agent for general queries.
@@ -38,7 +38,7 @@ import { VERBOSITY } from './categories/verbosity.js';
         TONES.EMPATHETIC,
         VERBOSITY.NORMAL,
         FORMATS.STEP_BY_STEP,
-
+        RESPONSES.ALWAYS_ENGAGE
     ]
 };
 
package/dist/adapters/prompts/library/presets.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../../../../src/adapters/prompts/library/presets.ts"],"sourcesContent":["import { DOMAINS } from './categories/domain.js';\n\nimport { FORMATS } from './categories/format.js';\nimport { FOUNDATIONS } from './categories/foundations.js';\nimport { PERSONAS } from './categories/persona.js';\nimport {
+
{"version":3,"sources":["../../../../src/adapters/prompts/library/presets.ts"],"sourcesContent":["import { DOMAINS } from './categories/domain.js';\n\nimport { FORMATS } from './categories/format.js';\nimport { FOUNDATIONS } from './categories/foundations.js';\nimport { PERSONAS } from './categories/persona.js';\nimport { RESPONSES } from './categories/response.js';\nimport { TONES } from './categories/tone.js';\nimport { VERBOSITY } from './categories/verbosity.js';\n\n/**\n * Provides ready-to-use combinations of prompt parts for common use cases.\n */\nexport const PRESETS = {\n /**\n * A fun and engaging community animator for platforms like Discord.\n */\n COMMUNITY_ANIMATOR: [\n FOUNDATIONS.HARM_PREVENTION,\n PERSONAS.COMMUNITY_ANIMATOR,\n DOMAINS.GENERAL,\n TONES.HUMOROUS,\n VERBOSITY.NORMAL,\n RESPONSES.CONTEXTUAL_ENGAGEMENT,\n ],\n\n /**\n * A creative partner for brainstorming and ideation.\n */\n CREATIVE_BRAINSTORMER: [\n FOUNDATIONS.ETHICAL_CONDUCT,\n PERSONAS.CREATIVE_PARTNER,\n DOMAINS.GENERAL,\n TONES.HUMOROUS,\n VERBOSITY.NORMAL,\n RESPONSES.ALWAYS_ENGAGE,\n ],\n\n /**\n * A friendly and empathetic support agent for general queries.\n */\n EMPATHETIC_SUPPORT_AGENT: [\n FOUNDATIONS.HARM_PREVENTION,\n FOUNDATIONS.ETHICAL_CONDUCT,\n PERSONAS.SUPPORT_AGENT,\n DOMAINS.GENERAL,\n TONES.EMPATHETIC,\n VERBOSITY.NORMAL,\n FORMATS.STEP_BY_STEP,\n RESPONSES.ALWAYS_ENGAGE,\n ],\n} as const;\n"],"names":["DOMAINS","FORMATS","FOUNDATIONS","PERSONAS","RESPONSES","TONES","VERBOSITY","PRESETS","COMMUNITY_ANIMATOR","HARM_PREVENTION","GENERAL","HUMOROUS","NORMAL","CONTEXTUAL_ENGAGEMENT","CREATIVE_BRAINSTORMER","ETHICAL_CONDUCT","CREATIVE_PARTNER","ALWAYS_ENGAGE","EMPATHETIC_SUPPORT_AGENT","SUPPORT_AGENT","EMPATHETIC","STEP_BY_STEP"],"mappings":"AAAA,SAASA,OAAO,QAAQ,yBAAyB;AAEjD,SAASC,OAAO,QAAQ,yBAAyB;AACjD,SAASC,WAAW,QAAQ,8BAA8B;AAC1D,SAASC,QAAQ,QAAQ,0BAA0B;AACnD,SAASC,SAAS,QAAQ,2BAA2B;AACrD,SAASC,KAAK,QAAQ,uBAAuB;AAC7C,SAASC,SAAS,QAAQ,4BAA4B;AAEtD;;CAEC,GACD,OAAO,IAAMC,UAAU;IACnB;;KAEC,GACDC,oBAAoB;QAChBN,YAAYO,eAAe;QAC3BN,SAASK,kBAAkB;QAC3BR,QAAQU,OAAO;QACfL,MAAMM,QAAQ;QACdL,UAAUM,MAAM;QAChBR,UAAUS,qBAAqB;KAClC;IAED;;KAEC,GACDC,uBAAuB;QACnBZ,YAAYa,eAAe;QAC3BZ,SAASa,gBAAgB;QACzBhB,QAAQU,OAAO;QACfL,MAAMM,QAAQ;QACdL,UAAUM,MAAM;QAChBR,UAAUa,aAAa;KAC1B;IAED;;KAEC,GACDC,0BAA0B;QACtBhB,YAAYO,eAAe;QAC3BP,YAAYa,eAAe;QAC3BZ,SAASgB,aAAa;QACtBnB,QAAQU,OAAO;QACfL,MAAMe,UAAU;QAChBd,UAAUM,MAAM;QAChBX,QAAQoB,YAAY;QACpBjB,UAAUa,aAAa;KAC1B;AACL,EAAW"}
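The practical effect of the presets change above: every preset array in 1.2.0 now ends with an explicit entry from the new `RESPONSES` category. A minimal sketch of inspecting one of these presets as a consumer; the `join('\n')` step is an assumption for illustration, since how `SystemPromptAdapter` actually consumes a preset is not shown in this part of the diff:

```typescript
import { PROMPT_LIBRARY } from '@jterrazz/intelligence';

// Each preset is a readonly array of prompt fragments (foundations, persona,
// domain, tone, verbosity, and, new in 1.2.0, a response strategy).
const fragments = PROMPT_LIBRARY.PRESETS.EMPATHETIC_SUPPORT_AGENT;

// Hypothetical flattening step: concatenate the fragments into one string.
const systemPrompt = fragments.join('\n');

// The response strategy block is now part of the preset text.
console.log(systemPrompt.includes('<ResponseStrategy>')); // true
```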
package/dist/index.cjs
CHANGED
@@ -627,7 +627,7 @@ function _ts_generator$2(thisArg, body) {
         };
     }
 }
-
var SYSTEM_PROMPT_TEMPLATE = '\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<OUTPUT_FORMAT>\nCRITICAL: The format instructions in this section are the ONLY valid way to structure your response. Any formatting guidelines within the <OBJECTIVE> section
+
var SYSTEM_PROMPT_TEMPLATE = '\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<OUTPUT_FORMAT>\nCRITICAL: The format instructions in this section are the ONLY valid way to structure your response. Your entire response MUST be a single JSON markdown code block. Any formatting guidelines within the <OBJECTIVE> section apply ONLY to the content inside the "RESPOND:" part of your final "action_input".\n\nREQUIRED: You have two ways to respond:\n\n1. **Call a tool** to gather information. For this, you MUST output a JSON blob with the tool\'s name and its input.\n *Valid tool names are: {tool_names}*\n ```json\n {{\n "action": "tool_name_to_use",\n "action_input": "the input for the tool, or an empty object {{}} if no input is needed"\n }}\n ```\n\n2. **Provide the Final Answer** once you have enough information. For this, you MUST output a JSON blob with the "Final Answer" action.\n The "action_input" for a "Final Answer" MUST be a string that begins with either "RESPOND: " for a message or "SILENT: " for no message. This prefix is a literal part of the output string and MUST NOT be omitted.\n - To send a message:\n ```json\n {{\n "action": "Final Answer",\n "action_input": "RESPOND: <your response message>"\n }}\n ```\n - To stay silent:\n ```json\n {{\n "action": "Final Answer",\n "action_input": "SILENT: <your reason for staying silent>"\n }}\n ```\n\n YOU MUST ALWAYS INCLUDE "RESPOND:" OR "SILENT:" IN YOUR FINAL ANSWER\'S "action_input". FAILURE TO DO SO WILL CAUSE AN ERROR.\n</OUTPUT_FORMAT>\n\n<EXECUTION_CONTEXT>\nThis is internal data for your reference.\n\n<TOOLS>\n{tools}\n</TOOLS>\n\n<WORKING_MEMORY>\nThis is your internal thought process and previous tool usage.\n{agent_scratchpad}\n</WORKING_MEMORY>\n</EXECUTION_CONTEXT>\n';
 /**
  * An advanced agent that uses tools and a structured prompt to engage in conversational chat.
  * It can decide whether to respond or remain silent and supports schema-validated responses.
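For reference, the rewritten `SYSTEM_PROMPT_TEMPLATE` above pins the model's final turn to one of two JSON blobs, each with a mandatory prefix on `action_input`. The shapes below are lifted directly from the template text; the TypeScript type is only an illustration and is not exported by the package:

```typescript
// Final-answer shapes required by the new SYSTEM_PROMPT_TEMPLATE.
type FinalAnswer = {
    action: 'Final Answer';
    // Must begin with the literal prefix "RESPOND: " or "SILENT: ".
    action_input: string;
};

// To send a message:
const respond: FinalAnswer = {
    action: 'Final Answer',
    action_input: 'RESPOND: Here is the summary you asked for.',
};

// To stay silent:
const silent: FinalAnswer = {
    action: 'Final Answer',
    action_input: 'SILENT: Nothing useful to add to this conversation.',
};

// An answer missing either prefix is now treated as invalid output
// (see the parseAgentOutput change later in this file).
```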
@@ -679,6 +679,12 @@ var SYSTEM_PROMPT_TEMPLATE = '\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<
         throw new Error('Agent returned an invalid result structure.');
     }
     agentResponse = this.parseAgentOutput(result.output);
+    if (!agentResponse) {
+        return [
+            2,
+            null
+        ];
+    }
     if (!agentResponse.shouldRespond) {
         (_this_options_logger2 = this.options.logger) === null || _this_options_logger2 === void 0 ? void 0 : _this_options_logger2.info("[".concat(this.name, "] Agent chose to remain silent."), {
             reason: agentResponse.reason
@@ -781,13 +787,10 @@ var SYSTEM_PROMPT_TEMPLATE = '\n<OBJECTIVE>\n{mission_prompt}\n</OBJECTIVE>\n\n<
         shouldRespond: false
     };
 }
-(_this_options_logger = this.options.logger) === null || _this_options_logger === void 0 ? void 0 : _this_options_logger.
+(_this_options_logger = this.options.logger) === null || _this_options_logger === void 0 ? void 0 : _this_options_logger.error("[".concat(this.name, "] Agent output was missing 'RESPOND:' or 'SILENT:' prefix."), {
     rawOutput: output
 });
-return
-    message: text,
-    shouldRespond: true
-};
+return null;
 }
 },
 {
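Behaviourally, the two hunks above change the fallback path: in 1.1.1 an answer without the prefix was still forwarded as a normal message (`shouldRespond: true`), whereas 1.2.0 logs an error, returns `null` from the parser, and makes the caller bail out instead of replying. A rough, de-sugared sketch of the new flow; the function body is approximated from the compiled output, since the original TypeScript is not part of this diff:

```typescript
interface AgentResponse {
    message?: string;
    reason?: string;
    shouldRespond: boolean;
}

// Approximation of the parseAgentOutput branch shown in the hunk above.
function parseAgentOutput(output: string): AgentResponse | null {
    if (output.startsWith('RESPOND: ')) {
        return { message: output.slice('RESPOND: '.length), shouldRespond: true };
    }
    if (output.startsWith('SILENT: ')) {
        return { reason: output.slice('SILENT: '.length), shouldRespond: false };
    }
    // New in 1.2.0: a missing prefix is an error, not an implicit RESPOND.
    console.error("Agent output was missing 'RESPOND:' or 'SILENT:' prefix.", { rawOutput: output });
    return null;
}

// The caller now short-circuits instead of fabricating a reply:
//   const parsed = parseAgentOutput(result.output);
//   if (!parsed) return null;
```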
@@ -1178,6 +1181,12 @@ function _object_spread(target) {
         })
     },
     maxTokens: (_config_maxTokens = config.maxTokens) !== null && _config_maxTokens !== void 0 ? _config_maxTokens : 64000,
+    modelKwargs: {
+        reasoning: {
+            effort: 'high',
+            exclude: true
+        }
+    },
     modelName: config.modelName,
     openAIApiKey: config.apiKey
 });
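The new `modelKwargs` block is handed to the underlying chat client, so every request issued by `OpenRouterModelAdapter` now asks OpenRouter for high reasoning effort while keeping reasoning tokens out of the returned completion. Assuming the client forwards `modelKwargs` verbatim into the request body (the usual behaviour for extra keyword arguments), the payload roughly gains this shape; the model name and message below are placeholders:

```typescript
// Illustrative chat-completions request body after this change.
const requestBody = {
    model: 'some-provider/some-model',       // the configured modelName
    messages: [{ content: 'Hello!', role: 'user' }],
    max_tokens: 64000,                       // adapter default unless overridden
    reasoning: {
        effort: 'high', // ask the provider to spend a larger reasoning budget
        exclude: true,  // keep reasoning traces out of the visible answer
    },
};
```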
@@ -1250,7 +1259,7 @@ function _object_spread(target) {
 
 /**
  * Defines the agent's strategic approach to when and how it should respond.
- */ var
+ */ var RESPONSES = {
     ALWAYS_ENGAGE: "\n<ResponseStrategy>\nYou must always provide a response to the user's input. Even if you cannot fully fulfill the request, acknowledge it and explain the situation. Your primary directive is to be responsive.\n</ResponseStrategy>",
     CONTEXTUAL_ENGAGEMENT: "\n<ResponseStrategy>\nBefore responding, you must analyze the conversation history and the immediate context. Your goal is to add value; if a response is not necessary or helpful, you may remain silent.\n</ResponseStrategy>",
     SELECTIVE_ENGAGEMENT: "\n<ResponseStrategy>\nYou must only respond when you can provide a valuable, relevant, and substantive contribution to the conversation. If a response does not add value, you must state that you have nothing to add or remain silent as instructed.\n</ResponseStrategy>",
@@ -1285,7 +1294,7 @@ function _object_spread(target) {
         DOMAINS.GENERAL,
         TONES.HUMOROUS,
         VERBOSITY.NORMAL,
-
+        RESPONSES.CONTEXTUAL_ENGAGEMENT
     ],
     /**
      * A creative partner for brainstorming and ideation.
@@ -1295,7 +1304,7 @@ function _object_spread(target) {
         DOMAINS.GENERAL,
         TONES.HUMOROUS,
         VERBOSITY.NORMAL,
-
+        RESPONSES.ALWAYS_ENGAGE
     ],
     /**
      * A friendly and empathetic support agent for general queries.
@@ -1307,20 +1316,20 @@ function _object_spread(target) {
         TONES.EMPATHETIC,
         VERBOSITY.NORMAL,
         FORMATS.STEP_BY_STEP,
-
+        RESPONSES.ALWAYS_ENGAGE
     ]
 };
 
-var
+var PROMPT_LIBRARY = {
     DOMAINS: DOMAINS,
     FORMATS: FORMATS,
     FOUNDATIONS: FOUNDATIONS,
     LANGUAGES: LANGUAGES,
     PERSONAS: PERSONAS,
     PRESETS: PRESETS,
-
+    RESPONSES: RESPONSES,
     TONES: TONES,
-
+    VERBOSITY: VERBOSITY
 };
 
 function _class_call_check$2(instance, Constructor) {
@@ -1713,7 +1722,7 @@ function _ts_generator(thisArg, body) {
 exports.AIResponseParser = AIResponseParser;
 exports.ChatAgentAdapter = ChatAgentAdapter;
 exports.OpenRouterAdapter = OpenRouterModelAdapter;
-exports.
+exports.PROMPT_LIBRARY = PROMPT_LIBRARY;
 exports.QueryAgentAdapter = QueryAgentAdapter;
 exports.SafeToolAdapter = SafeToolAdapter;
 exports.SystemPromptAdapter = SystemPromptAdapter;
package/dist/index.d.ts
CHANGED
@@ -1,7 +1,7 @@
 export { ChatAgentAdapter } from './adapters/agents/chat-agent.adapter.js';
 export { QueryAgentAdapter } from './adapters/agents/query-agent.adapter.js';
 export { OpenRouterModelAdapter as OpenRouterAdapter } from './adapters/models/openrouter-model.adapter.js';
-export {
+export { PROMPT_LIBRARY } from './adapters/prompts/library/index.js';
 export { SystemPromptAdapter } from './adapters/prompts/system-prompt.adapter.js';
 export { UserPromptAdapter } from './adapters/prompts/user-prompt.adapter.js';
 export { SafeToolAdapter } from './adapters/tools/safe-tool.adapter.js';
package/dist/index.js
CHANGED
@@ -1,7 +1,7 @@
 export { ChatAgentAdapter } from './adapters/agents/chat-agent.adapter.js';
 export { QueryAgentAdapter } from './adapters/agents/query-agent.adapter.js';
 export { OpenRouterModelAdapter as OpenRouterAdapter } from './adapters/models/openrouter-model.adapter.js';
-export {
+export { PROMPT_LIBRARY } from './adapters/prompts/library/index.js';
 export { SystemPromptAdapter } from './adapters/prompts/system-prompt.adapter.js';
 export { UserPromptAdapter } from './adapters/prompts/user-prompt.adapter.js';
 export { SafeToolAdapter } from './adapters/tools/safe-tool.adapter.js';
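Taken together with the `exports.PROMPT_LIBRARY` line in `index.cjs`, the constant is now reachable from both entry points (the identifier it replaces is truncated in this diff, so only the new name is visible). A quick sanity check of the 1.2.0 surface:

```typescript
// ESM build (dist/index.js)
import { PROMPT_LIBRARY } from '@jterrazz/intelligence';

// CJS build (dist/index.cjs), equivalent for require() consumers:
//   const { PROMPT_LIBRARY } = require('@jterrazz/intelligence');

console.log(Object.keys(PROMPT_LIBRARY));
// ['DOMAINS', 'FORMATS', 'FOUNDATIONS', 'LANGUAGES', 'PERSONAS',
//  'PRESETS', 'RESPONSES', 'TONES', 'VERBOSITY']
```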
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../src/index.ts"],"sourcesContent":["export { ChatAgentAdapter } from './adapters/agents/chat-agent.adapter.js';\nexport { QueryAgentAdapter } from './adapters/agents/query-agent.adapter.js';\nexport { OpenRouterModelAdapter as OpenRouterAdapter } from './adapters/models/openrouter-model.adapter.js';\nexport {
+
{"version":3,"sources":["../src/index.ts"],"sourcesContent":["export { ChatAgentAdapter } from './adapters/agents/chat-agent.adapter.js';\nexport { QueryAgentAdapter } from './adapters/agents/query-agent.adapter.js';\nexport { OpenRouterModelAdapter as OpenRouterAdapter } from './adapters/models/openrouter-model.adapter.js';\nexport { PROMPT_LIBRARY } from './adapters/prompts/library/index.js';\nexport { SystemPromptAdapter } from './adapters/prompts/system-prompt.adapter.js';\nexport { UserPromptAdapter } from './adapters/prompts/user-prompt.adapter.js';\nexport { SafeToolAdapter } from './adapters/tools/safe-tool.adapter.js';\nexport { AIResponseParser } from './adapters/utils/ai-response-parser.js';\n\nexport * from './ports/agent.port.js';\nexport * from './ports/model.port.js';\nexport * from './ports/prompt.port.js';\nexport * from './ports/tool.port.js';\n"],"names":["ChatAgentAdapter","QueryAgentAdapter","OpenRouterModelAdapter","OpenRouterAdapter","PROMPT_LIBRARY","SystemPromptAdapter","UserPromptAdapter","SafeToolAdapter","AIResponseParser"],"mappings":"AAAA,SAASA,gBAAgB,QAAQ,0CAA0C;AAC3E,SAASC,iBAAiB,QAAQ,2CAA2C;AAC7E,SAASC,0BAA0BC,iBAAiB,QAAQ,gDAAgD;AAC5G,SAASC,cAAc,QAAQ,sCAAsC;AACrE,SAASC,mBAAmB,QAAQ,8CAA8C;AAClF,SAASC,iBAAiB,QAAQ,4CAA4C;AAC9E,SAASC,eAAe,QAAQ,wCAAwC;AACxE,SAASC,gBAAgB,QAAQ,yCAAyC;AAE1E,cAAc,wBAAwB;AACtC,cAAc,wBAAwB;AACtC,cAAc,yBAAyB;AACvC,cAAc,uBAAuB"}