@gugacoder/agentic-sdk 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129) hide show
  1. package/dist/agent.d.ts +2 -0
  2. package/dist/agent.js +463 -0
  3. package/dist/context/compaction.d.ts +27 -0
  4. package/dist/context/compaction.js +219 -0
  5. package/dist/context/models.d.ts +6 -0
  6. package/dist/context/models.js +41 -0
  7. package/dist/context/tokenizer.d.ts +5 -0
  8. package/dist/context/tokenizer.js +11 -0
  9. package/dist/context/usage.d.ts +11 -0
  10. package/dist/context/usage.js +49 -0
  11. package/dist/display-schemas.d.ts +1865 -0
  12. package/dist/display-schemas.js +219 -0
  13. package/dist/index.d.ts +38 -0
  14. package/dist/index.js +28 -0
  15. package/dist/middleware/logging.d.ts +2 -0
  16. package/dist/middleware/logging.js +32 -0
  17. package/dist/prompts/assembly.d.ts +13 -0
  18. package/dist/prompts/assembly.js +229 -0
  19. package/dist/providers.d.ts +19 -0
  20. package/dist/providers.js +44 -0
  21. package/dist/proxy.d.ts +2 -0
  22. package/dist/proxy.js +103 -0
  23. package/dist/schemas.d.ts +228 -0
  24. package/dist/schemas.js +51 -0
  25. package/dist/session.d.ts +7 -0
  26. package/dist/session.js +102 -0
  27. package/dist/structured.d.ts +18 -0
  28. package/dist/structured.js +38 -0
  29. package/dist/tool-repair.d.ts +21 -0
  30. package/dist/tool-repair.js +72 -0
  31. package/dist/tools/api-spec.d.ts +4 -0
  32. package/dist/tools/api-spec.js +123 -0
  33. package/dist/tools/apply-patch.d.ts +484 -0
  34. package/dist/tools/apply-patch.js +157 -0
  35. package/dist/tools/ask-user.d.ts +14 -0
  36. package/dist/tools/ask-user.js +27 -0
  37. package/dist/tools/bash.d.ts +550 -0
  38. package/dist/tools/bash.js +43 -0
  39. package/dist/tools/batch.d.ts +13 -0
  40. package/dist/tools/batch.js +84 -0
  41. package/dist/tools/brave-search.d.ts +6 -0
  42. package/dist/tools/brave-search.js +19 -0
  43. package/dist/tools/code-search.d.ts +20 -0
  44. package/dist/tools/code-search.js +42 -0
  45. package/dist/tools/diagnostics.d.ts +4 -0
  46. package/dist/tools/diagnostics.js +69 -0
  47. package/dist/tools/display.d.ts +483 -0
  48. package/dist/tools/display.js +77 -0
  49. package/dist/tools/edit.d.ts +682 -0
  50. package/dist/tools/edit.js +47 -0
  51. package/dist/tools/glob.d.ts +4 -0
  52. package/dist/tools/glob.js +42 -0
  53. package/dist/tools/grep.d.ts +6 -0
  54. package/dist/tools/grep.js +69 -0
  55. package/dist/tools/http-request.d.ts +7 -0
  56. package/dist/tools/http-request.js +98 -0
  57. package/dist/tools/index.d.ts +1611 -0
  58. package/dist/tools/index.js +46 -0
  59. package/dist/tools/job-tools.d.ts +24 -0
  60. package/dist/tools/job-tools.js +67 -0
  61. package/dist/tools/list-dir.d.ts +5 -0
  62. package/dist/tools/list-dir.js +79 -0
  63. package/dist/tools/multi-edit.d.ts +814 -0
  64. package/dist/tools/multi-edit.js +57 -0
  65. package/dist/tools/read.d.ts +5 -0
  66. package/dist/tools/read.js +33 -0
  67. package/dist/tools/task.d.ts +21 -0
  68. package/dist/tools/task.js +51 -0
  69. package/dist/tools/todo.d.ts +14 -0
  70. package/dist/tools/todo.js +60 -0
  71. package/dist/tools/web-fetch.d.ts +4 -0
  72. package/dist/tools/web-fetch.js +126 -0
  73. package/dist/tools/web-search.d.ts +22 -0
  74. package/dist/tools/web-search.js +48 -0
  75. package/dist/tools/write.d.ts +550 -0
  76. package/dist/tools/write.js +30 -0
  77. package/dist/types.d.ts +201 -0
  78. package/dist/types.js +1 -0
  79. package/package.json +43 -0
  80. package/src/agent.ts +520 -0
  81. package/src/context/compaction.ts +265 -0
  82. package/src/context/models.ts +42 -0
  83. package/src/context/tokenizer.ts +12 -0
  84. package/src/context/usage.ts +65 -0
  85. package/src/display-schemas.ts +276 -0
  86. package/src/index.ts +43 -0
  87. package/src/middleware/logging.ts +37 -0
  88. package/src/prompts/assembly.ts +263 -0
  89. package/src/prompts/identity.md +10 -0
  90. package/src/prompts/patterns.md +7 -0
  91. package/src/prompts/safety.md +7 -0
  92. package/src/prompts/tool-guide.md +9 -0
  93. package/src/prompts/tools/bash.md +7 -0
  94. package/src/prompts/tools/edit.md +7 -0
  95. package/src/prompts/tools/glob.md +7 -0
  96. package/src/prompts/tools/grep.md +7 -0
  97. package/src/prompts/tools/read.md +7 -0
  98. package/src/prompts/tools/write.md +7 -0
  99. package/src/providers.ts +58 -0
  100. package/src/proxy.ts +101 -0
  101. package/src/schemas.ts +58 -0
  102. package/src/session.ts +110 -0
  103. package/src/structured.ts +65 -0
  104. package/src/tool-repair.ts +92 -0
  105. package/src/tools/api-spec.ts +158 -0
  106. package/src/tools/apply-patch.ts +188 -0
  107. package/src/tools/ask-user.ts +40 -0
  108. package/src/tools/bash.ts +51 -0
  109. package/src/tools/batch.ts +103 -0
  110. package/src/tools/brave-search.ts +24 -0
  111. package/src/tools/code-search.ts +69 -0
  112. package/src/tools/diagnostics.ts +93 -0
  113. package/src/tools/display.ts +105 -0
  114. package/src/tools/edit.ts +55 -0
  115. package/src/tools/glob.ts +46 -0
  116. package/src/tools/grep.ts +68 -0
  117. package/src/tools/http-request.ts +103 -0
  118. package/src/tools/index.ts +48 -0
  119. package/src/tools/job-tools.ts +84 -0
  120. package/src/tools/list-dir.ts +102 -0
  121. package/src/tools/multi-edit.ts +65 -0
  122. package/src/tools/read.ts +40 -0
  123. package/src/tools/task.ts +71 -0
  124. package/src/tools/todo.ts +82 -0
  125. package/src/tools/web-fetch.ts +155 -0
  126. package/src/tools/web-search.ts +75 -0
  127. package/src/tools/write.ts +34 -0
  128. package/src/types.ts +145 -0
  129. package/tsconfig.json +17 -0
@@ -0,0 +1,219 @@
1
+ import { generateText, Output, wrapLanguageModel } from "ai";
2
+ import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
3
+ import { z } from "zod";
4
+ import { countTokens } from "./tokenizer.js";
5
+ import { getContextWindow } from "./models.js";
6
// Zod schema for the structured summary the model must produce during
// context compaction. NOTE(review): the .describe() strings are sent to the
// model at runtime and are in Portuguese — presumably intentional for this
// package's locale; do not translate without confirming.
const CompactionSchema = z.object({
    summary: z.string().describe("Resumo conciso da conversa"),
    decisions: z.array(z.string()).describe("Decisoes tomadas"),
    filesModified: z.array(z.string()).describe("Arquivos modificados"),
    currentState: z.string().describe("Estado atual do trabalho"),
    nextSteps: z.array(z.string()).describe("Proximos passos"),
});
13
// Fraction of the context window reserved for the "tail": the most recent
// messages kept verbatim during compaction (see splitMessages below).
const TAIL_RATIO = 0.30;
// System prompt shared by both the structured and the free-text summarization passes.
const SUMMARIZATION_PROMPT = `Summarize the conversation below preserving: decisions made, files modified, current state of work, and next steps. Be concise.`;
15
/**
 * Estimates the token cost of a single message.
 *
 * Text content is counted with the tokenizer; non-text parts are charged
 * flat heuristic costs (image: 300, PDF file: 1500, audio file: 200,
 * other/unknown: 500). Every message also carries a fixed ~4-token
 * overhead for the role and framing.
 */
function countMessageTokens(msg) {
    const FRAMING_OVERHEAD = 4;
    const { content } = msg;
    if (typeof content === "string") {
        return FRAMING_OVERHEAD + countTokens(content);
    }
    if (!Array.isArray(content)) {
        // Neither string nor array: only the framing overhead applies.
        return FRAMING_OVERHEAD;
    }
    let total = FRAMING_OVERHEAD;
    for (const part of content) {
        switch (part.type) {
            case "text":
                // A "text" part without a string payload falls back to the
                // flat unknown-part cost, matching the generic fallback.
                total += typeof part.text === "string" ? countTokens(part.text) : 500;
                break;
            case "image":
                total += 300;
                break;
            case "file": {
                const mime = part.mimeType ?? "";
                if (mime === "application/pdf") {
                    total += 1500;
                }
                else if (mime.startsWith("audio/")) {
                    total += 200;
                }
                else {
                    total += 500;
                }
                break;
            }
            default:
                total += 500; // unknown part fallback
        }
    }
    return total;
}
47
/**
 * Renders messages as a plain-text transcript of "[role]: content" entries
 * (separated by blank lines) suitable for feeding to the summarizer.
 *
 * For array content, only parts carrying a `text` property are included,
 * joined with newlines; content that is neither a string nor an array is
 * rendered as the placeholder "[non-text content]".
 */
function formatMessagesForSummary(messages) {
    const lines = [];
    for (const msg of messages) {
        let content;
        if (typeof msg.content === "string") {
            content = msg.content;
        }
        else if (Array.isArray(msg.content)) {
            const texts = [];
            for (const part of msg.content) {
                if ("text" in part) {
                    texts.push(part.text);
                }
            }
            content = texts.join("\n");
        }
        else {
            content = "[non-text content]";
        }
        lines.push(`[${msg.role}]: ${content}`);
    }
    return lines.join("\n\n");
}
63
/**
 * Splits messages into head (to summarize) and tail (to keep intact).
 * Tail = last messages that fit within TAIL_RATIO (30%) of the context window.
 *
 * For inputs of 2+ messages both halves are guaranteed non-empty:
 *  - at least one message stays in the head (otherwise there is nothing to
 *    compact), and
 *  - at least the most recent message stays in the tail, even when it alone
 *    exceeds the tail budget — previously an oversized final message left the
 *    tail empty and compaction dropped the latest message entirely.
 */
function splitMessages(messages, contextWindow) {
    const tailBudget = Math.floor(contextWindow * TAIL_RATIO);
    let tailTokens = 0;
    let tailStart = messages.length;
    // Walk backwards, greedily pulling messages into the tail while they fit.
    for (let i = messages.length - 1; i >= 0; i--) {
        const msgTokens = countMessageTokens(messages[i]);
        if (tailTokens + msgTokens > tailBudget)
            break;
        tailTokens += msgTokens;
        tailStart = i;
    }
    // Fix: always keep the most recent message in the tail, even if it alone
    // blows the budget — losing the in-flight request is worse than a large tail.
    if (tailStart >= messages.length && messages.length > 0) {
        tailStart = messages.length - 1;
    }
    // Ensure at least one message in head (nothing to compact otherwise)
    if (tailStart <= 0) {
        tailStart = 1;
    }
    return {
        head: messages.slice(0, tailStart),
        tail: messages.slice(tailStart),
    };
}
88
/**
 * Builds the language model used for summarization: either resolved from the
 * caller-supplied provider registry or created as a direct OpenRouter
 * (OpenAI-compatible) client, optionally wrapped with the agent's middleware.
 * Previously this construction was duplicated verbatim in the structured path
 * and the text fallback.
 */
function buildSummarizationModel(options) {
    const baseModel = options.providers
        ? options.providers.model(options.model)
        : createOpenAICompatible({
            name: "openrouter",
            baseURL: "https://openrouter.ai/api/v1",
            apiKey: options.apiKey,
        })(options.model);
    return options.middleware && options.middleware.length > 0
        ? wrapLanguageModel({ model: baseModel, middleware: options.middleware })
        : baseModel;
}
/** Wraps a summary string in the <context_summary> user message that replaces the head. */
function buildSummaryMessage(summary) {
    return {
        role: "user",
        content: `<context_summary>\n${summary}\n</context_summary>`,
    };
}
/**
 * Compacts conversation messages by summarizing older messages (head)
 * and keeping recent messages (tail) intact.
 *
 * Uses the same model and apiKey as the agent for summarization.
 * If summarization fails, returns original messages with a warning.
 */
export async function compactMessages(messages, options) {
    // Nothing to compact if 2 or fewer messages
    if (messages.length <= 2) {
        return { messages, compacted: false };
    }
    const contextWindow = getContextWindow(options.model, options.contextWindow);
    const { head, tail } = splitMessages(messages, contextWindow);
    // If head is empty or has only 1 message, nothing to compact
    if (head.length <= 1) {
        return { messages, compacted: false };
    }
    const conversationText = formatMessagesForSummary(head);
    // Build telemetry config for compaction spans
    const telemetryConfig = options.telemetry?.enabled
        ? {
            isEnabled: true,
            functionId: options.telemetry.functionId
                ? `${options.telemetry.functionId}-compaction`
                : "ai-compaction",
            recordInputs: false,
            recordOutputs: false,
            metadata: {
                ...options.telemetry.metadata,
            },
        }
        : undefined;
    try {
        // Preferred path: structured summarization via Output.object.
        const result = await generateText({
            model: buildSummarizationModel(options),
            output: Output.object({ schema: CompactionSchema }),
            system: SUMMARIZATION_PROMPT,
            messages: [{ role: "user", content: conversationText }],
            maxOutputTokens: 2000,
            ...(telemetryConfig ? { experimental_telemetry: telemetryConfig } : {}),
        });
        const obj = result.output;
        if (!obj.summary || obj.summary.trim().length === 0) {
            return {
                messages,
                compacted: false,
                warning: "Compaction produced empty summary — keeping original messages",
            };
        }
        // Render the structured output as a markdown document; empty list
        // sections are omitted.
        const sections = [];
        sections.push(`## Summary\n${obj.summary}`);
        if (obj.decisions.length > 0) {
            sections.push(`## Decisions\n${obj.decisions.map((d) => `- ${d}`).join("\n")}`);
        }
        if (obj.filesModified.length > 0) {
            sections.push(`## Files Modified\n${obj.filesModified.map((f) => `- ${f}`).join("\n")}`);
        }
        sections.push(`## Current State\n${obj.currentState}`);
        if (obj.nextSteps.length > 0) {
            sections.push(`## Next Steps\n${obj.nextSteps.map((s) => `- ${s}`).join("\n")}`);
        }
        return {
            messages: [buildSummaryMessage(sections.join("\n\n")), ...tail],
            compacted: true,
        };
    }
    catch (structuredErr) {
        // Fallback: structured output failed — retry with free-text summarization.
        const structuredMsg = structuredErr instanceof Error ? structuredErr.message : String(structuredErr);
        try {
            const result = await generateText({
                model: buildSummarizationModel(options),
                system: SUMMARIZATION_PROMPT,
                messages: [{ role: "user", content: conversationText }],
                maxOutputTokens: 2000,
                ...(telemetryConfig ? { experimental_telemetry: telemetryConfig } : {}),
            });
            const summary = result.text;
            if (!summary || summary.trim().length === 0) {
                return {
                    messages,
                    compacted: false,
                    warning: "Compaction fallback produced empty summary — keeping original messages",
                };
            }
            return {
                messages: [buildSummaryMessage(summary), ...tail],
                compacted: true,
                warning: `Structured compaction failed (${structuredMsg}) — used text fallback`,
            };
        }
        catch (fallbackErr) {
            // Both paths failed: keep the original messages and surface both errors.
            const fallbackMsg = fallbackErr instanceof Error ? fallbackErr.message : String(fallbackErr);
            return {
                messages,
                compacted: false,
                warning: `Compaction failed: structured (${structuredMsg}), fallback (${fallbackMsg}) — keeping original messages`,
            };
        }
    }
}
@@ -0,0 +1,6 @@
1
/**
 * Returns the context window size (in tokens) for a model ID.
 *
 * @param modelId - Provider-prefixed model identifier (e.g. "openai/gpt-4o").
 * @param override - If provided, it takes precedence over the built-in map.
 * @returns The known window size, the override, or 128000 for unknown models.
 */
export declare function getContextWindow(modelId: string, override?: number): number;
@@ -0,0 +1,41 @@
1
/**
 * Known context-window sizes (in tokens), keyed by provider-prefixed model ID.
 * Stored in a Map rather than a plain object so arbitrary model IDs (e.g.
 * "toString", "constructor") cannot resolve to Object.prototype members —
 * with a plain object those lookups returned functions instead of falling
 * through to the default.
 */
const MODEL_CONTEXT_WINDOWS = new Map([
    // Anthropic — Claude 4.x
    ["anthropic/claude-opus-4-6", 200000],
    ["anthropic/claude-sonnet-4-5", 200000],
    ["anthropic/claude-haiku-4-5", 200000],
    // Anthropic — Claude 3.x
    ["anthropic/claude-3.5-sonnet", 200000],
    ["anthropic/claude-3.5-haiku", 200000],
    ["anthropic/claude-3-opus", 200000],
    ["anthropic/claude-3-sonnet", 200000],
    ["anthropic/claude-3-haiku", 200000],
    // OpenAI
    ["openai/gpt-4o", 128000],
    ["openai/gpt-4o-mini", 128000],
    ["openai/gpt-4-turbo", 128000],
    ["openai/o1", 200000],
    ["openai/o1-mini", 128000],
    ["openai/o3", 200000],
    ["openai/o3-mini", 200000],
    ["openai/o4-mini", 200000],
    // Google
    ["google/gemini-2.0-flash", 1048576],
    ["google/gemini-2.5-pro", 1048576],
    // Meta
    ["meta-llama/llama-3.1-405b-instruct", 131072],
    ["meta-llama/llama-3.1-70b-instruct", 131072],
    // DeepSeek
    ["deepseek/deepseek-chat-v3", 131072],
    ["deepseek/deepseek-r1", 131072],
]);
// Fallback window size for models not present in the map.
const DEFAULT_CONTEXT_WINDOW = 128000;
/**
 * Returns the context window size for a model ID.
 * If `override` is provided, it takes precedence over the map.
 * Unknown models default to 128000.
 */
export function getContextWindow(modelId, override) {
    if (override !== undefined)
        return override;
    return MODEL_CONTEXT_WINDOWS.get(modelId) ?? DEFAULT_CONTEXT_WINDOW;
}
@@ -0,0 +1,5 @@
1
/**
 * Returns an estimated token count for the given text.
 * Uses the js-tiktoken encoding registered for "gpt-4o" (o200k_base, not
 * cl100k_base); counts are approximate for models with other tokenizers.
 * Returns 0 for empty strings.
 */
export declare function countTokens(text: string): number;
@@ -0,0 +1,11 @@
1
import { encodingForModel } from "js-tiktoken";
// Module-level singleton: the encoder is built once at import time and reused.
// "gpt-4o" selects the o200k_base encoding in js-tiktoken.
const encoder = encodingForModel("gpt-4o");
/**
 * Returns an estimated token count for the given text.
 * Uses the js-tiktoken encoding for "gpt-4o" (o200k_base, not cl100k_base);
 * counts are approximate for models with other tokenizers.
 * Returns 0 for empty strings (and any other falsy input).
 */
export function countTokens(text) {
    if (!text)
        return 0;
    return encoder.encode(text).length;
}
@@ -0,0 +1,11 @@
1
import type { ModelMessage } from "ai";
import type { ContextUsage } from "../types.js";
/** Inputs needed to estimate how much of the model's context window is in use. */
export interface GetContextUsageOptions {
    /** Model ID used to look up the context window size. */
    model: string;
    /** Full system prompt text; counted with the tokenizer. */
    systemPrompt: string;
    /** Tool definitions map; token cost is estimated from its JSON serialization. */
    toolDefinitions: Record<string, unknown>;
    /** Conversation messages; only string content and text parts are counted. */
    messages: ModelMessage[];
    /** Optional override for the model's context window size, in tokens. */
    contextWindow?: number;
    /** Compaction trigger as a fraction of the window (e.g. 0.65 = 65%). */
    compactThreshold?: number;
}
/** Estimates prompt token usage and returns a ContextUsage breakdown. */
export declare function getContextUsage(options: GetContextUsageOptions): ContextUsage;
@@ -0,0 +1,49 @@
1
+ import { countTokens } from "./tokenizer.js";
2
+ import { getContextWindow } from "./models.js";
3
// Default compaction trigger: flag compaction once 65% of the window is used.
const DEFAULT_COMPACT_THRESHOLD = 0.65;
4
/**
 * Sums estimated tokens across all messages. Only string content and
 * text-bearing parts are counted; every message additionally carries a
 * flat ~4-token overhead for role and framing.
 */
function countMessagesTokens(messages) {
    return messages.reduce((total, msg) => {
        let msgTokens = 4; // role + message framing overhead
        if (typeof msg.content === "string") {
            msgTokens += countTokens(msg.content);
        }
        else if (Array.isArray(msg.content)) {
            for (const part of msg.content) {
                if ("text" in part && typeof part.text === "string") {
                    msgTokens += countTokens(part.text);
                }
            }
        }
        return total + msgTokens;
    }, 0);
}
22
/**
 * Estimates the token cost of the tool definitions by tokenizing their JSON
 * serialization. Returns 0 when the map is absent or empty.
 */
function countToolDefinitionsTokens(tools) {
    const hasTools = tools && Object.keys(tools).length > 0;
    return hasTools ? countTokens(JSON.stringify(tools)) : 0;
}
27
/**
 * Estimates current context-window usage from the system prompt, tool
 * definitions, and conversation messages, and reports whether usage has
 * crossed the compaction threshold.
 *
 * Percentages in the returned object are expressed 0-100.
 */
export function getContextUsage(options) {
    const contextWindow = getContextWindow(options.model, options.contextWindow);
    const compactThreshold = options.compactThreshold ?? DEFAULT_COMPACT_THRESHOLD;
    const systemPromptTokens = countTokens(options.systemPrompt);
    const toolTokens = countToolDefinitionsTokens(options.toolDefinitions);
    const messageTokens = countMessagesTokens(options.messages);
    const used = systemPromptTokens + toolTokens + messageTokens;
    const usagePercent = (used / contextWindow) * 100;
    const thresholdPercent = compactThreshold * 100;
    return {
        model: options.model,
        contextWindow,
        systemPrompt: systemPromptTokens,
        toolDefinitions: toolTokens,
        messages: messageTokens,
        used,
        free: contextWindow - used,
        usagePercent,
        compactThreshold: thresholdPercent,
        willCompact: usagePercent >= thresholdPercent,
    };
}