@polka-codes/core 0.8.24 → 0.8.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,6 +8,7 @@ declare abstract class AgentBase {
  protected readonly config: Readonly<AgentBaseConfig>;
  protected readonly handlers: Record<string, FullToolInfo>;
  constructor(name: string, ai: AiServiceBase, config: AgentBaseConfig);
+ abort(): void;
  get parameters(): Readonly<any>;
  get messages(): Readonly<MessageParam[]>;
  setMessages(messages: Readonly<MessageParam[]>): void;
@@ -93,6 +94,7 @@ export { agentsPrompt as agentsPrompt_alias_1 }
  export { agentsPrompt as agentsPrompt_alias_2 }

  declare abstract class AiServiceBase {
+ #private;
  readonly usageMeter: UsageMeter;
  readonly options: AiServiceOptions;
  constructor(options: AiServiceOptions);
@@ -101,7 +103,8 @@ declare abstract class AiServiceBase {
  id: string;
  info: ModelInfo;
  };
- abstract sendImpl(systemPrompt: string, messages: MessageParam[]): ApiStream;
+ abstract sendImpl(systemPrompt: string, messages: MessageParam[], signal: AbortSignal): ApiStream;
+ abort(): void;
  send(systemPrompt: string, messages: MessageParam[]): ApiStream;
  request(systemPrompt: string, messages: MessageParam[]): Promise<{
  response: string;
@@ -300,7 +303,7 @@ export declare class AnthropicService extends AiServiceBase {
  info: ModelInfo;
  };
  constructor(options: AiServiceOptions);
- sendImpl(systemPrompt: string, messages: MessageParam[]): ApiStream;
+ sendImpl(systemPrompt: string, messages: MessageParam[], signal: AbortSignal): ApiStream;
  }

  declare type ApiStream = AsyncGenerator<ApiStreamChunk>;
@@ -795,7 +798,7 @@ export declare class DeepSeekService extends AiServiceBase {
  info: ModelInfo;
  };
  constructor(options: AiServiceOptions);
- sendImpl(systemPrompt: string, messages: MessageParam[]): ApiStream;
+ sendImpl(systemPrompt: string, messages: MessageParam[], signal: AbortSignal): ApiStream;
  }

  declare const _default: {
@@ -1945,6 +1948,8 @@ declare type ExitReason = {
  type: 'UsageExceeded';
  } | {
  type: 'WaitForUserInput';
+ } | {
+ type: 'Aborted';
  } | ToolResponseExit | ToolResponseInterrupted | ToolResponseHandOver | ToolResponseDelegate | {
  type: 'Pause';
  responses: ToolResponseOrToolPause[];
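
Callers that pattern-match on `ExitReason` must now handle the new `'Aborted'` variant. A minimal TypeScript sketch of such a consumer, using a simplified stand-in for the union (the tool-response members of the real declaration are elided here):

```ts
// Simplified stand-in; the real ExitReason union has more members.
type ExitReason =
  | { type: 'UsageExceeded' }
  | { type: 'WaitForUserInput' }
  | { type: 'Aborted' }
  | { type: 'Pause'; responses: unknown[] }

// Hypothetical helper: describe why a task stopped.
function describeExit(reason: ExitReason): string {
  switch (reason.type) {
    case 'Aborted':
      return 'task was cancelled via abort()'
    case 'UsageExceeded':
      return 'usage limit reached'
    case 'WaitForUserInput':
      return 'waiting for user input'
    case 'Pause':
      return `paused with ${reason.responses.length} pending responses`
  }
}
```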
@@ -2472,6 +2477,7 @@ declare class MultiAgent {
  }): Promise<ExitReason>;
  continueTask(userMessage: string): Promise<ExitReason>;
  get hasActiveAgent(): boolean;
+ abort(): void;
  }
  export { MultiAgent }
  export { MultiAgent as MultiAgent_alias_1 }
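
With the new `abort()` on `MultiAgent`, a caller can cancel a running task from the outside; the implementation (further down in `dist/index.js`) forwards the call to the agent at the top of the stack. A usage sketch, assuming an already-constructed instance (construction details omitted):

```ts
import { MultiAgent } from '@polka-codes/core'

declare const multiAgent: MultiAgent // assumed constructed elsewhere

// Hypothetical watchdog: cancel the task if it runs longer than 60 seconds.
const timer = setTimeout(() => multiAgent.abort(), 60_000)

const exitReason = await multiAgent.continueTask('Refactor the parser')
clearTimeout(timer)

if (exitReason.type === 'Aborted') {
  console.log('task was aborted before completion')
}
```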
@@ -2495,7 +2501,7 @@ export declare class OllamaService extends AiServiceBase {
  info: ModelInfo;
  };
  constructor(options: AiServiceOptions);
- sendImpl(systemPrompt: string, messages: MessageParam[]): ApiStream;
+ sendImpl(systemPrompt: string, messages: MessageParam[], signal: AbortSignal): ApiStream;
  }

  declare const openAiModelInfoSaneDefaults: {
@@ -2517,7 +2523,7 @@ export declare class OpenRouterService extends AiServiceBase {
  info: ModelInfo;
  };
  constructor(options: AiServiceOptions);
- sendImpl(systemPrompt: string, messages: MessageParam[]): ApiStream;
+ sendImpl(systemPrompt: string, messages: MessageParam[], signal: AbortSignal): ApiStream;
  }

  declare type Output = {
@@ -2607,11 +2613,20 @@ declare enum Policies {
  export { Policies }
  export { Policies as Policies_alias_1 }

- declare const replaceInFile_2: (fileContent: string, diff: string) => Promise<string>;
+ declare const replaceInFile_2: (fileContent: string, diff: string) => ReplaceResult;
  export { replaceInFile_2 as replaceInFileHelper }
  export { replaceInFile_2 as replaceInFile_alias_3 }
  export { replaceInFile_2 as replaceInFile_alias_4 }

+ declare type ReplaceResult = {
+ content: string;
+ status: 'no_diff_applied' | 'some_diff_applied' | 'all_diff_applied';
+ appliedCount: number;
+ totalCount: number;
+ };
+ export { ReplaceResult }
+ export { ReplaceResult as ReplaceResult_alias_1 }
+
  declare const responsePrompts: {
  readonly errorInvokeTool: (tool: string, error: unknown) => string;
  readonly requireUseTool: "Error: No tool use detected. You MUST use a tool before proceeding.\ne.g. <tool_tool_name>tool_name</tool_tool_name>\n\nEnsure the opening and closing tags are correctly nested and closed, and that you are using the correct tool name.\nAvoid unnecessary text or symbols before or after the tool use.\nAvoid unnecessary escape characters or special characters.\n";
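
`replaceInFile` is now synchronous and reports partial application through `ReplaceResult` instead of throwing on the first unmatched block. A minimal sketch of a consumer, assuming the `replaceInFileHelper` and `ReplaceResult` re-exports shown above reach the package root:

```ts
import { replaceInFileHelper, type ReplaceResult } from '@polka-codes/core'

declare const fileContent: string // original file text (placeholder)
declare const diff: string // SEARCH/REPLACE blocks (placeholder)

const result: ReplaceResult = replaceInFileHelper(fileContent, diff)

switch (result.status) {
  case 'all_diff_applied':
    // Every block matched; result.content is safe to write back.
    break
  case 'some_diff_applied':
    console.warn(`applied ${result.appliedCount}/${result.totalCount} blocks`)
    break
  case 'no_diff_applied':
    // Nothing matched; result.content equals the unchanged input.
    break
}
```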
package/dist/index.js CHANGED
@@ -8,14 +8,19 @@ var __export = (target, all) => {
  var AiServiceBase = class {
  usageMeter;
  options;
+ #abortController;
  constructor(options) {
  this.options = options;
  this.usageMeter = options.usageMeter;
  }
+ abort() {
+ this.#abortController?.abort();
+ }
  async *send(systemPrompt, messages) {
  this.usageMeter.checkLimit();
  this.usageMeter.incrementMessageCount();
- const stream = this.sendImpl(systemPrompt, messages);
+ this.#abortController = new AbortController();
+ const stream = this.sendImpl(systemPrompt, messages, this.#abortController.signal);
  for await (const chunk of stream) {
  switch (chunk.type) {
  case "usage":
@@ -28,7 +33,8 @@ var AiServiceBase = class {
  async request(systemPrompt, messages) {
  this.usageMeter.checkLimit();
  this.usageMeter.incrementMessageCount();
- const stream = this.sendImpl(systemPrompt, messages);
+ this.#abortController = new AbortController();
+ const stream = this.sendImpl(systemPrompt, messages, this.#abortController.signal);
  const usage = {
  inputTokens: 0,
  outputTokens: 0,
@@ -208,7 +214,7 @@ var AnthropicService = class extends AiServiceBase {
  info: anthropicModels[id] ?? anthropicModels[anthropicDefaultModelId]
  };
  }
- async *sendImpl(systemPrompt, messages) {
+ async *sendImpl(systemPrompt, messages, signal) {
  let stream;
  const modelId = this.model.id;
  const cacheControl = this.#options.enableCache ? { type: "ephemeral" } : void 0;
@@ -236,52 +242,58 @@ var AnthropicService = class extends AiServiceBase {
  }, []);
  const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1;
  const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1;
- stream = await this.#client.messages.create({
- model: modelId,
- max_tokens: this.model.info.maxTokens || 8192,
- thinking: thinkingBudgetTokens ? { type: "enabled", budget_tokens: thinkingBudgetTokens } : void 0,
- temperature,
- system: [
- {
- text: systemPrompt,
- type: "text",
- cache_control: cacheControl
- }
- ],
- // setting cache breakpoint for system prompt so new tasks can reuse it
- messages: messages.map((message, index) => {
- if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
- return {
- ...message,
- content: typeof message.content === "string" ? [
- {
- type: "text",
- text: message.content,
- cache_control: cacheControl
- }
- ] : message.content.map(
- (content, contentIndex) => contentIndex === message.content.length - 1 ? {
- ...content,
- cache_control: cacheControl
- } : content
- )
- };
- }
- return message;
- }),
- stream: true
- });
+ stream = await this.#client.messages.create(
+ {
+ model: modelId,
+ max_tokens: this.model.info.maxTokens || 8192,
+ thinking: thinkingBudgetTokens ? { type: "enabled", budget_tokens: thinkingBudgetTokens } : void 0,
+ temperature,
+ system: [
+ {
+ text: systemPrompt,
+ type: "text",
+ cache_control: cacheControl
+ }
+ ],
+ // setting cache breakpoint for system prompt so new tasks can reuse it
+ messages: messages.map((message, index) => {
+ if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
+ return {
+ ...message,
+ content: typeof message.content === "string" ? [
+ {
+ type: "text",
+ text: message.content,
+ cache_control: cacheControl
+ }
+ ] : message.content.map(
+ (content, contentIndex) => contentIndex === message.content.length - 1 ? {
+ ...content,
+ cache_control: cacheControl
+ } : content
+ )
+ };
+ }
+ return message;
+ }),
+ stream: true
+ },
+ { signal }
+ );
  break;
  }
  default: {
- stream = await this.#client.messages.create({
- model: modelId,
- max_tokens: this.model.info.maxTokens || 8192,
- temperature: 0,
- system: [{ text: systemPrompt, type: "text" }],
- messages,
- stream: true
- });
+ stream = await this.#client.messages.create(
+ {
+ model: modelId,
+ max_tokens: this.model.info.maxTokens || 8192,
+ temperature: 0,
+ system: [{ text: systemPrompt, type: "text" }],
+ messages,
+ stream: true
+ },
+ { signal }
+ );
  break;
  }
  }
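
The only functional change in these call sites is the new second argument: the Anthropic SDK (like the OpenAI-compatible clients below) accepts per-request options alongside the request body, and an `AbortSignal` passed there cancels the underlying HTTP request and terminates the stream. A minimal standalone sketch, with an illustrative model id:

```ts
import Anthropic from '@anthropic-ai/sdk'

const client = new Anthropic() // reads ANTHROPIC_API_KEY from the environment
const controller = new AbortController()

// Request body first, per-request options second.
const stream = await client.messages.create(
  {
    model: 'claude-3-5-sonnet-latest', // illustrative model id
    max_tokens: 1024,
    messages: [{ role: 'user', content: 'Hello' }],
    stream: true,
  },
  { signal: controller.signal },
)

// Calling controller.abort() while streaming makes iteration reject with an abort error.
```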
@@ -487,19 +499,22 @@ var DeepSeekService = class extends AiServiceBase {
  info: deepSeekModels[id] ?? deepSeekModels[deepSeekDefaultModelId]
  };
  }
- async *sendImpl(systemPrompt, messages) {
+ async *sendImpl(systemPrompt, messages, signal) {
  const openAiMessages = [
  { role: "system", content: systemPrompt },
  ...convertToOpenAiMessages(messages)
  ];
- const stream = await this.#client.chat.completions.create({
- model: this.model.id,
- max_completion_tokens: this.model.info.maxTokens,
- messages: openAiMessages,
- temperature: 0,
- stream: true,
- stream_options: { include_usage: true }
- });
+ const stream = await this.#client.chat.completions.create(
+ {
+ model: this.model.id,
+ max_completion_tokens: this.model.info.maxTokens,
+ messages: openAiMessages,
+ temperature: 0,
+ stream: true,
+ stream_options: { include_usage: true }
+ },
+ { signal }
+ );
  for await (const chunk of stream) {
  const delta = chunk.choices[0]?.delta;
  if (delta?.reasoning_content) {
@@ -549,17 +564,20 @@ var OllamaService = class extends AiServiceBase {
  info: openAiModelInfoSaneDefaults
  };
  }
- async *sendImpl(systemPrompt, messages) {
+ async *sendImpl(systemPrompt, messages, signal) {
  const openAiMessages = [
  { role: "system", content: systemPrompt },
  ...convertToOpenAiMessages(messages)
  ];
- const stream = await this.#client.chat.completions.create({
- model: this.model.id,
- messages: openAiMessages,
- temperature: 0,
- stream: true
- });
+ const stream = await this.#client.chat.completions.create(
+ {
+ model: this.model.id,
+ messages: openAiMessages,
+ temperature: 0,
+ stream: true
+ },
+ { signal }
+ );
  for await (const chunk of stream) {
  const delta = chunk.choices[0]?.delta;
  if (delta?.content) {
@@ -609,7 +627,7 @@ var OpenRouterService = class extends AiServiceBase {
  this.#modelProviderInfo = data.data;
  });
  }
- async *sendImpl(systemPrompt, messages) {
+ async *sendImpl(systemPrompt, messages, signal) {
  const openAiMessages = [
  { role: "system", content: systemPrompt },
  ...convertToOpenAiMessages(messages)
@@ -662,15 +680,18 @@ var OpenRouterService = class extends AiServiceBase {
  if (this.model.id === "deepseek/deepseek-chat") {
  shouldApplyMiddleOutTransform = true;
  }
- const stream = await this.#client.chat.completions.create({
- model: this.model.id,
- messages: openAiMessages,
- temperature: 0,
- stream: true,
- transforms: shouldApplyMiddleOutTransform ? ["middle-out"] : void 0,
- include_reasoning: true,
- ...reasoning
- });
+ const stream = await this.#client.chat.completions.create(
+ {
+ model: this.model.id,
+ messages: openAiMessages,
+ temperature: 0,
+ stream: true,
+ transforms: shouldApplyMiddleOutTransform ? ["middle-out"] : void 0,
+ include_reasoning: true,
+ ...reasoning
+ },
+ { signal }
+ );
  let genId;
  for await (const chunk of stream) {
  if ("error" in chunk) {
@@ -1008,7 +1029,7 @@ var getArray = (args, name, defaultValue) => {
  };

  // src/tools/utils/replaceInFile.ts
- var replaceInFile = async (fileContent, diff) => {
+ var replaceInFile = (fileContent, diff) => {
  const blockPattern = /<<<<<+ SEARCH>?\s*\r?\n([\s\S]*?)\r?\n=======[ \t]*\r?\n([\s\S]*?)\r?\n?>>>>>+ REPLACE/g;
  const blocks = [];
  for (let match = blockPattern.exec(diff); match !== null; match = blockPattern.exec(diff)) {
@@ -1053,14 +1074,32 @@ var replaceInFile = async (fileContent, diff) => {
  const startPos = endPos - strippedSearch.length;
  return content.slice(0, startPos) + replace + content.slice(endPos);
  }
- throw new Error(`Could not find the following text in file:
- ${search}`);
+ return null;
  };
  let updatedFile = fileContent;
+ let appliedCount = 0;
+ const totalCount = blocks.length;
  for (const { search, replace } of blocks) {
- updatedFile = findAndReplace(updatedFile, search, replace);
+ const result = findAndReplace(updatedFile, search, replace);
+ if (result !== null) {
+ updatedFile = result;
+ appliedCount++;
+ }
+ }
+ let status;
+ if (appliedCount === 0) {
+ status = "no_diff_applied";
+ } else if (appliedCount < totalCount) {
+ status = "some_diff_applied";
+ } else {
+ status = "all_diff_applied";
  }
- return updatedFile;
+ return {
+ content: updatedFile,
+ status,
+ appliedCount,
+ totalCount
+ };
  };

  // src/tools/askFollowupQuestion.ts
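
The `blockPattern` regex in the previous hunk defines the format this helper parses: one or more `<<<<<<< SEARCH` / `=======` / `>>>>>>> REPLACE` blocks. A sketch of the new partial-application behavior with two blocks where only the first matches (expected values are inferred from the code above, not verified output):

```ts
import { replaceInFileHelper } from '@polka-codes/core'

const fileContent = 'const a = 1\nconst b = 2\n'

// Two SEARCH/REPLACE blocks; the second does not occur in the file.
const diff = [
  '<<<<<<< SEARCH',
  'const a = 1',
  '=======',
  'const a = 42',
  '>>>>>>> REPLACE',
  '<<<<<<< SEARCH',
  'const missing = 0',
  '=======',
  'const missing = 1',
  '>>>>>>> REPLACE',
].join('\n')

const result = replaceInFileHelper(fileContent, diff)
// Expected: status 'some_diff_applied', appliedCount 1, totalCount 2,
// and result.content containing `const a = 42`.
```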
@@ -1759,14 +1798,30 @@ var handler8 = async (provider, args) => {
  if (fileContent == null) {
  return {
  type: "Error" /* Error */,
- message: `<error><replace_in_file_path>${path}</replace_in_file_path><error_message>File not found</error_message></error>`
+ message: `<replace_in_file_result path="${path}" status="failed" message="File not found" />`
+ };
+ }
+ const result = replaceInFile(fileContent, diff);
+ if (result.status === "no_diff_applied") {
+ return {
+ type: "Error" /* Error */,
+ message: `<replace_in_file_result path="${path}" status="failed" message="Unable to apply changes">
+ <file_content path="${path}">${fileContent}</file_content>
+ </replace_in_file_result>`
+ };
+ }
+ await provider.writeFile(path, result.content);
+ if (result.status === "some_diff_applied") {
+ return {
+ type: "Reply" /* Reply */,
+ message: `<replace_in_file_result path="${path}" status="some_diff_applied" applied_count="${result.appliedCount}" total_count="${result.totalCount}">
+ <file_content path="${path}">${result.content}</file_content>
+ </replace_in_file_result>`
  };
  }
- const result = await replaceInFile(fileContent, diff);
- await provider.writeFile(path, result);
  return {
  type: "Reply" /* Reply */,
- message: `<replace_in_file_path>${path}</replace_in_file_path>`
+ message: `<replace_in_file_result path="${path}" status="all_diff_applied" />`
  };
  };
  var isAvailable8 = (provider) => {
@@ -2832,6 +2887,7 @@ var AgentBase = class {
  handlers;
  #messages = [];
  #policies;
+ #aborted = false;
  constructor(name, ai, config) {
  this.ai = ai;
  if (config.agents && config.agents.length > 0) {
@@ -2863,6 +2919,10 @@ ${instance.prompt}`;
  this.config = config;
  this.#policies = policies;
  }
+ abort() {
+ this.#aborted = true;
+ this.ai.abort();
+ }
  get parameters() {
  return this.ai.options.parameters;
  }
@@ -2891,11 +2951,17 @@ ${instance.prompt}`;
  async #processLoop(userMessage) {
  let nextRequest = userMessage;
  while (true) {
+ if (this.#aborted) {
+ return { type: "Aborted" };
+ }
  if (this.ai.usageMeter.isLimitExceeded().result) {
  this.#callback({ kind: "UsageExceeded" /* UsageExceeded */, agent: this });
  return { type: "UsageExceeded" };
  }
  const response = await this.#request(nextRequest);
+ if (this.#aborted) {
+ return { type: "Aborted" };
+ }
  const resp = await this.#handleResponse(response);
  if (resp.type === "exit") {
  this.#callback({ kind: "EndTask" /* EndTask */, agent: this, exitReason: resp.reason });
@@ -2942,14 +3008,23 @@ ${instance.prompt}`;
  }
  }
  } catch (error) {
+ if (error instanceof Error && error.name === "AbortError") {
+ break;
+ }
  console.error("Error in stream:", error);
  }
  if (currentAssistantMessage) {
  break;
  }
+ if (this.#aborted) {
+ break;
+ }
  console.debug(`Retrying request ${i + 1} of ${retryCount}`);
  }
  if (!currentAssistantMessage) {
+ if (this.#aborted) {
+ return [];
+ }
  throw new Error("No assistant message received");
  }
  this.#messages.push({
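
The retry loop now distinguishes intentional cancellation from transient failures: in current runtimes, aborting a fetch-based request rejects with an error whose `name` is `"AbortError"`, which is what the check above relies on. A minimal sketch of that behavior (the URL is a placeholder):

```ts
const controller = new AbortController()
const pending = fetch('https://example.com', { signal: controller.signal })
controller.abort()

try {
  await pending
} catch (error) {
  if (error instanceof Error && error.name === 'AbortError') {
    // Intentional cancellation: stop retrying instead of logging a failure.
  } else {
    throw error
  }
}
```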
@@ -3663,6 +3738,7 @@ var MultiAgent = class {
  case "Delegate" /* Delegate */:
  console.warn("Unexpected exit reason", delegateResult);
  break;
+ case "Aborted":
  case "Interrupted" /* Interrupted */:
  return delegateResult;
  case "Exit" /* Exit */:
@@ -3670,6 +3746,7 @@ var MultiAgent = class {
  }
  return delegateResult;
  }
+ case "Aborted":
  case "Interrupted" /* Interrupted */:
  case "Exit" /* Exit */:
  this.#agents.pop();
@@ -3702,6 +3779,11 @@ var MultiAgent = class {
  get hasActiveAgent() {
  return this.#agents.length > 0;
  }
+ abort() {
+ if (this.hasActiveAgent) {
+ this.#agents[this.#agents.length - 1].abort();
+ }
+ }
  };

  // src/config.ts
@@ -3778,27 +3860,26 @@ var prompt = `
  You are equipped with **Knowledge Management** capabilities:

  1. **What to capture**
- \u2022 Public API of each file (public classes, functions, methods, parameters, return types).
- \u2022 High-level description of each file's purpose.
- \u2022 Invariants and assumptions that must always hold.
- \u2022 Project or directory-specific coding patterns, styles, and architectural conventions.
- \u2022 Rules (commenting, testing, documentation, security, etc.).
- \u2022 Any other insight that a future contributor would find crucial.
+ - Public API of each file (public classes, functions, methods, parameters, return types).
+ - High-level description of each file's purpose.
+ - Invariants and assumptions that must always hold.
+ - Project or directory-specific coding patterns, styles, and architectural conventions.
+ - Any other insight that a future contributor would find crucial.

  2. **Where to store it**
- \u2022 Save knowledge in a YAML file named \`knowledge.ai.yml\`.
- \u2022 **Create the file in the repository root if it does not yet exist.**
- \u2022 One file per directory.
+ - Save knowledge in a YAML file named \`knowledge.ai.yml\`.
+ - **Create the file in the repository root if it does not yet exist.**
+ - One file per directory.
  - The repository root file records knowledge that applies project-wide (e.g., service responsibilities, global patterns).
  - Each sub-directory keeps only the knowledge relevant to that directory or package.
- \u2022 Use clear top-level keys such as \`description\`, \`files\`, \`rules\`.
+ - Use clear top-level keys such as \`description\`, \`files\`, \`rules\`.

  3. **When to update**
- \u2022 **Default behaviour:** only create / update knowledge for the files you actively read, create, or modify during the current task.
+ - **Default behaviour:** only create / update knowledge for the files you actively read, create, or modify during the current task.
  - Operate on other files **only if the user explicitly requests it**.
- \u2022 **While working**: after reading, analysing, creating, or modifying code, immediately record any new or changed knowledge.
- \u2022 **On refactor / deletion**: locate and delete or amend obsolete entries so that knowledge never drifts from the codebase.
- \u2022 **Granularity**: update only the affected directory's \`knowledge.ai.yml\`, except when the change has global impact.
+ - **While working**: after reading, analysing, creating, or modifying code, immediately record any new or changed knowledge.
+ - **On refactor / deletion**: locate and delete or amend obsolete entries so that knowledge never drifts from the codebase.
+ - **Granularity**: update only the affected directory's \`knowledge.ai.yml\`, except when the change has global impact.

  4. **How to format (illustrative)**
  \`\`\`yaml
@@ -3808,19 +3889,14 @@ files:
  description: "Numeric helpers for currency calculations"
  api:
  functions:
- 1:
- name: "add"
- params:
- 1: { name: "a", type: "number" }
- 2: { name: "b", type: "number" }
- returns: "number"
+ 1: add(a: number, b: number): number
  rules:
  1: "rules that apply to all files in this directory"
  \`\`\`

  5. **Source of truth**
- \u2022 **Never invent knowledge.** Everything you record must be *directly derived* from existing code, comments, commit messages, or explicit user instructions.
- \u2022 If a section has no confirmed content, omit it rather than guessing.
+ - **Never invent knowledge.** Everything you record must be *directly derived* from existing code, comments, commit messages, or explicit user instructions.
+ - If a section has no confirmed content, omit it rather than guessing.

  6. **Automatic context**
  When you are asked to read or modify a file, the orchestration layer will supply any existing knowledge for that path automatically. Use it, refine it, and keep it accurate.
@@ -3829,11 +3905,11 @@ rules:
  You can use the \`updateKnowledge\` tool to efficiently update knowledge files with smart merging capabilities.

  8. **Dictionary-First Format**
- \u2022 **Always prefer dictionaries** for structured data.
- \u2022 The **\`files\` section must be a dictionary keyed by file path** (e.g., \`"math.ts": {...}\`).
- \u2022 For other lists (rules, functions, etc.), use numbered dictionaries.
- \u2022 Arrays are allowed only when strict ordering is essential and dictionaries cannot express it.
- \u2022 When removing items, refer to them by their numeric key or index; gaps are fine.
+ - **Always prefer dictionaries** for structured data.
+ - The **\`files\` section must be a dictionary keyed by file path** (e.g., \`"math.ts": {...}\`).
+ - For other lists (rules, functions, etc.), use numbered dictionaries.
+ - Arrays are allowed only when strict ordering is essential and dictionaries cannot express it.
+ - When removing items, refer to them by their numeric key or index; gaps are fine.

  Your workflow **must**:
  1. Detect knowledge deltas.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@polka-codes/core",
- "version": "0.8.24",
+ "version": "0.8.26",
  "license": "AGPL-3.0",
  "author": "github@polka.codes",
  "type": "module",