@polka-codes/cli 0.7.9 → 0.7.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +70 -76
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -24629,7 +24629,62 @@ var {
    Help
  } = import__.default;
  // package.json
- var version = "0.7.9";
+ var version = "0.7.11";
+
+ // ../core/src/AiService/AiServiceBase.ts
+ class AiServiceBase {
+   usageMeter;
+   constructor(usageMeter) {
+     this.usageMeter = usageMeter;
+   }
+   async* send(systemPrompt, messages) {
+     this.usageMeter.incrementMessageCount();
+     const stream = this.sendImpl(systemPrompt, messages);
+     for await (const chunk of stream) {
+       switch (chunk.type) {
+         case "usage":
+           this.usageMeter.addUsage(chunk, this.model);
+           break;
+       }
+       yield chunk;
+     }
+   }
+   async request(systemPrompt, messages) {
+     this.usageMeter.incrementMessageCount();
+     const stream = this.sendImpl(systemPrompt, messages);
+     const usage = {
+       inputTokens: 0,
+       outputTokens: 0,
+       cacheWriteTokens: 0,
+       cacheReadTokens: 0,
+       totalCost: 0
+     };
+     let resp = "";
+     let reasoning = "";
+     for await (const chunk of stream) {
+       switch (chunk.type) {
+         case "usage":
+           usage.inputTokens = chunk.inputTokens ?? 0;
+           usage.outputTokens = chunk.outputTokens ?? 0;
+           usage.cacheWriteTokens = chunk.cacheWriteTokens ?? 0;
+           usage.cacheReadTokens = chunk.cacheReadTokens ?? 0;
+           usage.totalCost = chunk.totalCost;
+           break;
+         case "text":
+           resp += chunk.text;
+           break;
+         case "reasoning":
+           reasoning += chunk.text;
+       }
+     }
+     this.usageMeter.addUsage(usage, this.model);
+     return {
+       response: resp,
+       reasoning,
+       usage
+     };
+   }
+ }

  // ../../node_modules/@anthropic-ai/sdk/version.mjs
  var VERSION = "0.39.0";
@@ -27984,61 +28039,6 @@ Anthropic.Models = Models2;
  Anthropic.ModelInfosPage = ModelInfosPage;
  Anthropic.Beta = Beta;

- // ../core/src/AiService/AiServiceBase.ts
- class AiServiceBase {
-   usageMeter;
-   constructor(usageMeter) {
-     this.usageMeter = usageMeter;
-   }
-   async* send(systemPrompt, messages) {
-     this.usageMeter.incrementMessageCount();
-     const stream = this.sendImpl(systemPrompt, messages);
-     for await (const chunk of stream) {
-       switch (chunk.type) {
-         case "usage":
-           this.usageMeter.addUsage(chunk, this.model);
-           break;
-       }
-       yield chunk;
-     }
-   }
-   async request(systemPrompt, messages) {
-     this.usageMeter.incrementMessageCount();
-     const stream = this.sendImpl(systemPrompt, messages);
-     const usage = {
-       inputTokens: 0,
-       outputTokens: 0,
-       cacheWriteTokens: 0,
-       cacheReadTokens: 0,
-       totalCost: 0
-     };
-     let resp = "";
-     let reasoning = "";
-     for await (const chunk of stream) {
-       switch (chunk.type) {
-         case "usage":
-           usage.inputTokens = chunk.inputTokens ?? 0;
-           usage.outputTokens = chunk.outputTokens ?? 0;
-           usage.cacheWriteTokens = chunk.cacheWriteTokens ?? 0;
-           usage.cacheReadTokens = chunk.cacheReadTokens ?? 0;
-           usage.totalCost = chunk.totalCost;
-           break;
-         case "text":
-           resp += chunk.text;
-           break;
-         case "reasoning":
-           reasoning += chunk.text;
-       }
-     }
-     this.usageMeter.addUsage(usage, this.model);
-     return {
-       response: resp,
-       reasoning,
-       usage
-     };
-   }
- }
-
  // ../core/src/AiService/ModelInfo.ts
  var anthropicDefaultModelId = "claude-3-7-sonnet-20250219";
  var anthropicModels = {
@@ -34568,26 +34568,20 @@ ${tools.map((tool) => {
  }).join("")}
  # Tool Use Guidelines

- 1. **Thinking**: Use \`<thinking>\` XCM tag to clearly outline your thought process *before* using any tools. This includes:
-    * Assessing the current situation and available information.
-    * Defining specific goals and a plan to achieve them.
-    * Justifying the selection of a particular tool.
-    * Explaining how you intend to use the tool and what you expect to achieve.
- 2. **Tool Selection**: Choose one tool at a time per message based on the task and its description. Do not assume a tool’s outcome without explicit confirmation.
- 3. **Formatting**: Formulate tool use only in the specified XML format for each tool.
- 4. **User Response**: Wait for the user’s response after each tool use. Do not proceed until you have their confirmation. The user’s response may include:
-    * Tool success or failure details
-    * Linter errors
-    * Terminal output or other relevant feedback
- 5. **Conciseness**: Never repeat or quote the entire tool command in your final user-facing message. Summarize outcomes clearly and avoid echoing commands verbatim.
- 6. **Brevity**: Respond concisely and move the conversation forward. Do not re-issue the same command or re-trigger tool use without necessity.
- 7. **Iteration**: Follow these steps iteratively, confirming success and addressing issues as you go.
- 8. **Error Handling**: If a tool returns an error, analyze the error message and adjust your approach accordingly. Consider alternative tools or strategies to achieve the desired outcome.
-
- By adhering to these guidelines:
- - You maintain clarity without accidentally re-invoking tools.
- - You confirm each step’s results before proceeding.
- - You provide only the necessary information in user-facing replies to prevent re-interpretation as new commands.`;
+ 1. **Outline Your Thought Process**
+    - Before using a tool, wrap your reasoning inside \`<thinking>\` tags. Be concise—just enough to clarify your plan and the rationale behind selecting a specific tool.
+
+ 2. **Wait for Feedback**
+    - After using a tool, wait for the user's response indicating success/failure or any output logs. Do not assume the result of a tool without explicit confirmation.
+
+ 3. **Error Handling**
+    - If a tool fails or produces an unexpected result, analyze the error, decide on an alternative approach or tool, and proceed carefully.
+
+ 4. **Avoid Repetition**
+    - Do not quote or repeat previous commands or prompts verbatim. Move the conversation forward by focusing on the latest required action.
+
+ 5. **No Unnecessary Re-invocations**
+    - Only invoke the same tool again if a genuine need arises (e.g., different parameters or updated context).`;
  };
  var agentsPrompt = (agents, name2) => `
  ====
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@polka-codes/cli",
-   "version": "0.7.9",
+   "version": "0.7.11",
    "license": "AGPL-3.0",
    "author": "github@polka.codes",
    "type": "module",