@polos/sdk 0.1.2 → 0.1.3

package/dist/index.cjs CHANGED
@@ -56,9 +56,6 @@ function createWorkflowRegistry() {
   const workflows = /* @__PURE__ */ new Map();
   return {
     register(workflow) {
-      if (workflows.has(workflow.id)) {
-        throw new DuplicateWorkflowError(workflow.id);
-      }
      workflows.set(workflow.id, workflow);
    },
    get(workflowId) {
@@ -1810,11 +1807,18 @@ function convertMiddlewareToolCallToPython(tc) {
 function convertVercelUsageToPython(usage) {
   const input = usage.inputTokens ?? 0;
   const output = usage.outputTokens ?? 0;
-  return {
+  const result = {
     input_tokens: input,
     output_tokens: output,
     total_tokens: usage.totalTokens ?? input + output
   };
+  if (usage.inputTokenDetails?.cacheReadTokens != null) {
+    result.cache_read_input_tokens = usage.inputTokenDetails.cacheReadTokens;
+  }
+  if (usage.inputTokenDetails?.cacheWriteTokens != null) {
+    result.cache_creation_input_tokens = usage.inputTokenDetails.cacheWriteTokens;
+  }
+  return result;
 }
 function convertFinishReason(reason) {
   if (!reason) return null;
@@ -1829,6 +1833,41 @@ function convertFinishReason(reason) {
 }
 
 // src/llm/llm.ts
+var ANTHROPIC_CACHE_BREAKPOINT = {
+  anthropic: { cacheControl: { type: "ephemeral" } }
+};
+function isAnthropicModel(model) {
+  return getModelProvider(model).startsWith("anthropic");
+}
+function applyAnthropicCacheControl(args, model) {
+  if (!isAnthropicModel(model)) return;
+  if (typeof args["system"] === "string") {
+    args["system"] = {
+      role: "system",
+      content: args["system"],
+      providerOptions: ANTHROPIC_CACHE_BREAKPOINT
+    };
+  }
+  const tools = args["tools"];
+  if (tools) {
+    const toolNames = Object.keys(tools);
+    if (toolNames.length > 0) {
+      const lastToolName = toolNames[toolNames.length - 1];
+      tools[lastToolName] = {
+        ...tools[lastToolName],
+        providerOptions: ANTHROPIC_CACHE_BREAKPOINT
+      };
+    }
+  }
+  const messages = args["messages"];
+  if (messages && messages.length > 0) {
+    const lastMsg = messages[messages.length - 1];
+    messages[messages.length - 1] = {
+      ...lastMsg,
+      providerOptions: ANTHROPIC_CACHE_BREAKPOINT
+    };
+  }
+}
 function buildGenerateArgs(model, messages, options) {
   const args = { model, messages };
   const tools = convertToolsToVercel(options.tools);
@@ -1840,6 +1879,7 @@ function buildGenerateArgs(model, messages, options) {
   if (options.outputSchema) {
     args["experimental_output"] = ai.Output.object({ schema: options.outputSchema });
   }
+  applyAnthropicCacheControl(args, model);
   return args;
 }
 var LLM = class {
@@ -2585,6 +2625,8 @@ async function agentStreamFunction(ctx, payload, agentDef) {
   let finalInputTokens = 0;
   let finalOutputTokens = 0;
   let finalTotalTokens = 0;
+  let finalCacheReadInputTokens = 0;
+  let finalCacheCreationInputTokens = 0;
   let lastLlmResultContent = null;
   const allToolResults = [];
   const steps = [];
@@ -2715,6 +2757,12 @@ async function agentStreamFunction(ctx, payload, agentDef) {
       finalInputTokens += llmResult.usage.input_tokens;
       finalOutputTokens += llmResult.usage.output_tokens;
       finalTotalTokens += llmResult.usage.total_tokens;
+      if (llmResult.usage.cache_read_input_tokens) {
+        finalCacheReadInputTokens += llmResult.usage.cache_read_input_tokens;
+      }
+      if (llmResult.usage.cache_creation_input_tokens) {
+        finalCacheCreationInputTokens += llmResult.usage.cache_creation_input_tokens;
+      }
     }
     lastLlmResultContent = llmResult.content;
     const toolCalls = llmResult.tool_calls ?? [];
@@ -2938,7 +2986,11 @@ Please provide ONLY valid JSON that matches the schema, with no additional text
     usage: {
       input_tokens: finalInputTokens,
       output_tokens: finalOutputTokens,
-      total_tokens: finalTotalTokens
+      total_tokens: finalTotalTokens,
+      ...finalCacheReadInputTokens > 0 && { cache_read_input_tokens: finalCacheReadInputTokens },
+      ...finalCacheCreationInputTokens > 0 && {
+        cache_creation_input_tokens: finalCacheCreationInputTokens
+      }
     }
   };
 }
@@ -3099,7 +3151,7 @@ function defineAgent(config) {
       maxOutputTokens: config.maxOutputTokens
     },
     input,
-    streaming: streamingFlag ?? false,
+    streaming: streamingFlag ?? config.streamToWorkflow ?? false,
     conversation_id: conversationIdValue
   };
   return agentStreamFunction(ctx, streamPayload, {
@@ -5803,6 +5855,13 @@ var DEFAULT_TIMEOUT_SECONDS = 300;
 var DEFAULT_MAX_OUTPUT_CHARS = 1e5;
 function spawnCommand(command, args, options) {
   return new Promise((resolve8, reject) => {
+    let settled = false;
+    const settle = (fn) => {
+      if (!settled) {
+        settled = true;
+        fn();
+      }
+    };
     const proc = child_process.spawn(command, args, { stdio: ["pipe", "pipe", "pipe"] });
     let stdout = "";
     let stderr = "";
@@ -5813,6 +5872,12 @@ function spawnCommand(command, args, options) {
     proc.stderr.on("data", (data) => {
       stderr += data.toString();
     });
+    proc.stdin.on("error", () => {
+    });
+    proc.stdout.on("error", () => {
+    });
+    proc.stderr.on("error", () => {
+    });
     const timeoutMs = (options?.timeout ?? DEFAULT_TIMEOUT_SECONDS) * 1e3;
     const timer = setTimeout(() => {
       killed = true;
@@ -5820,24 +5885,31 @@ function spawnCommand(command, args, options) {
     }, timeoutMs);
     proc.on("close", (code) => {
       clearTimeout(timer);
-      if (killed) {
-        resolve8({
-          exitCode: 137,
-          stdout,
-          stderr: stderr + "\n[Process killed: timeout exceeded]"
-        });
-      } else {
-        resolve8({ exitCode: code ?? 1, stdout, stderr });
-      }
+      settle(() => {
+        if (killed) {
+          resolve8({
+            exitCode: 137,
+            stdout,
+            stderr: stderr + "\n[Process killed: timeout exceeded]"
+          });
+        } else {
+          resolve8({ exitCode: code ?? 1, stdout, stderr });
+        }
+      });
     });
     proc.on("error", (err) => {
       clearTimeout(timer);
-      reject(err);
+      settle(() => {
+        reject(err);
+      });
     });
     if (options?.stdin) {
-      proc.stdin.write(options.stdin);
+      proc.stdin.write(options.stdin, () => {
+        proc.stdin.end();
+      });
+    } else {
+      proc.stdin.end();
     }
-    proc.stdin.end();
   });
 }
 var DockerEnvironment = class {
@@ -5893,7 +5965,7 @@ var DockerEnvironment = class {
   }
   async exec(command, opts) {
     this.assertInitialized();
-    const args = ["exec", "-i"];
+    const args = opts?.stdin ? ["exec", "-i"] : ["exec"];
     const cwd = opts?.cwd ?? this.containerWorkdir;
     args.push("-w", cwd);
     if (opts?.env) {
@@ -6028,6 +6100,13 @@ var DEFAULT_TIMEOUT_SECONDS2 = 300;
 var DEFAULT_MAX_OUTPUT_CHARS2 = 1e5;
 function spawnLocal(command, options) {
   return new Promise((resolve8, reject) => {
+    let settled = false;
+    const settle = (fn) => {
+      if (!settled) {
+        settled = true;
+        fn();
+      }
+    };
     const proc = child_process.spawn("sh", ["-c", command], {
       cwd: options.cwd,
       env: options.env ? { ...process.env, ...options.env } : void 0,
@@ -6042,6 +6121,12 @@ function spawnLocal(command, options) {
     proc.stderr.on("data", (data) => {
       stderr += data.toString();
     });
+    proc.stdin.on("error", () => {
+    });
+    proc.stdout.on("error", () => {
+    });
+    proc.stderr.on("error", () => {
+    });
     const timeoutMs = (options.timeout ?? DEFAULT_TIMEOUT_SECONDS2) * 1e3;
     const timer = setTimeout(() => {
       killed = true;
@@ -6049,24 +6134,31 @@ function spawnLocal(command, options) {
     }, timeoutMs);
     proc.on("close", (code) => {
       clearTimeout(timer);
-      if (killed) {
-        resolve8({
-          exitCode: 137,
-          stdout,
-          stderr: stderr + "\n[Process killed: timeout exceeded]"
-        });
-      } else {
-        resolve8({ exitCode: code ?? 1, stdout, stderr });
-      }
+      settle(() => {
+        if (killed) {
+          resolve8({
+            exitCode: 137,
+            stdout,
+            stderr: stderr + "\n[Process killed: timeout exceeded]"
+          });
+        } else {
+          resolve8({ exitCode: code ?? 1, stdout, stderr });
+        }
+      });
     });
     proc.on("error", (err) => {
       clearTimeout(timer);
-      reject(err);
+      settle(() => {
+        reject(err);
+      });
     });
     if (options.stdin) {
-      proc.stdin.write(options.stdin);
+      proc.stdin.write(options.stdin, () => {
+        proc.stdin.end();
+      });
+    } else {
+      proc.stdin.end();
     }
-    proc.stdin.end();
   });
 }
 var LocalEnvironment = class {