veryfront 0.1.263 → 0.1.265

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/esm/deno.js +1 -1
  2. package/esm/src/agent/hosted-child-mirror.d.ts +91 -0
  3. package/esm/src/agent/hosted-child-mirror.d.ts.map +1 -0
  4. package/esm/src/agent/hosted-child-mirror.js +118 -0
  5. package/esm/src/agent/index.d.ts +2 -0
  6. package/esm/src/agent/index.d.ts.map +1 -1
  7. package/esm/src/agent/index.js +2 -0
  8. package/esm/src/chat/hosted-ui-chunk-mapping.d.ts +111 -0
  9. package/esm/src/chat/hosted-ui-chunk-mapping.d.ts.map +1 -0
  10. package/esm/src/chat/hosted-ui-chunk-mapping.js +123 -0
  11. package/esm/src/chat/index.d.ts +1 -0
  12. package/esm/src/chat/index.d.ts.map +1 -1
  13. package/esm/src/chat/index.js +1 -0
  14. package/esm/src/provider/runtime-loader/provider-embedding-responses.d.ts +5 -0
  15. package/esm/src/provider/runtime-loader/provider-embedding-responses.d.ts.map +1 -0
  16. package/esm/src/provider/runtime-loader/provider-embedding-responses.js +50 -0
  17. package/esm/src/provider/runtime-loader/provider-finish-reasons.d.ts +9 -0
  18. package/esm/src/provider/runtime-loader/provider-finish-reasons.d.ts.map +1 -0
  19. package/esm/src/provider/runtime-loader/provider-finish-reasons.js +60 -0
  20. package/esm/src/provider/runtime-loader/provider-sse.d.ts +5 -0
  21. package/esm/src/provider/runtime-loader/provider-sse.d.ts.map +1 -0
  22. package/esm/src/provider/runtime-loader/provider-sse.js +23 -0
  23. package/esm/src/provider/runtime-loader/provider-usage.d.ts +19 -0
  24. package/esm/src/provider/runtime-loader/provider-usage.d.ts.map +1 -0
  25. package/esm/src/provider/runtime-loader/provider-usage.js +109 -0
  26. package/esm/src/provider/runtime-loader.d.ts.map +1 -1
  27. package/esm/src/provider/runtime-loader.js +4 -240
  28. package/esm/src/utils/version-constant.d.ts +1 -1
  29. package/esm/src/utils/version-constant.js +1 -1
  30. package/package.json +1 -1
  31. package/src/deno.js +1 -1
  32. package/src/src/agent/hosted-child-mirror.ts +208 -0
  33. package/src/src/agent/index.ts +15 -0
  34. package/src/src/chat/hosted-ui-chunk-mapping.ts +303 -0
  35. package/src/src/chat/index.ts +5 -0
  36. package/src/src/provider/runtime-loader/provider-embedding-responses.ts +61 -0
  37. package/src/src/provider/runtime-loader/provider-finish-reasons.ts +69 -0
  38. package/src/src/provider/runtime-loader/provider-sse.ts +29 -0
  39. package/src/src/provider/runtime-loader/provider-usage.ts +135 -0
  40. package/src/src/provider/runtime-loader.ts +21 -300
  41. package/src/src/utils/version-constant.ts +1 -1
package/esm/src/provider/runtime-loader/provider-finish-reasons.js ADDED
@@ -0,0 +1,60 @@
+ export function normalizeAnthropicFinishReason(raw) {
+     if (typeof raw !== "string") {
+         return null;
+     }
+     switch (raw) {
+         case "tool_use":
+             return { unified: "tool-calls", raw };
+         case "end_turn":
+         case "stop_sequence":
+             return { unified: "stop", raw };
+         case "max_tokens":
+             return { unified: "length", raw };
+         default:
+             return raw;
+     }
+ }
+ export function normalizeGoogleFinishReason(raw) {
+     if (typeof raw !== "string") {
+         return null;
+     }
+     switch (raw) {
+         case "STOP":
+             return { unified: "stop", raw };
+         case "MAX_TOKENS":
+             return { unified: "length", raw };
+         case "SAFETY":
+         case "RECITATION":
+             return { unified: "content-filter", raw };
+         default:
+             return raw.toLowerCase();
+     }
+ }
+ export function normalizeOpenAIFinishReason(raw) {
+     if (typeof raw !== "string") {
+         return null;
+     }
+     if (raw === "tool_calls") {
+         return { unified: "tool-calls", raw };
+     }
+     if (raw === "content_filter") {
+         return { unified: "content-filter", raw };
+     }
+     return raw;
+ }
+ export function normalizeOpenAIResponsesFinishReason(raw) {
+     if (typeof raw !== "string")
+         return null;
+     switch (raw) {
+         case "completed":
+             return { unified: "stop", raw };
+         case "incomplete":
+             return { unified: "length", raw };
+         case "failed":
+             return { unified: "error", raw };
+         case "in_progress":
+             return null;
+         default:
+             return raw;
+     }
+ }
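Note: these normalizers map each provider's native finish value onto the unified set used elsewhere in this diff ("stop", "length", "tool-calls", "content-filter", "error"), keeping the raw string alongside. A minimal illustrative sketch of that behavior, written as a snippet rather than package code (the relative import path mirrors the one runtime-loader.js now uses; the sample inputs are hypothetical):

// Illustrative only — exercising the new normalizers.
import {
    normalizeAnthropicFinishReason,
    normalizeOpenAIResponsesFinishReason,
} from "./runtime-loader/provider-finish-reasons.js";

// Known provider values come back as { unified, raw } pairs.
normalizeAnthropicFinishReason("tool_use");
// -> { unified: "tool-calls", raw: "tool_use" }

// Unrecognized strings pass through unchanged, non-strings become null,
// and an in-progress Responses state is treated as "no finish reason yet".
normalizeAnthropicFinishReason("some_future_reason"); // -> "some_future_reason"
normalizeOpenAIResponsesFinishReason("in_progress");  // -> null
normalizeOpenAIResponsesFinishReason(undefined);      // -> null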
package/esm/src/provider/runtime-loader/provider-sse.d.ts ADDED
@@ -0,0 +1,5 @@
+ export declare function parseSseChunk(chunk: string): {
+     events: Array<unknown | "[DONE]">;
+     remainder: string;
+ };
+ //# sourceMappingURL=provider-sse.d.ts.map
package/esm/src/provider/runtime-loader/provider-sse.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"provider-sse.d.ts","sourceRoot":"","sources":["../../../../src/src/provider/runtime-loader/provider-sse.ts"],"names":[],"mappings":"AAAA,wBAAgB,aAAa,CAAC,KAAK,EAAE,MAAM,GAAG;IAC5C,MAAM,EAAE,KAAK,CAAC,OAAO,GAAG,QAAQ,CAAC,CAAC;IAClC,SAAS,EAAE,MAAM,CAAC;CACnB,CAyBA"}
package/esm/src/provider/runtime-loader/provider-sse.js ADDED
@@ -0,0 +1,23 @@
+ export function parseSseChunk(chunk) {
+     const blocks = chunk.split(/\r?\n\r?\n/);
+     const remainder = blocks.pop() ?? "";
+     const events = blocks.flatMap((block) => {
+         const dataLines = block.split(/\r?\n/)
+             .filter((line) => line.startsWith("data:"))
+             .map((line) => line.slice(5).trimStart());
+         if (!dataLines.length) {
+             return [];
+         }
+         const payload = dataLines.join("\n").trim();
+         if (payload === "[DONE]") {
+             return ["[DONE]"];
+         }
+         try {
+             return [JSON.parse(payload)];
+         }
+         catch {
+             return [];
+         }
+     });
+     return { events, remainder };
+ }
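Note: parseSseChunk splits an accumulated text buffer on blank-line boundaries, JSON-parses each data: block, and returns the trailing incomplete block as remainder so the caller can carry it into the next read. A hedged sketch of that driving loop (the readEvents helper, reader loop, and variable names are illustrative, not package code):

// Sketch: draining SSE events from a streamed response body with parseSseChunk.
import { parseSseChunk } from "./runtime-loader/provider-sse.js";

async function readEvents(body: ReadableStream<Uint8Array>): Promise<unknown[]> {
    const decoder = new TextDecoder();
    const reader = body.getReader();
    const events: unknown[] = [];
    let buffer = "";
    while (true) {
        const { value, done } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const parsed = parseSseChunk(buffer);
        buffer = parsed.remainder; // carry the incomplete trailing block forward
        for (const event of parsed.events) {
            if (event === "[DONE]") return events; // OpenAI-style stream terminator
            events.push(event); // already JSON-parsed payload
        }
    }
    return events;
}

This mirrors the decoder/buffer pattern visible in the streamAnthropicCompatibleParts context lines further down the diff.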
package/esm/src/provider/runtime-loader/provider-usage.d.ts ADDED
@@ -0,0 +1,19 @@
+ export type RuntimeUsage = {
+     inputTokens?: number;
+     outputTokens?: number;
+     totalTokens?: number;
+     cacheCreationInputTokens?: number;
+     cacheReadInputTokens?: number;
+ };
+ export declare function extractAnthropicUsage(payload: unknown): RuntimeUsage | undefined;
+ export declare function extractGoogleUsage(payload: unknown): RuntimeUsage | undefined;
+ export declare function extractOpenAIUsage(payload: unknown): RuntimeUsage | undefined;
+ /**
+  * The Responses API uses `input_tokens` / `output_tokens` field names
+  * instead of Chat Completions' `prompt_tokens` / `completion_tokens`.
+  * It also nests cached input tokens under `input_tokens_details` and
+  * exposes reasoning tokens via `output_tokens_details.reasoning_tokens`.
+  */
+ export declare function extractOpenAIResponsesUsage(payload: unknown): RuntimeUsage | undefined;
+ export declare function mergeUsage(current: RuntimeUsage | undefined, next: RuntimeUsage | undefined): RuntimeUsage | undefined;
+ //# sourceMappingURL=provider-usage.d.ts.map
package/esm/src/provider/runtime-loader/provider-usage.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"provider-usage.d.ts","sourceRoot":"","sources":["../../../../src/src/provider/runtime-loader/provider-usage.ts"],"names":[],"mappings":"AAEA,MAAM,MAAM,YAAY,GAAG;IACzB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,wBAAwB,CAAC,EAAE,MAAM,CAAC;IAClC,oBAAoB,CAAC,EAAE,MAAM,CAAC;CAC/B,CAAC;AAEF,wBAAgB,qBAAqB,CAAC,OAAO,EAAE,OAAO,GAAG,YAAY,GAAG,SAAS,CAsBhF;AAED,wBAAgB,kBAAkB,CAAC,OAAO,EAAE,OAAO,GAAG,YAAY,GAAG,SAAS,CAoB7E;AAED,wBAAgB,kBAAkB,CAAC,OAAO,EAAE,OAAO,GAAG,YAAY,GAAG,SAAS,CAmB7E;AAED;;;;;GAKG;AACH,wBAAgB,2BAA2B,CAAC,OAAO,EAAE,OAAO,GAAG,YAAY,GAAG,SAAS,CAwBtF;AAED,wBAAgB,UAAU,CACxB,OAAO,EAAE,YAAY,GAAG,SAAS,EACjC,IAAI,EAAE,YAAY,GAAG,SAAS,GAC7B,YAAY,GAAG,SAAS,CAsB1B"}
package/esm/src/provider/runtime-loader/provider-usage.js ADDED
@@ -0,0 +1,109 @@
+ import { readRecord } from "./provider-records.js";
+ export function extractAnthropicUsage(payload) {
+     const record = readRecord(payload);
+     const usage = readRecord(record?.usage);
+     if (!usage) {
+         return undefined;
+     }
+     const inputTokens = usage.input_tokens;
+     const outputTokens = usage.output_tokens;
+     const cacheCreationInputTokens = usage.cache_creation_input_tokens;
+     const cacheReadInputTokens = usage.cache_read_input_tokens;
+     return {
+         inputTokens: typeof inputTokens === "number" ? inputTokens : undefined,
+         outputTokens: typeof outputTokens === "number" ? outputTokens : undefined,
+         totalTokens: typeof inputTokens === "number" || typeof outputTokens === "number"
+             ? (typeof inputTokens === "number" ? inputTokens : 0) +
+                 (typeof outputTokens === "number" ? outputTokens : 0)
+             : undefined,
+         ...(typeof cacheCreationInputTokens === "number" ? { cacheCreationInputTokens } : {}),
+         ...(typeof cacheReadInputTokens === "number" ? { cacheReadInputTokens } : {}),
+     };
+ }
+ export function extractGoogleUsage(payload) {
+     const record = readRecord(payload);
+     const usage = readRecord(record?.usageMetadata);
+     if (!usage) {
+         return undefined;
+     }
+     const inputTokens = usage.promptTokenCount;
+     const outputTokens = usage.candidatesTokenCount;
+     const totalTokens = usage.totalTokenCount;
+     const cachedContentTokenCount = usage.cachedContentTokenCount;
+     return {
+         inputTokens: typeof inputTokens === "number" ? inputTokens : undefined,
+         outputTokens: typeof outputTokens === "number" ? outputTokens : undefined,
+         totalTokens: typeof totalTokens === "number" ? totalTokens : undefined,
+         ...(typeof cachedContentTokenCount === "number"
+             ? { cacheReadInputTokens: cachedContentTokenCount }
+             : {}),
+     };
+ }
+ export function extractOpenAIUsage(payload) {
+     const record = readRecord(payload);
+     const usage = readRecord(record?.usage);
+     if (!usage) {
+         return undefined;
+     }
+     const inputTokens = usage.prompt_tokens;
+     const outputTokens = usage.completion_tokens;
+     const totalTokens = usage.total_tokens;
+     const promptTokensDetails = readRecord(usage.prompt_tokens_details);
+     const cachedTokens = promptTokensDetails?.cached_tokens;
+     return {
+         inputTokens: typeof inputTokens === "number" ? inputTokens : undefined,
+         outputTokens: typeof outputTokens === "number" ? outputTokens : undefined,
+         totalTokens: typeof totalTokens === "number" ? totalTokens : undefined,
+         ...(typeof cachedTokens === "number" ? { cacheReadInputTokens: cachedTokens } : {}),
+     };
+ }
+ /**
+  * The Responses API uses `input_tokens` / `output_tokens` field names
+  * instead of Chat Completions' `prompt_tokens` / `completion_tokens`.
+  * It also nests cached input tokens under `input_tokens_details` and
+  * exposes reasoning tokens via `output_tokens_details.reasoning_tokens`.
+  */
+ export function extractOpenAIResponsesUsage(payload) {
+     const record = readRecord(payload);
+     // Streaming usage lives on response.completed inside `response.usage`;
+     // non-streaming has it at the top level.
+     const responseRecord = readRecord(record?.response);
+     const usage = readRecord(responseRecord?.usage) ?? readRecord(record?.usage);
+     if (!usage)
+         return undefined;
+     const inputTokens = typeof usage.input_tokens === "number" ? usage.input_tokens : undefined;
+     const outputTokens = typeof usage.output_tokens === "number" ? usage.output_tokens : undefined;
+     const totalTokens = typeof usage.total_tokens === "number"
+         ? usage.total_tokens
+         : (inputTokens !== undefined || outputTokens !== undefined
+             ? (inputTokens ?? 0) + (outputTokens ?? 0)
+             : undefined);
+     const inputDetails = readRecord(usage.input_tokens_details);
+     const cachedTokens = inputDetails?.cached_tokens;
+     return {
+         inputTokens,
+         outputTokens,
+         totalTokens,
+         ...(typeof cachedTokens === "number" ? { cacheReadInputTokens: cachedTokens } : {}),
+     };
+ }
+ export function mergeUsage(current, next) {
+     if (!current) {
+         return next;
+     }
+     if (!next) {
+         return current;
+     }
+     const inputTokens = next.inputTokens ?? current.inputTokens;
+     const outputTokens = next.outputTokens ?? current.outputTokens;
+     const cacheCreationInputTokens = next.cacheCreationInputTokens ??
+         current.cacheCreationInputTokens;
+     const cacheReadInputTokens = next.cacheReadInputTokens ?? current.cacheReadInputTokens;
+     return {
+         inputTokens,
+         outputTokens,
+         totalTokens: (inputTokens ?? 0) + (outputTokens ?? 0),
+         ...(cacheCreationInputTokens !== undefined ? { cacheCreationInputTokens } : {}),
+         ...(cacheReadInputTokens !== undefined ? { cacheReadInputTokens } : {}),
+     };
+ }
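Note: each extractor only emits fields it can confirm are numbers, and mergeUsage folds per-chunk usage into a running total, letting later values win per field and recomputing totalTokens from the merged input/output counts. A small sketch under those assumptions (the sample Chat Completions chunks below are hypothetical, not captured responses):

// Sketch: accumulating streamed usage with extractOpenAIUsage + mergeUsage.
import { extractOpenAIUsage, mergeUsage, type RuntimeUsage } from "./runtime-loader/provider-usage.js";

const chunks: unknown[] = [
    { usage: { prompt_tokens: 12, completion_tokens: 3, total_tokens: 15 } },
    { usage: { prompt_tokens: 12, completion_tokens: 9, total_tokens: 21,
               prompt_tokens_details: { cached_tokens: 4 } } },
];

let usage: RuntimeUsage | undefined;
for (const chunk of chunks) {
    usage = mergeUsage(usage, extractOpenAIUsage(chunk));
}
// -> { inputTokens: 12, outputTokens: 9, totalTokens: 21, cacheReadInputTokens: 4 }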
package/esm/src/provider/runtime-loader.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"runtime-loader.d.ts","sourceRoot":"","sources":["../../../src/src/provider/runtime-loader.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,YAAY,EAAE,MAAM,YAAY,CAAC;AAejE,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,mCAAmC,CAAC;AAGtE,OAAO,EACL,+BAA+B,EAC/B,8BAA8B,EAC/B,MAAM,uCAAuC,CAAC;AAE/C,OAAO,EACL,aAAa,EACb,uBAAuB,EACvB,kBAAkB,EAClB,sBAAsB,EACtB,oBAAoB,GACrB,MAAM,mCAAmC,CAAC;AAC3C,OAAO,EAAE,+BAA+B,EAAE,8BAA8B,EAAE,CAAC;AAE3E,MAAM,WAAW,mBAAmB;IAClC,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,KAAK,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACjC;AAED,MAAM,WAAW,sBAAsB;IACrC,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,KAAK,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACjC;AAED,MAAM,WAAW,mBAAmB;IAClC,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,KAAK,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACjC;AAiaD;;;;;;GAMG;AACH,MAAM,MAAM,eAAe,GAAG;IAC5B,IAAI,EAAE,qBAAqB,GAAG,OAAO,CAAC;IACtC,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,YAAY,CAAC;CACxB,CAAC;AAilEF,wBAAgB,wBAAwB,CACtC,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,YAAY,CAsEd;AA4jBD,wBAAgB,4BAA4B,CAC1C,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,YAAY,CAsEd;AAED,wBAAgB,2BAA2B,CACzC,MAAM,EAAE,sBAAsB,EAC9B,OAAO,EAAE,MAAM,GACd,YAAY,CAyEd;AAED,wBAAgB,wBAAwB,CACtC,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,YAAY,CAkEd;AAED,wBAAgB,4BAA4B,CAC1C,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,gBAAgB,CAuClB;AAED,wBAAgB,4BAA4B,CAC1C,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,gBAAgB,CA6ClB"}
+ {"version":3,"file":"runtime-loader.d.ts","sourceRoot":"","sources":["../../../src/src/provider/runtime-loader.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,YAAY,EAAE,MAAM,YAAY,CAAC;AAoCjE,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,mCAAmC,CAAC;AAGtE,OAAO,EACL,+BAA+B,EAC/B,8BAA8B,EAC/B,MAAM,uCAAuC,CAAC;AAE/C,OAAO,EACL,aAAa,EACb,uBAAuB,EACvB,kBAAkB,EAClB,sBAAsB,EACtB,oBAAoB,GACrB,MAAM,mCAAmC,CAAC;AAC3C,OAAO,EAAE,+BAA+B,EAAE,8BAA8B,EAAE,CAAC;AAE3E,MAAM,WAAW,mBAAmB;IAClC,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,KAAK,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACjC;AAED,MAAM,WAAW,sBAAsB;IACrC,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,KAAK,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACjC;AAED,MAAM,WAAW,mBAAmB;IAClC,MAAM,EAAE,MAAM,CAAC;IACf,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,KAAK,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACjC;AAqWD;;;;;;GAMG;AACH,MAAM,MAAM,eAAe,GAAG;IAC5B,IAAI,EAAE,qBAAqB,GAAG,OAAO,CAAC;IACtC,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,YAAY,CAAC;CACxB,CAAC;AAm5DF,wBAAgB,wBAAwB,CACtC,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,YAAY,CAsEd;AA0gBD,wBAAgB,4BAA4B,CAC1C,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,YAAY,CAsEd;AAED,wBAAgB,2BAA2B,CACzC,MAAM,EAAE,sBAAsB,EAC9B,OAAO,EAAE,MAAM,GACd,YAAY,CAyEd;AAED,wBAAgB,wBAAwB,CACtC,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,YAAY,CAkEd;AAED,wBAAgB,4BAA4B,CAC1C,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,gBAAgB,CAuClB;AAED,wBAAgB,4BAA4B,CAC1C,MAAM,EAAE,mBAAmB,EAC3B,OAAO,EAAE,MAAM,GACd,gBAAgB,CA6ClB"}
package/esm/src/provider/runtime-loader.js CHANGED
@@ -1,59 +1,14 @@
  import { getAnthropicMessagesUrl, getGoogleEmbeddingUrl, getGoogleGenerateContentUrl, getGoogleStreamGenerateContentUrl, getOpenAIChatCompletionsUrl, getOpenAIEmbeddingUrl, getOpenAIResponsesUrl, } from "./runtime-loader/provider-endpoints.js";
+ import { extractGoogleEmbedding, extractGoogleUsageTokens, extractOpenAIEmbeddings, extractOpenAIUsageTokens, } from "./runtime-loader/provider-embedding-responses.js";
+ import { normalizeAnthropicFinishReason, normalizeGoogleFinishReason, normalizeOpenAIFinishReason, normalizeOpenAIResponsesFinishReason, } from "./runtime-loader/provider-finish-reasons.js";
  import { createAnthropicRequestInit, createGoogleRequestInit, createOpenAIRequestInit, } from "./runtime-loader/provider-request-init.js";
+ import { parseSseChunk } from "./runtime-loader/provider-sse.js";
+ import { extractAnthropicUsage, extractGoogleUsage, extractOpenAIResponsesUsage, extractOpenAIUsage, mergeUsage, } from "./runtime-loader/provider-usage.js";
  import { requestJson, requestStream } from "./runtime-loader/provider-http.js";
  import { readRecord } from "./runtime-loader/provider-records.js";
  import { TOOL_INPUT_PENDING_THRESHOLD_MS, withToolInputStatusTransitions, } from "./runtime-loader/tool-input-status.js";
  export { ProviderError, ProviderOverloadedError, ProviderQuotaError, ProviderRateLimitError, ProviderRequestError, } from "./runtime-loader/provider-http.js";
  export { TOOL_INPUT_PENDING_THRESHOLD_MS, withToolInputStatusTransitions };
- function isNumberArray(value) {
-     return Array.isArray(value) && value.every((entry) => typeof entry === "number");
- }
- function extractOpenAIEmbeddings(payload) {
-     const record = readRecord(payload);
-     const data = record?.data;
-     if (!Array.isArray(data)) {
-         throw new Error("Invalid OpenAI embedding response: data array missing");
-     }
-     const embeddings = [];
-     for (const item of data) {
-         const itemRecord = readRecord(item);
-         const embedding = itemRecord?.embedding;
-         if (!isNumberArray(embedding)) {
-             throw new Error("Invalid OpenAI embedding response: embedding vector missing");
-         }
-         embeddings.push(embedding);
-     }
-     return embeddings;
- }
- function extractOpenAIUsageTokens(payload) {
-     const record = readRecord(payload);
-     const usage = readRecord(record?.usage);
-     const totalTokens = usage?.total_tokens;
-     return typeof totalTokens === "number" ? totalTokens : undefined;
- }
- function extractGoogleEmbedding(payload) {
-     const record = readRecord(payload);
-     const embeddings = record?.embeddings;
-     if (Array.isArray(embeddings) && embeddings.length > 0) {
-         const firstEmbedding = readRecord(embeddings[0]);
-         const values = firstEmbedding?.values;
-         if (isNumberArray(values)) {
-             return values;
-         }
-     }
-     const embedding = readRecord(record?.embedding);
-     const values = embedding?.values;
-     if (isNumberArray(values)) {
-         return values;
-     }
-     throw new Error("Invalid Google embedding response: embedding vector missing");
- }
- function extractGoogleUsageTokens(payload) {
-     const record = readRecord(payload);
-     const usageMetadata = readRecord(record?.usageMetadata);
-     const promptTokenCount = usageMetadata?.promptTokenCount;
-     return typeof promptTokenCount === "number" ? promptTokenCount : undefined;
- }
  function createWarningCollector() {
      const list = [];
      return {
@@ -162,63 +117,6 @@ function readProviderOptions(providerOptions, ...providerNames) {
      }
      return merged;
  }
- function normalizeAnthropicFinishReason(raw) {
-     if (typeof raw !== "string") {
-         return null;
-     }
-     switch (raw) {
-         case "tool_use":
-             return { unified: "tool-calls", raw };
-         case "end_turn":
-         case "stop_sequence":
-             return { unified: "stop", raw };
-         case "max_tokens":
-             return { unified: "length", raw };
-         default:
-             return raw;
-     }
- }
- function extractAnthropicUsage(payload) {
-     const record = readRecord(payload);
-     const usage = readRecord(record?.usage);
-     if (!usage) {
-         return undefined;
-     }
-     const inputTokens = usage.input_tokens;
-     const outputTokens = usage.output_tokens;
-     const cacheCreationInputTokens = usage.cache_creation_input_tokens;
-     const cacheReadInputTokens = usage.cache_read_input_tokens;
-     return {
-         inputTokens: typeof inputTokens === "number" ? inputTokens : undefined,
-         outputTokens: typeof outputTokens === "number" ? outputTokens : undefined,
-         totalTokens: typeof inputTokens === "number" || typeof outputTokens === "number"
-             ? (typeof inputTokens === "number" ? inputTokens : 0) +
-                 (typeof outputTokens === "number" ? outputTokens : 0)
-             : undefined,
-         ...(typeof cacheCreationInputTokens === "number" ? { cacheCreationInputTokens } : {}),
-         ...(typeof cacheReadInputTokens === "number" ? { cacheReadInputTokens } : {}),
-     };
- }
- function mergeUsage(current, next) {
-     if (!current) {
-         return next;
-     }
-     if (!next) {
-         return current;
-     }
-     const inputTokens = next.inputTokens ?? current.inputTokens;
-     const outputTokens = next.outputTokens ?? current.outputTokens;
-     const cacheCreationInputTokens = next.cacheCreationInputTokens ??
-         current.cacheCreationInputTokens;
-     const cacheReadInputTokens = next.cacheReadInputTokens ?? current.cacheReadInputTokens;
-     return {
-         inputTokens,
-         outputTokens,
-         totalTokens: (inputTokens ?? 0) + (outputTokens ?? 0),
-         ...(cacheCreationInputTokens !== undefined ? { cacheCreationInputTokens } : {}),
-         ...(cacheReadInputTokens !== undefined ? { cacheReadInputTokens } : {}),
-     };
- }
  function normalizeAnthropicToolChoice(toolChoice) {
      if (typeof toolChoice === "string") {
          return { type: toolChoice };
@@ -730,29 +628,6 @@ function buildAnthropicGenerateResult(payload) {
          usage: extractAnthropicUsage(payload),
      };
  }
- function parseSseChunk(chunk) {
-     const blocks = chunk.split(/\r?\n\r?\n/);
-     const remainder = blocks.pop() ?? "";
-     const events = blocks.flatMap((block) => {
-         const dataLines = block.split(/\r?\n/)
-             .filter((line) => line.startsWith("data:"))
-             .map((line) => line.slice(5).trimStart());
-         if (!dataLines.length) {
-             return [];
-         }
-         const payload = dataLines.join("\n").trim();
-         if (payload === "[DONE]") {
-             return ["[DONE]"];
-         }
-         try {
-             return [JSON.parse(payload)];
-         }
-         catch {
-             return [];
-         }
-     });
-     return { events, remainder };
- }
  async function* streamAnthropicCompatibleParts(stream) {
      const decoder = new TextDecoder();
      let buffer = "";
@@ -951,36 +826,6 @@ async function* streamAnthropicCompatibleParts(stream) {
          ...(usage ? { usage } : {}),
      };
  }
- function normalizeOpenAIFinishReason(raw) {
-     if (typeof raw !== "string") {
-         return null;
-     }
-     if (raw === "tool_calls") {
-         return { unified: "tool-calls", raw };
-     }
-     if (raw === "content_filter") {
-         return { unified: "content-filter", raw };
-     }
-     return raw;
- }
- function extractOpenAIUsage(payload) {
-     const record = readRecord(payload);
-     const usage = readRecord(record?.usage);
-     if (!usage) {
-         return undefined;
-     }
-     const inputTokens = usage.prompt_tokens;
-     const outputTokens = usage.completion_tokens;
-     const totalTokens = usage.total_tokens;
-     const promptTokensDetails = readRecord(usage.prompt_tokens_details);
-     const cachedTokens = promptTokensDetails?.cached_tokens;
-     return {
-         inputTokens: typeof inputTokens === "number" ? inputTokens : undefined,
-         outputTokens: typeof outputTokens === "number" ? outputTokens : undefined,
-         totalTokens: typeof totalTokens === "number" ? totalTokens : undefined,
-         ...(typeof cachedTokens === "number" ? { cacheReadInputTokens: cachedTokens } : {}),
-     };
- }
  function extractOpenAIContentText(content) {
      if (typeof content === "string") {
          return content;
@@ -1173,41 +1018,6 @@ function buildOpenAIChatRequest(modelId, providerName, options, stream, warnings
      Object.assign(body, providerOpts);
      return body;
  }
- function normalizeGoogleFinishReason(raw) {
-     if (typeof raw !== "string") {
-         return null;
-     }
-     switch (raw) {
-         case "STOP":
-             return { unified: "stop", raw };
-         case "MAX_TOKENS":
-             return { unified: "length", raw };
-         case "SAFETY":
-         case "RECITATION":
-             return { unified: "content-filter", raw };
-         default:
-             return raw.toLowerCase();
-     }
- }
- function extractGoogleUsage(payload) {
-     const record = readRecord(payload);
-     const usage = readRecord(record?.usageMetadata);
-     if (!usage) {
-         return undefined;
-     }
-     const inputTokens = usage.promptTokenCount;
-     const outputTokens = usage.candidatesTokenCount;
-     const totalTokens = usage.totalTokenCount;
-     const cachedContentTokenCount = usage.cachedContentTokenCount;
-     return {
-         inputTokens: typeof inputTokens === "number" ? inputTokens : undefined,
-         outputTokens: typeof outputTokens === "number" ? outputTokens : undefined,
-         totalTokens: typeof totalTokens === "number" ? totalTokens : undefined,
-         ...(typeof cachedContentTokenCount === "number"
-             ? { cacheReadInputTokens: cachedContentTokenCount }
-             : {}),
-     };
- }
  function toGoogleContents(prompt) {
      const systemParts = [];
      const contents = [];
@@ -2047,52 +1857,6 @@ function buildOpenAIResponsesRequest(modelId, providerName, options, stream, war
      Object.assign(body, readProviderOptions(options.providerOptions, "openai", providerName));
      return body;
  }
- /**
-  * The Responses API uses `input_tokens` / `output_tokens` field names
-  * instead of Chat Completions' `prompt_tokens` / `completion_tokens`.
-  * It also nests cached input tokens under `input_tokens_details` and
-  * exposes reasoning tokens via `output_tokens_details.reasoning_tokens`.
-  */
- function extractOpenAIResponsesUsage(payload) {
-     const record = readRecord(payload);
-     // Streaming usage lives on response.completed inside `response.usage`;
-     // non-streaming has it at the top level.
-     const responseRecord = readRecord(record?.response);
-     const usage = readRecord(responseRecord?.usage) ?? readRecord(record?.usage);
-     if (!usage)
-         return undefined;
-     const inputTokens = typeof usage.input_tokens === "number" ? usage.input_tokens : undefined;
-     const outputTokens = typeof usage.output_tokens === "number" ? usage.output_tokens : undefined;
-     const totalTokens = typeof usage.total_tokens === "number"
-         ? usage.total_tokens
-         : (inputTokens !== undefined || outputTokens !== undefined
-             ? (inputTokens ?? 0) + (outputTokens ?? 0)
-             : undefined);
-     const inputDetails = readRecord(usage.input_tokens_details);
-     const cachedTokens = inputDetails?.cached_tokens;
-     return {
-         inputTokens,
-         outputTokens,
-         totalTokens,
-         ...(typeof cachedTokens === "number" ? { cacheReadInputTokens: cachedTokens } : {}),
-     };
- }
- function normalizeOpenAIResponsesFinishReason(raw) {
-     if (typeof raw !== "string")
-         return null;
-     switch (raw) {
-         case "completed":
-             return { unified: "stop", raw };
-         case "incomplete":
-             return { unified: "length", raw };
-         case "failed":
-             return { unified: "error", raw };
-         case "in_progress":
-             return null;
-         default:
-             return raw;
-     }
- }
  function buildOpenAIResponsesGenerateResult(payload) {
      const record = readRecord(payload);
      const output = Array.isArray(record?.output) ? record.output : [];
package/esm/src/utils/version-constant.d.ts CHANGED
@@ -1,2 +1,2 @@
- export declare const VERSION = "0.1.263";
+ export declare const VERSION = "0.1.265";
  //# sourceMappingURL=version-constant.d.ts.map
package/esm/src/utils/version-constant.js CHANGED
@@ -1,3 +1,3 @@
  // Keep in sync with deno.json version.
  // scripts/release.ts updates this constant during releases.
- export const VERSION = "0.1.263";
+ export const VERSION = "0.1.265";
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "veryfront",
-   "version": "0.1.263",
+   "version": "0.1.265",
    "description": "The simplest way to build AI-powered apps",
    "keywords": [
      "react",
package/src/deno.js CHANGED
@@ -1,6 +1,6 @@
  export default {
    "name": "veryfront",
-   "version": "0.1.263",
+   "version": "0.1.265",
    "license": "Apache-2.0",
    "nodeModulesDir": "auto",
    "workspace": [