@alpic80/rivet-core 1.19.1-aidon.1 → 1.19.1-aidon.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -64,6 +64,8 @@ export class GraphProcessor {
     #aborted = false;
     #abortSuccessfully = false;
     #abortError = undefined;
+    #totalRequestTokens = 0;
+    #totalResponseTokens = 0;
     #totalCost = 0;
     #ignoreNodes = undefined;
     #nodeAbortControllers = new Map();

@@ -480,6 +482,18 @@ export class GraphProcessor {
             }
             throw error;
         }
+        if (this.#graphOutputs['requestTokens'] == null) {
+            this.#graphOutputs['requestTokens'] = {
+                type: 'number',
+                value: this.#totalRequestTokens,
+            };
+        }
+        if (this.#graphOutputs['responseTokens'] == null) {
+            this.#graphOutputs['responseTokens'] = {
+                type: 'number',
+                value: this.#totalResponseTokens,
+            };
+        }
         if (this.#graphOutputs['cost'] == null) {
             this.#graphOutputs['cost'] = {
                 type: 'number',

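Like the existing `cost` output, the two new totals are published as graph outputs unless the graph already produced outputs under those names. A minimal consumer-side sketch, with simplified stand-ins for rivet-core's `DataValue` types (the real union is much larger):

```ts
// Simplified shapes; sketch only, not rivet-core's actual type definitions.
type NumberDataValue = { type: 'number'; value: number };
type GraphOutputs = Record<string, NumberDataValue | undefined>;

// Reads the token totals this diff adds, defaulting to 0 when absent.
function readTokenTotals(outputs: GraphOutputs): { request: number; response: number } {
  return {
    request: outputs['requestTokens']?.value ?? 0,
    response: outputs['responseTokens']?.value ?? 0,
  };
}
```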
@@ -811,6 +825,12 @@ export class GraphProcessor {
         ]));
         try {
             const output = await this.#processNodeWithInputData(node, inputs, i, processId, (node, partialOutputs, index) => this.#emitter.emit('partialOutput', { node, outputs: partialOutputs, index, processId }));
+            if (output['requestTokens']?.type === 'number') {
+                this.#totalRequestTokens += coerceTypeOptional(output['requestTokens'], 'number') ?? 0;
+            }
+            if (output['responseTokens']?.type === 'number') {
+                this.#totalResponseTokens += coerceTypeOptional(output['responseTokens'], 'number') ?? 0;
+            }
             if (output['cost']?.type === 'number') {
                 this.#totalCost += coerceTypeOptional(output['cost'], 'number') ?? 0;
             }

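The accumulation reuses the guard-then-coerce pattern already in place for `cost`: check the port's `type` tag, then let `coerceTypeOptional` pull out the number. A self-contained restatement with a simplified `coerceTypeOptional` (the real helper covers many more coercions):

```ts
// Simplified stand-in for rivet-core's coerceTypeOptional; sketch only.
type DataValue = { type: string; value: unknown };

function coerceTypeOptional(v: DataValue | undefined, t: 'number'): number | undefined {
  return v?.type === t && typeof v.value === 'number' ? v.value : undefined;
}

let totalRequestTokens = 0;
const output: Record<string, DataValue | undefined> = {
  requestTokens: { type: 'number', value: 42 },
};
if (output['requestTokens']?.type === 'number') {
  totalRequestTokens += coerceTypeOptional(output['requestTokens'], 'number') ?? 0;
}
```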
@@ -829,6 +849,12 @@ export class GraphProcessor {
         ]));
         try {
             const output = await this.#processNodeWithInputData(node, inputs, i, processId, (node, partialOutputs, index) => this.#emitter.emit('partialOutput', { node, outputs: partialOutputs, index, processId }));
+            if (output['requestTokens']?.type === 'number') {
+                this.#totalRequestTokens += coerceTypeOptional(output['requestTokens'], 'number') ?? 0;
+            }
+            if (output['responseTokens']?.type === 'number') {
+                this.#totalResponseTokens += coerceTypeOptional(output['responseTokens'], 'number') ?? 0;
+            }
             if (output['cost']?.type === 'number') {
                 this.#totalCost += coerceTypeOptional(output['cost'], 'number') ?? 0;
             }

@@ -858,6 +884,8 @@ export class GraphProcessor {
         }, {});
         this.#nodeResults.set(node.id, aggregateResults);
         this.#visitedNodes.add(node.id);
+        this.#totalRequestTokens += sum(results.map((r) => coerceTypeOptional(r.output?.['requestTokens'], 'number') ?? 0));
+        this.#totalResponseTokens += sum(results.map((r) => coerceTypeOptional(r.output?.['responseTokens'], 'number') ?? 0));
         this.#totalCost += sum(results.map((r) => coerceTypeOptional(r.output?.['cost'], 'number') ?? 0));
         this.#emitter.emit('nodeFinish', { node, outputs: aggregateResults, processId });
     }

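For split-run nodes the totals are folded over every iteration's result with `sum` (a lodash-style helper whose import sits outside this diff). The same aggregation, restated standalone with assumed shapes:

```ts
// Standalone equivalent of the split-run aggregation above; shapes assumed.
const sum = (xs: number[]): number => xs.reduce((a, b) => a + b, 0);

type NumberValue = { type: 'number'; value: number } | undefined;
type IterationResult = { output?: Record<string, NumberValue> };

function totalFor(results: IterationResult[], port: string): number {
  return sum(results.map((r) => r.output?.[port]?.value ?? 0));
}
// totalFor(results, 'requestTokens') mirrors the #totalRequestTokens update.
```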
@@ -875,6 +903,12 @@ export class GraphProcessor {
         const outputValues = await this.#processNodeWithInputData(node, inputValues, 0, processId, (node, partialOutputs, index) => this.#emitter.emit('partialOutput', { node, outputs: partialOutputs, index, processId }));
         this.#nodeResults.set(node.id, outputValues);
         this.#visitedNodes.add(node.id);
+        if (outputValues['requestTokens']?.type === 'number') {
+            this.#totalRequestTokens += coerceTypeOptional(outputValues['requestTokens'], 'number') ?? 0;
+        }
+        if (outputValues['responseTokens']?.type === 'number') {
+            this.#totalResponseTokens += coerceTypeOptional(outputValues['responseTokens'], 'number') ?? 0;
+        }
         if (outputValues['cost']?.type === 'number') {
             this.#totalCost += coerceTypeOptional(outputValues['cost'], 'number') ?? 0;
         }

@@ -728,6 +728,8 @@ export class ChatNodeImpl extends NodeImpl {
             }
         }
         const startTime = Date.now();
+        let usagePromptTokens = -1;
+        let usageCompletionTokens = -1;
         const chunks = streamChatCompletions({
             auth: {
                 apiKey: context.settings.openAiKey ?? '',

@@ -746,6 +748,10 @@ export class ChatNodeImpl extends NodeImpl {
                 // Could be error for some reason 🤷♂️ but ignoring has worked for me so far.
                 continue;
             }
+            if (chunk.choices.length == 0 && chunk.usage) { // capture the usage info
+                usagePromptTokens = chunk.usage.prompt_tokens;
+                usageCompletionTokens = chunk.usage.completion_tokens;
+            }
             for (const { delta, index } of chunk.choices) {
                 if (delta.content != null) {
                     responseChoicesParts[index] ??= [];

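This branch exists because, with `stream_options.include_usage` set (see the `openai.js` change below), the OpenAI streaming API sends one extra final chunk whose `choices` array is empty and which carries the `usage` object. An illustrative shape, with invented field values:

```ts
// Illustrative final chunk when stream_options.include_usage is enabled.
const finalChunk = {
  id: 'chatcmpl-abc123', // invented example id
  object: 'chat.completion.chunk',
  created: 1700000000,
  model: 'gpt-4',
  choices: [], // empty on the usage-only chunk, so the capture branch fires
  usage: { prompt_tokens: 42, completion_tokens: 128, total_tokens: 170 },
};
```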
@@ -847,11 +853,22 @@ export class ChatNodeImpl extends NodeImpl {
             throw new Error('No response from OpenAI');
         }
         output['in-messages'] = { type: 'chat-message[]', value: messages };
-
+        let finalTokenCount = tokenCount * (numberOfChoices ?? 1);
         let responseTokenCount = 0;
         for (const choiceParts of responseChoicesParts) {
-            responseTokenCount += await context.tokenizer.getTokenCountForString(choiceParts.join(), tokenizerInfo);
+            responseTokenCount += await context.tokenizer.getTokenCountForString(choiceParts.join(''), tokenizerInfo);
+        }
+        if (usagePromptTokens != -1 && usageCompletionTokens != -1) {
+            if (finalTokenCount != usagePromptTokens) {
+                console.log(`calculated token count:${finalTokenCount}, usage:${usagePromptTokens}`);
+                finalTokenCount = usagePromptTokens;
+            }
+            if (responseTokenCount != usageCompletionTokens) {
+                console.log(`calculated response token count:${responseTokenCount}, usage:${usageCompletionTokens}`);
+                responseTokenCount = usageCompletionTokens;
+            }
         }
+        output['requestTokens'] = { type: 'number', value: finalTokenCount };
         output['responseTokens'] = { type: 'number', value: responseTokenCount };
         const promptCostPerThousand = model in openaiModels ? openaiModels[model].cost.prompt : 0;
         const completionCostPerThousand = model in openaiModels ? openaiModels[model].cost.completion : 0;

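Two distinct fixes land in this hunk. First, `choiceParts.join()` becomes `choiceParts.join('')`: with no argument, `Array.prototype.join` inserts commas between elements, so the old code tokenized separator text that was never part of the response. Second, the locally computed counts become a fallback that is overridden (with a diagnostic log) by the API-reported usage whenever it was captured. The join fix in isolation:

```ts
const parts = ['Hel', 'lo', ' world']; // streamed deltas for one choice
console.log(parts.join());   // "Hel,lo, world" (spurious commas inflate the count)
console.log(parts.join('')); // "Hello world" (the actual response text)
```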
package/dist/esm/utils/openai.js (CHANGED)

@@ -169,6 +169,7 @@ export class OpenAIError extends Error {
         this.name = 'OpenAIError';
     }
 }
+;
 export async function* streamChatCompletions({ endpoint, auth, signal, headers, timeout, ...rest }) {
     const abortSignal = signal ?? new AbortController().signal;
     const response = await fetchEventSource(endpoint, {

@@ -182,6 +183,9 @@ export async function* streamChatCompletions({ endpoint, auth, signal, headers, timeout, ...rest }) {
         body: JSON.stringify({
             ...rest,
             stream: true,
+            stream_options: {
+                "include_usage": true
+            },
         }),
         signal: abortSignal,
     }, timeout ?? DEFAULT_CHAT_NODE_TIMEOUT);

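The serialized request body now opts into usage reporting. A sketch of what `JSON.stringify` produces for the relevant fields; `model` and `messages` are invented placeholders that `...rest` would actually carry:

```ts
// Sketch of the streaming request body after this change (placeholder values).
const body = JSON.stringify({
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Hello' }],
  stream: true,
  stream_options: { include_usage: true },
});
```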
@@ -311,6 +311,7 @@ export type ChatCompletionChunk = {
     created: number;
     model: string;
     choices?: ChatCompletionChunkChoice[];
+    usage?: CompletionUsage;
 };
 export type GptFunctionCall = {
     name: string;

@@ -320,6 +321,11 @@ export type GptFunctionCallDelta = {
     name?: string;
     arguments?: string;
 };
+export interface CompletionUsage {
+    completion_tokens: number;
+    prompt_tokens: number;
+    total_tokens: number;
+}
 export type ChatCompletionChunkChoice = {
     index: number;
     message_index: number;

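With `usage` optional on `ChatCompletionChunk`, callers can narrow on it directly. A small sketch of the consuming pattern from the `ChatNodeImpl` hunk above, restated against simplified local types:

```ts
interface CompletionUsage {
  completion_tokens: number;
  prompt_tokens: number;
  total_tokens: number;
}
interface ChunkLike {
  choices?: unknown[];
  usage?: CompletionUsage;
}

// Mirrors the ChatNodeImpl branch: usage rides on the chunk with no choices.
function usageOf(chunk: ChunkLike): CompletionUsage | undefined {
  return chunk.choices?.length === 0 && chunk.usage ? chunk.usage : undefined;
}
```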
package/package.json (CHANGED)

@@ -2,7 +2,7 @@
   "name": "@alpic80/rivet-core",
   "license": "MIT",
   "repository": "https://github.com/castortech/rivet",
-  "version": "1.19.1-aidon.1",
+  "version": "1.19.1-aidon.2",
   "packageManager": "yarn@3.5.0",
   "main": "dist/cjs/bundle.cjs",
   "module": "dist/esm/index.js",