deadpipe 2.0.2 → 3.0.1

This diff compares the published contents of these package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -1,11 +1,9 @@
 // src/index.ts
-var VERSION = "2.0.0";
+var VERSION = "3.0.0";
 var MODEL_COSTS = {
   // OpenAI
   "gpt-4": { input: 0.03, output: 0.06 },
-  // legacy
   "gpt-4-turbo": { input: 0.01, output: 0.03 },
-  // legacy
   "gpt-4o": { input: 5e-3, output: 0.015 },
   "gpt-4o-mini": { input: 15e-5, output: 6e-4 },
   "gpt-4.1": { input: 2e-3, output: 8e-3 },
@@ -13,14 +11,37 @@ var MODEL_COSTS = {
   "gpt-5": { input: 175e-5, output: 0.014 },
   "gpt-5-mini": { input: 25e-5, output: 2e-3 },
   "gpt-5.2-pro": { input: 0.021, output: 0.168 },
+  "o1": { input: 0.015, output: 0.06 },
+  "o1-mini": { input: 3e-3, output: 0.012 },
+  "o1-pro": { input: 0.15, output: 0.6 },
   // Anthropic
   "claude-3-opus": { input: 0.015, output: 0.075 },
   "claude-3-sonnet": { input: 3e-3, output: 0.015 },
   "claude-3-haiku": { input: 25e-5, output: 125e-5 },
   "claude-3.5-sonnet": { input: 3e-3, output: 0.015 },
+  "claude-3.5-haiku": { input: 8e-4, output: 4e-3 },
   "claude-opus-4": { input: 0.015, output: 0.075 },
   "claude-sonnet-4": { input: 3e-3, output: 0.015 },
-  "claude-haiku-4": { input: 25e-5, output: 125e-5 }
+  "claude-haiku-4": { input: 25e-5, output: 125e-5 },
+  // Google AI / Gemini
+  "gemini-1.5-pro": { input: 125e-5, output: 5e-3 },
+  "gemini-1.5-flash": { input: 75e-6, output: 3e-4 },
+  "gemini-2.0-flash": { input: 1e-4, output: 4e-4 },
+  "gemini-2.0-pro": { input: 125e-5, output: 5e-3 },
+  "gemini-pro": { input: 5e-4, output: 15e-4 },
+  // Mistral
+  "mistral-large": { input: 4e-3, output: 0.012 },
+  "mistral-medium": { input: 27e-4, output: 81e-4 },
+  "mistral-small": { input: 1e-3, output: 3e-3 },
+  "mistral-nemo": { input: 3e-4, output: 3e-4 },
+  "codestral": { input: 1e-3, output: 3e-3 },
+  "pixtral": { input: 2e-3, output: 6e-3 },
+  "ministral": { input: 1e-4, output: 1e-4 },
+  // Cohere
+  "command-r-plus": { input: 3e-3, output: 0.015 },
+  "command-r": { input: 5e-4, output: 15e-4 },
+  "command": { input: 1e-3, output: 2e-3 },
+  "command-light": { input: 3e-4, output: 6e-4 }
 };
 function estimateCost(model, inputTokens, outputTokens) {
   const modelLower = model.toLowerCase();
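
The MODEL_COSTS entries read as USD per 1,000 tokens (the "gpt-4" row matches OpenAI's historical $0.03/$0.06 per-1K pricing). The body of estimateCost falls outside this hunk, so the arithmetic below is an illustrative sketch under that per-1K assumption, not the confirmed implementation:

// Illustrative only: assumes MODEL_COSTS rates are USD per 1K tokens.
import { estimateCost } from "deadpipe";

// "gpt-4o": { input: 5e-3, output: 0.015 }
// (1200 / 1000) * 0.005 + (400 / 1000) * 0.015 = 0.006 + 0.006
const usd = estimateCost("gpt-4o", 1200, 400);
console.log(usd); // expected ≈ 0.012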
@@ -101,20 +122,54 @@ function validateNumericBounds(data, numericBounds) {
 }
 function detectProvider(response) {
   if (!response) return "unknown";
-  if (Array.isArray(response.content) || response.stop_reason !== void 0) {
+  if (Array.isArray(response.content) && response.stop_reason !== void 0) {
     return "anthropic";
   }
+  if (response.candidates !== void 0 || response.promptFeedback !== void 0) {
+    return "google";
+  }
+  if (response.text !== void 0 && response.generation_id !== void 0 || response.generations !== void 0) {
+    return "cohere";
+  }
   if (response.choices !== void 0 || response.output !== void 0) {
+    const model = String(response.model || "").toLowerCase();
+    if (model.includes("mistral") || model.includes("codestral") || model.includes("pixtral") || model.includes("ministral")) {
+      return "mistral";
+    }
     return "openai";
   }
   if (response.model) {
     const modelLower = String(response.model).toLowerCase();
-    if (modelLower.includes("claude")) {
-      return "anthropic";
-    }
-    if (modelLower.includes("gpt") || modelLower.includes("o1")) {
-      return "openai";
+    if (modelLower.includes("claude")) return "anthropic";
+    if (modelLower.includes("gemini")) return "google";
+    if (modelLower.includes("mistral") || modelLower.includes("codestral")) return "mistral";
+    if (modelLower.includes("command")) return "cohere";
+    if (modelLower.includes("gpt") || modelLower.includes("o1")) return "openai";
+  }
+  return "unknown";
+}
+function detectClientProvider(client) {
+  if (!client) return "unknown";
+  if (client.chat?.completions || client.responses) {
+    if (client._client?.baseURL?.includes("mistral") || client.baseURL?.includes("mistral")) {
+      return "mistral";
     }
+    return "openai";
+  }
+  if (client.messages?.create) {
+    return "anthropic";
+  }
+  if (client.getGenerativeModel) {
+    return "google";
+  }
+  if (client.chat?.complete || client.chat) {
+    return "mistral";
+  }
+  if (client.chat && typeof client.chat === "function") {
+    return "cohere";
+  }
+  if (client.generate) {
+    return "cohere";
   }
   return "unknown";
 }
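
The reworked detectProvider keys purely off response shape, while the new detectClientProvider duck-types SDK clients, so neither requires any provider SDK to be installed. A quick sketch of how the checks above resolve — the objects are hand-built stand-ins for real SDK values:

import { detectProvider, detectClientProvider } from "deadpipe";

detectProvider({ content: [], stop_reason: "end_turn" });       // "anthropic" (3.x requires both)
detectProvider({ candidates: [] });                             // "google"
detectProvider({ choices: [], model: "mistral-large-latest" }); // "mistral" (OpenAI-shaped, Mistral model)
detectProvider({ model: "command-r" });                         // "cohere" (model-name fallback)

detectClientProvider({ getGenerativeModel: () => {} });         // "google"
detectClientProvider({ messages: { create: async () => ({}) } }); // "anthropic"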
@@ -197,6 +252,110 @@ function extractAnthropicResponse(response) {
   }
   return result;
 }
+function extractGoogleAIResponse(response) {
+  const result = {
+    model: "",
+    content: "",
+    inputTokens: null,
+    outputTokens: null,
+    totalTokens: null,
+    finishReason: null,
+    toolCalls: [],
+    logprobs: null
+  };
+  if (response?.modelVersion) {
+    result.model = response.modelVersion;
+  }
+  if (response?.candidates?.[0]) {
+    const candidate = response.candidates[0];
+    if (candidate.content?.parts) {
+      const textParts = candidate.content.parts.filter((part) => part.text !== void 0).map((part) => part.text);
+      result.content = textParts.join("");
+      const functionCalls = candidate.content.parts.filter((part) => part.functionCall).map((part) => ({
+        name: part.functionCall.name,
+        arguments: JSON.stringify(part.functionCall.args)
+      }));
+      result.toolCalls = functionCalls;
+    }
+    if (candidate.finishReason) {
+      result.finishReason = candidate.finishReason;
+    }
+  }
+  if (response?.usageMetadata) {
+    result.inputTokens = response.usageMetadata.promptTokenCount ?? null;
+    result.outputTokens = response.usageMetadata.candidatesTokenCount ?? null;
+    result.totalTokens = response.usageMetadata.totalTokenCount ?? null;
+  }
+  return result;
+}
+function extractMistralResponse(response) {
+  const result = extractOpenAIResponse(response);
+  if (response?.model) {
+    result.model = response.model;
+  }
+  return result;
+}
+function extractCohereResponse(response) {
+  const result = {
+    model: "",
+    content: "",
+    inputTokens: null,
+    outputTokens: null,
+    totalTokens: null,
+    finishReason: null,
+    toolCalls: [],
+    logprobs: null
+  };
+  if (response?.text !== void 0) {
+    result.content = response.text || "";
+  }
+  if (response?.generations?.[0]) {
+    result.content = response.generations[0].text || "";
+  }
+  if (response?.model) {
+    result.model = response.model;
+  }
+  if (response?.finish_reason) {
+    result.finishReason = response.finish_reason;
+  }
+  if (response?.tool_calls && Array.isArray(response.tool_calls)) {
+    result.toolCalls = response.tool_calls.map((tc) => ({
+      name: tc.name,
+      arguments: JSON.stringify(tc.parameters)
+    }));
+  }
+  if (response?.meta?.tokens) {
+    result.inputTokens = response.meta.tokens.input_tokens ?? null;
+    result.outputTokens = response.meta.tokens.output_tokens ?? null;
+    if (result.inputTokens !== null && result.outputTokens !== null) {
+      result.totalTokens = result.inputTokens + result.outputTokens;
+    }
+  }
+  if (response?.meta?.billed_units) {
+    result.inputTokens = response.meta.billed_units.input_tokens ?? null;
+    result.outputTokens = response.meta.billed_units.output_tokens ?? null;
+    if (result.inputTokens !== null && result.outputTokens !== null) {
+      result.totalTokens = result.inputTokens + result.outputTokens;
+    }
+  }
+  return result;
+}
+function extractResponse(response, provider) {
+  const detectedProvider = provider || detectProvider(response);
+  switch (detectedProvider) {
+    case "anthropic":
+      return extractAnthropicResponse(response);
+    case "google":
+      return extractGoogleAIResponse(response);
+    case "mistral":
+      return extractMistralResponse(response);
+    case "cohere":
+      return extractCohereResponse(response);
+    case "openai":
+    default:
+      return extractOpenAIResponse(response);
+  }
+}
 function calculateLogprobMean(logprobs) {
   if (!logprobs?.content) return null;
   try {
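
Whatever the provider, extractResponse funnels every payload into the same normalized record (model, content, token counts, finishReason, toolCalls, logprobs). A sketch of that normalization on a hand-built Cohere-style payload — the field values are invented for illustration:

import { extractResponse } from "deadpipe";

// Hand-built stand-in for a Cohere chat response.
const extracted = extractResponse({
  text: "Hello!",
  generation_id: "abc123",
  model: "command-r",
  finish_reason: "COMPLETE",
  meta: { billed_units: { input_tokens: 12, output_tokens: 3 } }
});
// => { model: "command-r", content: "Hello!", inputTokens: 12,
//      outputTokens: 3, totalTokens: 15, finishReason: "COMPLETE", ... }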
@@ -281,15 +440,20 @@ var PromptTracker = class {
     this.telemetry.total_latency = this.startTime ? this.endTime - this.startTime : 0;
     const detectedProvider = detectProvider(response);
     this.telemetry.provider = detectedProvider !== "unknown" ? detectedProvider : "openai";
-    const extracted = detectedProvider === "anthropic" ? extractAnthropicResponse(response) : extractOpenAIResponse(response);
+    const extracted = extractResponse(response, detectedProvider);
     if (input) {
       const messages = input.messages || [];
       const tools = input.tools;
       let systemPrompt;
-      for (const msg of messages) {
-        if (msg.role === "system") {
-          systemPrompt = msg.content || "";
-          break;
+      if (input.system) {
+        systemPrompt = typeof input.system === "string" ? input.system : JSON.stringify(input.system);
+      }
+      if (!systemPrompt) {
+        for (const msg of messages) {
+          if (msg.role === "system") {
+            systemPrompt = msg.content || "";
+            break;
+          }
         }
       }
       if (messages.length > 0) {
@@ -321,7 +485,7 @@ var PromptTracker = class {
     const content = extracted.content;
     this.telemetry.output_length = content?.length ?? 0;
     this.telemetry.empty_output = !content || content.trim().length === 0;
-    this.telemetry.truncated = extracted.finishReason === "length";
+    this.telemetry.truncated = extracted.finishReason === "length" || extracted.finishReason === "MAX_TOKENS";
     const MAX_PREVIEW_LENGTH = 2e3;
     if (content) {
       this.telemetry.output_preview = content.length > MAX_PREVIEW_LENGTH ? content.substring(0, MAX_PREVIEW_LENGTH) + "..." : content;
@@ -497,15 +661,6 @@ function wrapOpenAI(client, options) {
       ...client.chat.completions,
       // eslint-disable-next-line @typescript-eslint/no-explicit-any
       create: async (params) => {
-        const messages = params.messages || [];
-        const tools = params.tools;
-        let systemPrompt;
-        for (const msg of messages) {
-          if (msg.role === "system") {
-            systemPrompt = msg.content || "";
-            break;
-          }
-        }
         return track(
           promptId,
           async (t) => {
@@ -524,8 +679,6 @@ function wrapOpenAI(client, options) {
     ...client.responses,
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     create: async (params) => {
-      const inputContent = params.input || "";
-      const messages = typeof inputContent === "string" ? [{ role: "user", content: inputContent }] : inputContent;
       return track(
         promptId,
         async (t) => {
@@ -540,18 +693,210 @@ function wrapOpenAI(client, options) {
   }
   return wrappedClient;
 }
-var index_default = wrapOpenAI;
+function wrapAnthropic(client, options) {
+  const { promptId, ...trackOptions } = options;
+  const wrappedClient = Object.create(client);
+  if (client.messages) {
+    wrappedClient.messages = {
+      ...client.messages,
+      // eslint-disable-next-line @typescript-eslint/no-explicit-any
+      create: async (params) => {
+        return track(
+          promptId,
+          async (t) => {
+            const response = await client.messages.create(params);
+            t.record(response, void 0, params);
+            return response;
+          },
+          trackOptions
+        );
+      }
+    };
+  }
+  return wrappedClient;
+}
+function wrapGoogleAI(client, options) {
+  const { promptId, ...trackOptions } = options;
+  return {
+    ...client,
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    getGenerativeModel: (modelParams) => {
+      const model = client.getGenerativeModel(modelParams);
+      return {
+        ...model,
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        generateContent: async (params) => {
+          return track(
+            promptId,
+            async (t) => {
+              const response = await model.generateContent(params);
+              const input = {
+                messages: Array.isArray(params) ? params.map((p) => ({
+                  role: "user",
+                  content: typeof p === "string" ? p : p.text || ""
+                })) : [{ role: "user", content: typeof params === "string" ? params : params.contents?.[0]?.parts?.[0]?.text || "" }]
+              };
+              t.record(response.response || response, void 0, input);
+              return response;
+            },
+            trackOptions
+          );
+        },
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        startChat: (chatParams) => {
+          const chat = model.startChat(chatParams);
+          return {
+            ...chat,
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            sendMessage: async (message) => {
+              return track(
+                promptId,
+                async (t) => {
+                  const response = await chat.sendMessage(message);
+                  const input = {
+                    messages: [{ role: "user", content: typeof message === "string" ? message : JSON.stringify(message) }]
+                  };
+                  t.record(response.response || response, void 0, input);
+                  return response;
+                },
+                trackOptions
+              );
+            }
+          };
+        }
+      };
+    }
+  };
+}
+function wrapMistral(client, options) {
+  const { promptId, ...trackOptions } = options;
+  const wrappedClient = Object.create(client);
+  if (client.chat?.complete) {
+    wrappedClient.chat = {
+      ...client.chat,
+      // eslint-disable-next-line @typescript-eslint/no-explicit-any
+      complete: async (params) => {
+        return track(
+          promptId,
+          async (t) => {
+            const response = await client.chat.complete(params);
+            t.record(response, void 0, params);
+            return response;
+          },
+          trackOptions
+        );
+      }
+    };
+  }
+  if (client.chat?.completions) {
+    wrappedClient.chat = {
+      ...client.chat,
+      completions: {
+        ...client.chat.completions,
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        create: async (params) => {
+          return track(
+            promptId,
+            async (t) => {
+              const response = await client.chat.completions.create(params);
+              t.record(response, void 0, params);
+              return response;
+            },
+            trackOptions
+          );
+        }
+      }
+    };
+  }
+  return wrappedClient;
+}
+function wrapCohere(client, options) {
+  const { promptId, ...trackOptions } = options;
+  const wrappedClient = Object.create(client);
+  if (typeof client.chat === "function") {
+    wrappedClient.chat = async (params) => {
+      return track(
+        promptId,
+        async (t) => {
+          const response = await client.chat(params);
+          const input = {
+            messages: params.chat_history || []
+          };
+          if (params.message) {
+            input.messages.push({ role: "user", content: params.message });
+          }
+          t.record(response, void 0, input);
+          return response;
+        },
+        trackOptions
+      );
+    };
+  }
+  if (typeof client.generate === "function") {
+    wrappedClient.generate = async (params) => {
+      return track(
+        promptId,
+        async (t) => {
+          const response = await client.generate(params);
+          const input = {
+            messages: [{ role: "user", content: params.prompt || "" }]
+          };
+          t.record(response, void 0, input);
+          return response;
+        },
+        trackOptions
+      );
+    };
+  }
+  return wrappedClient;
+}
+function wrap(client, options) {
+  const provider = detectClientProvider(client);
+  switch (provider) {
+    case "openai":
+      return wrapOpenAI(client, options);
+    case "anthropic":
+      return wrapAnthropic(client, options);
+    case "google":
+      return wrapGoogleAI(client, options);
+    case "mistral":
+      return wrapMistral(client, options);
+    case "cohere":
+      return wrapCohere(client, options);
+    default:
+      if (client.chat?.completions) {
+        return wrapOpenAI(client, options);
+      }
+      if (client.messages) {
+        return wrapAnthropic(client, options);
+      }
+      console.warn("[Deadpipe] Could not detect provider. Returning unwrapped client.");
+      return client;
+  }
+}
+var index_default = wrap;
 export {
   PromptTracker,
   VERSION,
   index_default as default,
+  detectClientProvider,
   detectProvider,
   detectRefusal,
   estimateCost,
   extractAnthropicResponse,
+  extractCohereResponse,
+  extractGoogleAIResponse,
+  extractMistralResponse,
   extractOpenAIResponse,
+  extractResponse,
   track,
   validateEnumBounds,
   validateNumericBounds,
+  wrap,
+  wrapAnthropic,
+  wrap as wrapClient,
+  wrapCohere,
+  wrapGoogleAI,
+  wrapMistral,
   wrapOpenAI
 };
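
The net effect of the 3.x wrapper layer: the default export moves from wrapOpenAI to the provider-agnostic wrap (also exported as wrapClient), which dispatches on detectClientProvider and falls back to returning the client unwrapped with a console warning. A usage sketch — the client constructors and promptId value are illustrative, but wrap and its options come straight from the code above:

import wrap from "deadpipe";
import OpenAI from "openai";
import Anthropic from "@anthropic-ai/sdk";

// `promptId` is destructured out of options; the rest is forwarded to track().
const openai = wrap(new OpenAI(), { promptId: "checkout-summary" });
const anthropic = wrap(new Anthropic(), { promptId: "checkout-summary" });

// Calls keep their native SDK shape; telemetry is recorded via t.record().
const res = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Summarize this order." }]
});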
package/package.json CHANGED
@@ -1,11 +1,15 @@
 {
   "name": "deadpipe",
-  "version": "2.0.2",
-  "description": "LLM observability that answers: Is this prompt behaving the same as when it was last safe?",
+  "version": "3.0.1",
+  "description": "LLM observability that answers: Is this prompt still behaving safely?",
   "keywords": [
     "llm",
     "openai",
     "anthropic",
+    "google-ai",
+    "gemini",
+    "mistral",
+    "cohere",
     "gpt",
     "claude",
     "observability",