@botpress/vai 0.0.1-beta.1 → 0.0.1-beta.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -6
- package/.env +0 -3
- package/src/assertions/check.ts +0 -28
- package/src/assertions/extension.ts +0 -51
- package/src/assertions/extract.ts +0 -39
- package/src/assertions/filter.ts +0 -86
- package/src/assertions/rate.ts +0 -40
- package/src/context.ts +0 -65
- package/src/hooks/setEvaluator.ts +0 -13
- package/src/hooks/setupClient.ts +0 -6
- package/src/index.ts +0 -9
- package/src/models.ts +0 -394
- package/src/scripts/update-models.ts +0 -76
- package/src/scripts/update-types.ts +0 -59
- package/src/sdk-interfaces/llm/generateContent.ts +0 -127
- package/src/sdk-interfaces/llm/listLanguageModels.ts +0 -19
- package/src/task/compare.ts +0 -72
- package/src/utils/asyncAssertion.ts +0 -40
- package/src/utils/deferred.ts +0 -20
- package/src/utils/predictJson.ts +0 -114
package/package.json
CHANGED
|
@@ -1,14 +1,13 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@botpress/vai",
|
|
3
|
-
"version": "0.0.1-beta.
|
|
3
|
+
"version": "0.0.1-beta.3",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"description": "Vitest AI (vai) – a vitest extension for testing with LLMs",
|
|
6
|
-
"main": "src/index.ts",
|
|
7
6
|
"exports": {
|
|
8
7
|
".": {
|
|
9
|
-
"types": "./dist/
|
|
10
|
-
"import": "./dist/
|
|
11
|
-
"require": "./dist/
|
|
8
|
+
"types": "./dist/index.d.ts",
|
|
9
|
+
"import": "./dist/index.js",
|
|
10
|
+
"require": "./dist/index.cjs"
|
|
12
11
|
}
|
|
13
12
|
},
|
|
14
13
|
"scripts": {
|
|
@@ -40,6 +39,6 @@
|
|
|
40
39
|
"@botpress/client": "^0.36.2",
|
|
41
40
|
"@botpress/sdk": "^1.6.1",
|
|
42
41
|
"@botpress/wasm": "^1.0.0",
|
|
43
|
-
"vitest": "^2
|
|
42
|
+
"vitest": "^2 || ^3 || ^4 || ^5"
|
|
44
43
|
}
|
|
45
44
|
}
|
package/.env
DELETED
package/src/assertions/check.ts
DELETED
|
@@ -1,28 +0,0 @@
|
|
|
1
|
-
import { z } from '@botpress/sdk'
|
|
2
|
-
import { Context } from '../context'
|
|
3
|
-
import { asyncExpect } from '../utils/asyncAssertion'
|
|
4
|
-
import { Input, predictJson } from '../utils/predictJson'
|
|
5
|
-
import { makeToMatchInlineSnapshot, toAssertion } from './extension'
|
|
6
|
-
|
|
7
|
-
export type CheckOptions<T> = {
|
|
8
|
-
examples?: { value: T; expected: boolean; reason: string }[]
|
|
9
|
-
}
|
|
10
|
-
|
|
11
|
-
export function check<T extends Input>(value: T, condition: string, options?: CheckOptions<T>) {
|
|
12
|
-
const promise = predictJson({
|
|
13
|
-
systemMessage: `Check that the value meets the condition: ${condition}`,
|
|
14
|
-
examples: options?.examples?.map(({ value, reason, expected }) => ({
|
|
15
|
-
input: value,
|
|
16
|
-
output: { reason, result: expected }
|
|
17
|
-
})),
|
|
18
|
-
outputSchema: z.boolean(),
|
|
19
|
-
model: Context.evaluatorModel,
|
|
20
|
-
input: value
|
|
21
|
-
})
|
|
22
|
-
|
|
23
|
-
return {
|
|
24
|
-
...toAssertion(promise),
|
|
25
|
-
toBe: (expected: boolean) => asyncExpect(promise, (expect) => expect.toEqual(expected)),
|
|
26
|
-
toMatchInlineSnapshot: makeToMatchInlineSnapshot(promise)
|
|
27
|
-
}
|
|
28
|
-
}
|
|
@@ -1,51 +0,0 @@
|
|
|
1
|
-
import json5 from 'json5'
|
|
2
|
-
import { expect } from 'vitest'
|
|
3
|
-
import { getCurrentTest } from 'vitest/suite'
|
|
4
|
-
|
|
5
|
-
import { asyncExpect } from '../utils/asyncAssertion'
|
|
6
|
-
import { Output } from '../utils/predictJson'
|
|
7
|
-
|
|
8
|
-
export type ExtendedPromise<T> = PromiseLike<Output<T>> & {
|
|
9
|
-
value: PromiseLike<T>
|
|
10
|
-
}
|
|
11
|
-
|
|
12
|
-
export const toAssertion = <T>(promise: Promise<Output<T>>): ExtendedPromise<T> => {
|
|
13
|
-
return {
|
|
14
|
-
then: promise.then.bind(promise),
|
|
15
|
-
value: promise.then((value) => value.result)
|
|
16
|
-
}
|
|
17
|
-
}
|
|
18
|
-
|
|
19
|
-
export const makeToMatchInlineSnapshot =
|
|
20
|
-
<T>(promise: Promise<Output<T>>) =>
|
|
21
|
-
async (expected?: string) => {
|
|
22
|
-
const stack = new Error().stack!.split('\n')[2]
|
|
23
|
-
const newStack = `
|
|
24
|
-
at __INLINE_SNAPSHOT__ (node:internal/process/task_queues:1:1)
|
|
25
|
-
at randomLine (node:internal/process/task_queues:1:1)
|
|
26
|
-
${stack}
|
|
27
|
-
`.trim()
|
|
28
|
-
|
|
29
|
-
const obj = json5.parse(expected ?? '""')
|
|
30
|
-
const expectation = asyncExpect(promise, (expect) => expect.toMatchObject(obj)).catch(() => {
|
|
31
|
-
// we swallow the error here, as we're going to throw a new one with the correct stack
|
|
32
|
-
// this is just to make vitest happy and show a nice error message
|
|
33
|
-
})
|
|
34
|
-
|
|
35
|
-
try {
|
|
36
|
-
expect((await promise).result).toMatchObject(obj)
|
|
37
|
-
} catch (err) {
|
|
38
|
-
const newError = new Error()
|
|
39
|
-
newError.stack = newStack
|
|
40
|
-
|
|
41
|
-
expect.getState().snapshotState.match({
|
|
42
|
-
isInline: true,
|
|
43
|
-
received: (await promise).result,
|
|
44
|
-
testName: getCurrentTest()!.name,
|
|
45
|
-
error: newError,
|
|
46
|
-
inlineSnapshot: expected
|
|
47
|
-
})
|
|
48
|
-
}
|
|
49
|
-
|
|
50
|
-
return expectation
|
|
51
|
-
}
|
|
@@ -1,39 +0,0 @@
|
|
|
1
|
-
import { z } from '@botpress/sdk'
|
|
2
|
-
|
|
3
|
-
import { Context } from '../context'
|
|
4
|
-
import { asyncExpect } from '../utils/asyncAssertion'
|
|
5
|
-
import { Input, predictJson } from '../utils/predictJson'
|
|
6
|
-
import { makeToMatchInlineSnapshot, toAssertion } from './extension'
|
|
7
|
-
|
|
8
|
-
export type ExtractOptions<T, S> = {
|
|
9
|
-
description?: string
|
|
10
|
-
examples?: { value: T; extracted: S; reason: string }[]
|
|
11
|
-
}
|
|
12
|
-
|
|
13
|
-
export function extract<T extends Input, S extends z.AnyZodObject>(
|
|
14
|
-
value: T,
|
|
15
|
-
shape: S,
|
|
16
|
-
options?: ExtractOptions<T, z.infer<S>>
|
|
17
|
-
) {
|
|
18
|
-
const additionalMessage = options?.description
|
|
19
|
-
? `\nIn order to extract the right information, follow these instructions:\n${options.description}\n`
|
|
20
|
-
: ''
|
|
21
|
-
const promise = predictJson({
|
|
22
|
-
systemMessage:
|
|
23
|
-
'From the given input, extract the required information into the requested format.' + additionalMessage.trim(),
|
|
24
|
-
examples: options?.examples?.map(({ value, reason, extracted }) => ({
|
|
25
|
-
input: value,
|
|
26
|
-
output: { reason, result: extracted }
|
|
27
|
-
})),
|
|
28
|
-
outputSchema: shape,
|
|
29
|
-
model: Context.evaluatorModel,
|
|
30
|
-
input: value
|
|
31
|
-
})
|
|
32
|
-
|
|
33
|
-
return {
|
|
34
|
-
...toAssertion(promise),
|
|
35
|
-
toBe: (expected: z.infer<S>) => asyncExpect(promise, (expect) => expect.toEqual(expected)),
|
|
36
|
-
toMatchObject: (expected: Partial<z.infer<S>>) => asyncExpect(promise, (expect) => expect.toMatchObject(expected)),
|
|
37
|
-
toMatchInlineSnapshot: makeToMatchInlineSnapshot(promise)
|
|
38
|
-
}
|
|
39
|
-
}
|
package/src/assertions/filter.ts
DELETED
|
@@ -1,86 +0,0 @@
|
|
|
1
|
-
import { literal, z } from '@botpress/sdk'
|
|
2
|
-
|
|
3
|
-
import { Context } from '../context'
|
|
4
|
-
import { asyncExpect } from '../utils/asyncAssertion'
|
|
5
|
-
import { predictJson } from '../utils/predictJson'
|
|
6
|
-
import { makeToMatchInlineSnapshot, toAssertion } from './extension'
|
|
7
|
-
|
|
8
|
-
export type FilterOptions<T> = {
|
|
9
|
-
examples?: { value: T; reason: string; keep: boolean }[]
|
|
10
|
-
}
|
|
11
|
-
|
|
12
|
-
export function filter<U>(values: U[], condition: string, options?: FilterOptions<U>) {
|
|
13
|
-
const mappedValues = values.map((_, idx) =>
|
|
14
|
-
z.object({
|
|
15
|
-
index: literal(idx),
|
|
16
|
-
reason: z.string(),
|
|
17
|
-
keep: z.boolean()
|
|
18
|
-
})
|
|
19
|
-
)
|
|
20
|
-
|
|
21
|
-
const input = values.map((value, idx) => ({
|
|
22
|
-
index: idx,
|
|
23
|
-
value
|
|
24
|
-
}))
|
|
25
|
-
|
|
26
|
-
const schema = z
|
|
27
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
28
|
-
.tuple(mappedValues as any)
|
|
29
|
-
.describe(
|
|
30
|
-
'An array of the objects with the index and a boolean value indicating if the object should be kept or not'
|
|
31
|
-
)
|
|
32
|
-
|
|
33
|
-
const promise = predictJson({
|
|
34
|
-
systemMessage: `
|
|
35
|
-
Based on the following qualification criteria, you need to filter the given list of objects.
|
|
36
|
-
Citeria: ${condition}
|
|
37
|
-
|
|
38
|
-
---
|
|
39
|
-
You need to return an array of objects with the index and a boolean value indicating if the object should be kept or not.
|
|
40
|
-
`.trim(),
|
|
41
|
-
examples: options?.examples
|
|
42
|
-
? [
|
|
43
|
-
{
|
|
44
|
-
input: options?.examples?.map((v, index) => ({
|
|
45
|
-
index,
|
|
46
|
-
value: v.value
|
|
47
|
-
})),
|
|
48
|
-
output: {
|
|
49
|
-
reason: 'Here are some examples',
|
|
50
|
-
result: options?.examples?.map((v, idx) => ({
|
|
51
|
-
index: idx,
|
|
52
|
-
reason: v.reason,
|
|
53
|
-
keep: v.keep
|
|
54
|
-
}))
|
|
55
|
-
}
|
|
56
|
-
}
|
|
57
|
-
]
|
|
58
|
-
: undefined,
|
|
59
|
-
outputSchema: schema,
|
|
60
|
-
model: Context.evaluatorModel,
|
|
61
|
-
input
|
|
62
|
-
}).then((x) => {
|
|
63
|
-
const results = schema.parse(x.result) as { index: number; keep: boolean }[]
|
|
64
|
-
return {
|
|
65
|
-
result: values.filter((_, idx) => results.find((r) => r.index === idx)?.keep),
|
|
66
|
-
reason: x.reason
|
|
67
|
-
}
|
|
68
|
-
})
|
|
69
|
-
|
|
70
|
-
return {
|
|
71
|
-
...toAssertion(promise),
|
|
72
|
-
toBe: (expected: U[]) => asyncExpect(promise, (expect) => expect.toEqual(expected)),
|
|
73
|
-
toMatchInlineSnapshot: makeToMatchInlineSnapshot(promise),
|
|
74
|
-
toHaveNoneFiltered: () => asyncExpect(promise, (expect) => expect.toEqual(values)),
|
|
75
|
-
toHaveSomeFiltered: () => asyncExpect(promise, (expect) => expect.not.toEqual(values)),
|
|
76
|
-
toBeEmpty: () => asyncExpect(promise, (expect) => expect.toHaveLength(0)),
|
|
77
|
-
length: {
|
|
78
|
-
toBe: (expected: number) => asyncExpect(promise, (expect) => expect.toHaveLength(expected)),
|
|
79
|
-
toBeGreaterThanOrEqual: (expected: number) =>
|
|
80
|
-
asyncExpect(promise, (expect) => expect.length.greaterThanOrEqual(expected)),
|
|
81
|
-
toBeLessThanOrEqual: (expected: number) =>
|
|
82
|
-
asyncExpect(promise, (expect) => expect.length.lessThanOrEqual(expected)),
|
|
83
|
-
toBeBetween: (min: number, max: number) => asyncExpect(promise, (expect) => expect.length.within(min, max))
|
|
84
|
-
}
|
|
85
|
-
}
|
|
86
|
-
}
|
package/src/assertions/rate.ts
DELETED
|
@@ -1,40 +0,0 @@
|
|
|
1
|
-
import { z } from '@botpress/sdk'
|
|
2
|
-
|
|
3
|
-
import { Context } from '../context'
|
|
4
|
-
import { asyncExpect } from '../utils/asyncAssertion'
|
|
5
|
-
import { Input, predictJson } from '../utils/predictJson'
|
|
6
|
-
import { makeToMatchInlineSnapshot, toAssertion } from './extension'
|
|
7
|
-
|
|
8
|
-
export type RatingScore = 1 | 2 | 3 | 4 | 5
|
|
9
|
-
export type RateOptions<T> = {
|
|
10
|
-
examples?: { value: T; rating: number; reason: string }[]
|
|
11
|
-
}
|
|
12
|
-
|
|
13
|
-
export function rate<T extends Input>(value: T, condition: string, options?: RateOptions<T>) {
|
|
14
|
-
const schema = z.number().min(1).max(5).describe('Rating score, higher is better (1 is the worst, 5 is the best)')
|
|
15
|
-
const promise = predictJson({
|
|
16
|
-
systemMessage: `Based on the following qualification criteria, you need to rate the given situation from a score of 1 to 5.\nScoring: 1 is the worst score, 5 is the best score possible.\nCriteria: ${condition}`,
|
|
17
|
-
examples: options?.examples?.map(({ value, reason, rating }) => ({
|
|
18
|
-
input: value,
|
|
19
|
-
output: { reason, result: rating }
|
|
20
|
-
})),
|
|
21
|
-
outputSchema: schema,
|
|
22
|
-
model: Context.evaluatorModel,
|
|
23
|
-
input: value
|
|
24
|
-
}).then((x) => {
|
|
25
|
-
return {
|
|
26
|
-
result: typeof x.result === 'number' ? x.result : parseInt(x.result, 10),
|
|
27
|
-
reason: x.reason
|
|
28
|
-
}
|
|
29
|
-
})
|
|
30
|
-
|
|
31
|
-
return {
|
|
32
|
-
...toAssertion(promise),
|
|
33
|
-
toBe: (expected: number) => asyncExpect(promise, (expect) => expect.toEqual(expected)),
|
|
34
|
-
toMatchInlineSnapshot: makeToMatchInlineSnapshot(promise),
|
|
35
|
-
toBeGreaterThanOrEqual: (expected: RatingScore) =>
|
|
36
|
-
asyncExpect(promise, (expect) => expect.toBeGreaterThanOrEqual(expected)),
|
|
37
|
-
toBeLessThanOrEqual: (expected: RatingScore) =>
|
|
38
|
-
asyncExpect(promise, (expect) => expect.toBeLessThanOrEqual(expected))
|
|
39
|
-
}
|
|
40
|
-
}
|
package/src/context.ts
DELETED
|
@@ -1,65 +0,0 @@
|
|
|
1
|
-
import type { Client } from '@botpress/client'
|
|
2
|
-
import { onTestFinished } from 'vitest'
|
|
3
|
-
import { getCurrentTest } from 'vitest/suite'
|
|
4
|
-
import { Models } from './models'
|
|
5
|
-
|
|
6
|
-
export type EvaluatorModel = (typeof Models)[number]['id']
|
|
7
|
-
|
|
8
|
-
export type TestMetadata = {
|
|
9
|
-
isVaiTest: boolean
|
|
10
|
-
scenario?: string
|
|
11
|
-
evaluatorModel?: EvaluatorModel
|
|
12
|
-
}
|
|
13
|
-
|
|
14
|
-
const getTestMetadata = (): TestMetadata => {
|
|
15
|
-
const test = getCurrentTest()
|
|
16
|
-
return (test?.meta ?? {
|
|
17
|
-
isVaiTest: false
|
|
18
|
-
}) as TestMetadata
|
|
19
|
-
}
|
|
20
|
-
|
|
21
|
-
class VaiContext {
|
|
22
|
-
#client: Client | null = null
|
|
23
|
-
#wrapError = false
|
|
24
|
-
|
|
25
|
-
get wrapError() {
|
|
26
|
-
return this.#wrapError
|
|
27
|
-
}
|
|
28
|
-
|
|
29
|
-
get client() {
|
|
30
|
-
if (!this.#client) {
|
|
31
|
-
throw new Error('Botpress client is not set')
|
|
32
|
-
}
|
|
33
|
-
|
|
34
|
-
return this.#client
|
|
35
|
-
}
|
|
36
|
-
|
|
37
|
-
get evaluatorModel(): EvaluatorModel {
|
|
38
|
-
return getTestMetadata().evaluatorModel ?? 'openai__gpt-4o-mini-2024-07-18'
|
|
39
|
-
}
|
|
40
|
-
|
|
41
|
-
get scenario() {
|
|
42
|
-
return getTestMetadata().scenario
|
|
43
|
-
}
|
|
44
|
-
|
|
45
|
-
get isVaiTest() {
|
|
46
|
-
return getTestMetadata().isVaiTest
|
|
47
|
-
}
|
|
48
|
-
|
|
49
|
-
setClient(cognitive: Client) {
|
|
50
|
-
this.#client = cognitive
|
|
51
|
-
}
|
|
52
|
-
|
|
53
|
-
swallowErrors() {
|
|
54
|
-
if (!getCurrentTest()) {
|
|
55
|
-
throw new Error('cancelBail is a Vitest hook and must be called within a test')
|
|
56
|
-
}
|
|
57
|
-
|
|
58
|
-
this.#wrapError = true
|
|
59
|
-
onTestFinished(() => {
|
|
60
|
-
this.#wrapError = false
|
|
61
|
-
})
|
|
62
|
-
}
|
|
63
|
-
}
|
|
64
|
-
|
|
65
|
-
export const Context = new VaiContext()
|
|
@@ -1,13 +0,0 @@
|
|
|
1
|
-
import { getCurrentTest } from 'vitest/suite'
|
|
2
|
-
import { EvaluatorModel, TestMetadata } from '../context'
|
|
3
|
-
|
|
4
|
-
export const setEvaluator = (model: EvaluatorModel) => {
|
|
5
|
-
const test = getCurrentTest()
|
|
6
|
-
|
|
7
|
-
if (!test) {
|
|
8
|
-
throw new Error('setEvaluator is a Vitest hook and must be called within a test')
|
|
9
|
-
}
|
|
10
|
-
|
|
11
|
-
const meta = test.meta as TestMetadata
|
|
12
|
-
meta.evaluatorModel = model
|
|
13
|
-
}
|
package/src/hooks/setupClient.ts
DELETED
package/src/index.ts
DELETED
|
@@ -1,9 +0,0 @@
|
|
|
1
|
-
export { compare } from './task/compare'
|
|
2
|
-
|
|
3
|
-
export { check } from './assertions/check'
|
|
4
|
-
export { extract } from './assertions/extract'
|
|
5
|
-
export { filter } from './assertions/filter'
|
|
6
|
-
export { rate } from './assertions/rate'
|
|
7
|
-
|
|
8
|
-
export { setEvaluator } from './hooks/setEvaluator'
|
|
9
|
-
export { setupClient } from './hooks/setupClient'
|
package/src/models.ts
DELETED
|
@@ -1,394 +0,0 @@
|
|
|
1
|
-
|
|
2
|
-
// This file is generated. Do not edit it manually.
|
|
3
|
-
// See 'scripts/update-models.ts'
|
|
4
|
-
|
|
5
|
-
/* eslint-disable */
|
|
6
|
-
/* tslint:disable */
|
|
7
|
-
|
|
8
|
-
export const Models = [
|
|
9
|
-
{
|
|
10
|
-
"id": "anthropic__claude-3-haiku-20240307",
|
|
11
|
-
"name": "Claude 3 Haiku",
|
|
12
|
-
"integration": "anthropic",
|
|
13
|
-
"input": {
|
|
14
|
-
"maxTokens": 200000
|
|
15
|
-
},
|
|
16
|
-
"output": {
|
|
17
|
-
"maxTokens": 4096
|
|
18
|
-
}
|
|
19
|
-
},
|
|
20
|
-
{
|
|
21
|
-
"id": "anthropic__claude-3-5-sonnet-20240620",
|
|
22
|
-
"name": "Claude 3.5 Sonnet",
|
|
23
|
-
"integration": "anthropic",
|
|
24
|
-
"input": {
|
|
25
|
-
"maxTokens": 200000
|
|
26
|
-
},
|
|
27
|
-
"output": {
|
|
28
|
-
"maxTokens": 4096
|
|
29
|
-
}
|
|
30
|
-
},
|
|
31
|
-
{
|
|
32
|
-
"id": "cerebras__llama3.1-70b",
|
|
33
|
-
"name": "Llama 3.1 70B",
|
|
34
|
-
"integration": "cerebras",
|
|
35
|
-
"input": {
|
|
36
|
-
"maxTokens": 8192
|
|
37
|
-
},
|
|
38
|
-
"output": {
|
|
39
|
-
"maxTokens": 8192
|
|
40
|
-
}
|
|
41
|
-
},
|
|
42
|
-
{
|
|
43
|
-
"id": "cerebras__llama3.1-8b",
|
|
44
|
-
"name": "Llama 3.1 8B",
|
|
45
|
-
"integration": "cerebras",
|
|
46
|
-
"input": {
|
|
47
|
-
"maxTokens": 8192
|
|
48
|
-
},
|
|
49
|
-
"output": {
|
|
50
|
-
"maxTokens": 8192
|
|
51
|
-
}
|
|
52
|
-
},
|
|
53
|
-
{
|
|
54
|
-
"id": "fireworks-ai__accounts/fireworks/models/deepseek-coder-v2-instruct",
|
|
55
|
-
"name": "DeepSeek Coder V2 Instruct",
|
|
56
|
-
"integration": "fireworks-ai",
|
|
57
|
-
"input": {
|
|
58
|
-
"maxTokens": 131072
|
|
59
|
-
},
|
|
60
|
-
"output": {
|
|
61
|
-
"maxTokens": 131072
|
|
62
|
-
}
|
|
63
|
-
},
|
|
64
|
-
{
|
|
65
|
-
"id": "fireworks-ai__accounts/fireworks/models/deepseek-coder-v2-lite-instruct",
|
|
66
|
-
"name": "DeepSeek Coder V2 Lite",
|
|
67
|
-
"integration": "fireworks-ai",
|
|
68
|
-
"input": {
|
|
69
|
-
"maxTokens": 163840
|
|
70
|
-
},
|
|
71
|
-
"output": {
|
|
72
|
-
"maxTokens": 163840
|
|
73
|
-
}
|
|
74
|
-
},
|
|
75
|
-
{
|
|
76
|
-
"id": "fireworks-ai__accounts/fireworks/models/firellava-13b",
|
|
77
|
-
"name": "FireLLaVA-13B",
|
|
78
|
-
"integration": "fireworks-ai",
|
|
79
|
-
"input": {
|
|
80
|
-
"maxTokens": 4096
|
|
81
|
-
},
|
|
82
|
-
"output": {
|
|
83
|
-
"maxTokens": 4096
|
|
84
|
-
}
|
|
85
|
-
},
|
|
86
|
-
{
|
|
87
|
-
"id": "fireworks-ai__accounts/fireworks/models/firefunction-v2",
|
|
88
|
-
"name": "Firefunction V2",
|
|
89
|
-
"integration": "fireworks-ai",
|
|
90
|
-
"input": {
|
|
91
|
-
"maxTokens": 8192
|
|
92
|
-
},
|
|
93
|
-
"output": {
|
|
94
|
-
"maxTokens": 8192
|
|
95
|
-
}
|
|
96
|
-
},
|
|
97
|
-
{
|
|
98
|
-
"id": "fireworks-ai__accounts/fireworks/models/gemma2-9b-it",
|
|
99
|
-
"name": "Gemma 2 9B Instruct",
|
|
100
|
-
"integration": "fireworks-ai",
|
|
101
|
-
"input": {
|
|
102
|
-
"maxTokens": 8192
|
|
103
|
-
},
|
|
104
|
-
"output": {
|
|
105
|
-
"maxTokens": 8192
|
|
106
|
-
}
|
|
107
|
-
},
|
|
108
|
-
{
|
|
109
|
-
"id": "fireworks-ai__accounts/fireworks/models/llama-v3p1-405b-instruct",
|
|
110
|
-
"name": "Llama 3.1 405B Instruct",
|
|
111
|
-
"integration": "fireworks-ai",
|
|
112
|
-
"input": {
|
|
113
|
-
"maxTokens": 131072
|
|
114
|
-
},
|
|
115
|
-
"output": {
|
|
116
|
-
"maxTokens": 131072
|
|
117
|
-
}
|
|
118
|
-
},
|
|
119
|
-
{
|
|
120
|
-
"id": "fireworks-ai__accounts/fireworks/models/llama-v3p1-70b-instruct",
|
|
121
|
-
"name": "Llama 3.1 70B Instruct",
|
|
122
|
-
"integration": "fireworks-ai",
|
|
123
|
-
"input": {
|
|
124
|
-
"maxTokens": 131072
|
|
125
|
-
},
|
|
126
|
-
"output": {
|
|
127
|
-
"maxTokens": 131072
|
|
128
|
-
}
|
|
129
|
-
},
|
|
130
|
-
{
|
|
131
|
-
"id": "fireworks-ai__accounts/fireworks/models/llama-v3p1-8b-instruct",
|
|
132
|
-
"name": "Llama 3.1 8B Instruct",
|
|
133
|
-
"integration": "fireworks-ai",
|
|
134
|
-
"input": {
|
|
135
|
-
"maxTokens": 131072
|
|
136
|
-
},
|
|
137
|
-
"output": {
|
|
138
|
-
"maxTokens": 131072
|
|
139
|
-
}
|
|
140
|
-
},
|
|
141
|
-
{
|
|
142
|
-
"id": "fireworks-ai__accounts/fireworks/models/mixtral-8x22b-instruct",
|
|
143
|
-
"name": "Mixtral MoE 8x22B Instruct",
|
|
144
|
-
"integration": "fireworks-ai",
|
|
145
|
-
"input": {
|
|
146
|
-
"maxTokens": 65536
|
|
147
|
-
},
|
|
148
|
-
"output": {
|
|
149
|
-
"maxTokens": 65536
|
|
150
|
-
}
|
|
151
|
-
},
|
|
152
|
-
{
|
|
153
|
-
"id": "fireworks-ai__accounts/fireworks/models/mixtral-8x7b-instruct",
|
|
154
|
-
"name": "Mixtral MoE 8x7B Instruct",
|
|
155
|
-
"integration": "fireworks-ai",
|
|
156
|
-
"input": {
|
|
157
|
-
"maxTokens": 32768
|
|
158
|
-
},
|
|
159
|
-
"output": {
|
|
160
|
-
"maxTokens": 32768
|
|
161
|
-
}
|
|
162
|
-
},
|
|
163
|
-
{
|
|
164
|
-
"id": "fireworks-ai__accounts/fireworks/models/mythomax-l2-13b",
|
|
165
|
-
"name": "MythoMax L2 13b",
|
|
166
|
-
"integration": "fireworks-ai",
|
|
167
|
-
"input": {
|
|
168
|
-
"maxTokens": 4096
|
|
169
|
-
},
|
|
170
|
-
"output": {
|
|
171
|
-
"maxTokens": 4096
|
|
172
|
-
}
|
|
173
|
-
},
|
|
174
|
-
{
|
|
175
|
-
"id": "fireworks-ai__accounts/fireworks/models/qwen2-72b-instruct",
|
|
176
|
-
"name": "Qwen2 72b Instruct",
|
|
177
|
-
"integration": "fireworks-ai",
|
|
178
|
-
"input": {
|
|
179
|
-
"maxTokens": 32768
|
|
180
|
-
},
|
|
181
|
-
"output": {
|
|
182
|
-
"maxTokens": 32768
|
|
183
|
-
}
|
|
184
|
-
},
|
|
185
|
-
{
|
|
186
|
-
"id": "groq__gemma2-9b-it",
|
|
187
|
-
"name": "Gemma2 9B",
|
|
188
|
-
"integration": "groq",
|
|
189
|
-
"input": {
|
|
190
|
-
"maxTokens": 8192
|
|
191
|
-
},
|
|
192
|
-
"output": {
|
|
193
|
-
"maxTokens": 8192
|
|
194
|
-
}
|
|
195
|
-
},
|
|
196
|
-
{
|
|
197
|
-
"id": "groq__llama3-70b-8192",
|
|
198
|
-
"name": "LLaMA 3 70B",
|
|
199
|
-
"integration": "groq",
|
|
200
|
-
"input": {
|
|
201
|
-
"maxTokens": 8192
|
|
202
|
-
},
|
|
203
|
-
"output": {
|
|
204
|
-
"maxTokens": 8192
|
|
205
|
-
}
|
|
206
|
-
},
|
|
207
|
-
{
|
|
208
|
-
"id": "groq__llama3-8b-8192",
|
|
209
|
-
"name": "LLaMA 3 8B",
|
|
210
|
-
"integration": "groq",
|
|
211
|
-
"input": {
|
|
212
|
-
"maxTokens": 8192
|
|
213
|
-
},
|
|
214
|
-
"output": {
|
|
215
|
-
"maxTokens": 8192
|
|
216
|
-
}
|
|
217
|
-
},
|
|
218
|
-
{
|
|
219
|
-
"id": "groq__llama-3.1-70b-versatile",
|
|
220
|
-
"name": "LLaMA 3.1 70B",
|
|
221
|
-
"integration": "groq",
|
|
222
|
-
"input": {
|
|
223
|
-
"maxTokens": 128000
|
|
224
|
-
},
|
|
225
|
-
"output": {
|
|
226
|
-
"maxTokens": 8192
|
|
227
|
-
}
|
|
228
|
-
},
|
|
229
|
-
{
|
|
230
|
-
"id": "groq__llama-3.1-8b-instant",
|
|
231
|
-
"name": "LLaMA 3.1 8B",
|
|
232
|
-
"integration": "groq",
|
|
233
|
-
"input": {
|
|
234
|
-
"maxTokens": 128000
|
|
235
|
-
},
|
|
236
|
-
"output": {
|
|
237
|
-
"maxTokens": 8192
|
|
238
|
-
}
|
|
239
|
-
},
|
|
240
|
-
{
|
|
241
|
-
"id": "groq__llama-3.2-11b-vision-preview",
|
|
242
|
-
"name": "LLaMA 3.2 11B Vision",
|
|
243
|
-
"integration": "groq",
|
|
244
|
-
"input": {
|
|
245
|
-
"maxTokens": 128000
|
|
246
|
-
},
|
|
247
|
-
"output": {
|
|
248
|
-
"maxTokens": 8192
|
|
249
|
-
}
|
|
250
|
-
},
|
|
251
|
-
{
|
|
252
|
-
"id": "groq__llama-3.2-1b-preview",
|
|
253
|
-
"name": "LLaMA 3.2 1B",
|
|
254
|
-
"integration": "groq",
|
|
255
|
-
"input": {
|
|
256
|
-
"maxTokens": 128000
|
|
257
|
-
},
|
|
258
|
-
"output": {
|
|
259
|
-
"maxTokens": 8192
|
|
260
|
-
}
|
|
261
|
-
},
|
|
262
|
-
{
|
|
263
|
-
"id": "groq__llama-3.2-3b-preview",
|
|
264
|
-
"name": "LLaMA 3.2 3B",
|
|
265
|
-
"integration": "groq",
|
|
266
|
-
"input": {
|
|
267
|
-
"maxTokens": 128000
|
|
268
|
-
},
|
|
269
|
-
"output": {
|
|
270
|
-
"maxTokens": 8192
|
|
271
|
-
}
|
|
272
|
-
},
|
|
273
|
-
{
|
|
274
|
-
"id": "groq__llama-3.2-90b-vision-preview",
|
|
275
|
-
"name": "LLaMA 3.2 90B Vision",
|
|
276
|
-
"integration": "groq",
|
|
277
|
-
"input": {
|
|
278
|
-
"maxTokens": 128000
|
|
279
|
-
},
|
|
280
|
-
"output": {
|
|
281
|
-
"maxTokens": 8192
|
|
282
|
-
}
|
|
283
|
-
},
|
|
284
|
-
{
|
|
285
|
-
"id": "groq__llama-3.3-70b-versatile",
|
|
286
|
-
"name": "LLaMA 3.3 70B",
|
|
287
|
-
"integration": "groq",
|
|
288
|
-
"input": {
|
|
289
|
-
"maxTokens": 128000
|
|
290
|
-
},
|
|
291
|
-
"output": {
|
|
292
|
-
"maxTokens": 32768
|
|
293
|
-
}
|
|
294
|
-
},
|
|
295
|
-
{
|
|
296
|
-
"id": "groq__mixtral-8x7b-32768",
|
|
297
|
-
"name": "Mixtral 8x7B",
|
|
298
|
-
"integration": "groq",
|
|
299
|
-
"input": {
|
|
300
|
-
"maxTokens": 32768
|
|
301
|
-
},
|
|
302
|
-
"output": {
|
|
303
|
-
"maxTokens": 32768
|
|
304
|
-
}
|
|
305
|
-
},
|
|
306
|
-
{
|
|
307
|
-
"id": "openai__o1-2024-12-17",
|
|
308
|
-
"name": "GPT o1",
|
|
309
|
-
"integration": "openai",
|
|
310
|
-
"input": {
|
|
311
|
-
"maxTokens": 200000
|
|
312
|
-
},
|
|
313
|
-
"output": {
|
|
314
|
-
"maxTokens": 100000
|
|
315
|
-
}
|
|
316
|
-
},
|
|
317
|
-
{
|
|
318
|
-
"id": "openai__o1-mini-2024-09-12",
|
|
319
|
-
"name": "GPT o1-mini",
|
|
320
|
-
"integration": "openai",
|
|
321
|
-
"input": {
|
|
322
|
-
"maxTokens": 128000
|
|
323
|
-
},
|
|
324
|
-
"output": {
|
|
325
|
-
"maxTokens": 65536
|
|
326
|
-
}
|
|
327
|
-
},
|
|
328
|
-
{
|
|
329
|
-
"id": "openai__gpt-3.5-turbo-0125",
|
|
330
|
-
"name": "GPT-3.5 Turbo",
|
|
331
|
-
"integration": "openai",
|
|
332
|
-
"input": {
|
|
333
|
-
"maxTokens": 128000
|
|
334
|
-
},
|
|
335
|
-
"output": {
|
|
336
|
-
"maxTokens": 4096
|
|
337
|
-
}
|
|
338
|
-
},
|
|
339
|
-
{
|
|
340
|
-
"id": "openai__gpt-4-turbo-2024-04-09",
|
|
341
|
-
"name": "GPT-4 Turbo",
|
|
342
|
-
"integration": "openai",
|
|
343
|
-
"input": {
|
|
344
|
-
"maxTokens": 128000
|
|
345
|
-
},
|
|
346
|
-
"output": {
|
|
347
|
-
"maxTokens": 4096
|
|
348
|
-
}
|
|
349
|
-
},
|
|
350
|
-
{
|
|
351
|
-
"id": "openai__gpt-4o-2024-08-06",
|
|
352
|
-
"name": "GPT-4o (August 2024)",
|
|
353
|
-
"integration": "openai",
|
|
354
|
-
"input": {
|
|
355
|
-
"maxTokens": 128000
|
|
356
|
-
},
|
|
357
|
-
"output": {
|
|
358
|
-
"maxTokens": 16384
|
|
359
|
-
}
|
|
360
|
-
},
|
|
361
|
-
{
|
|
362
|
-
"id": "openai__gpt-4o-2024-05-13",
|
|
363
|
-
"name": "GPT-4o (May 2024)",
|
|
364
|
-
"integration": "openai",
|
|
365
|
-
"input": {
|
|
366
|
-
"maxTokens": 128000
|
|
367
|
-
},
|
|
368
|
-
"output": {
|
|
369
|
-
"maxTokens": 4096
|
|
370
|
-
}
|
|
371
|
-
},
|
|
372
|
-
{
|
|
373
|
-
"id": "openai__gpt-4o-2024-11-20",
|
|
374
|
-
"name": "GPT-4o (November 2024)",
|
|
375
|
-
"integration": "openai",
|
|
376
|
-
"input": {
|
|
377
|
-
"maxTokens": 128000
|
|
378
|
-
},
|
|
379
|
-
"output": {
|
|
380
|
-
"maxTokens": 16384
|
|
381
|
-
}
|
|
382
|
-
},
|
|
383
|
-
{
|
|
384
|
-
"id": "openai__gpt-4o-mini-2024-07-18",
|
|
385
|
-
"name": "GPT-4o Mini",
|
|
386
|
-
"integration": "openai",
|
|
387
|
-
"input": {
|
|
388
|
-
"maxTokens": 128000
|
|
389
|
-
},
|
|
390
|
-
"output": {
|
|
391
|
-
"maxTokens": 16384
|
|
392
|
-
}
|
|
393
|
-
}
|
|
394
|
-
] as const
|
|
@@ -1,76 +0,0 @@
|
|
|
1
|
-
import { Client } from '@botpress/client'
|
|
2
|
-
|
|
3
|
-
import _ from 'lodash'
|
|
4
|
-
import fs from 'node:fs'
|
|
5
|
-
|
|
6
|
-
const LLM_LIST_MODELS = 'listLanguageModels'
|
|
7
|
-
|
|
8
|
-
const client = new Client({
|
|
9
|
-
apiUrl: process.env.CLOUD_API_ENDPOINT,
|
|
10
|
-
botId: process.env.CLOUD_BOT_ID,
|
|
11
|
-
token: process.env.CLOUD_PAT
|
|
12
|
-
})
|
|
13
|
-
|
|
14
|
-
const { bot } = await client.getBot({
|
|
15
|
-
id: process.env.CLOUD_BOT_ID!
|
|
16
|
-
})
|
|
17
|
-
|
|
18
|
-
type Model = {
|
|
19
|
-
id: string
|
|
20
|
-
name: string
|
|
21
|
-
integration: string
|
|
22
|
-
input: { maxTokens: number }
|
|
23
|
-
output: { maxTokens: number }
|
|
24
|
-
}
|
|
25
|
-
|
|
26
|
-
const models: Model[] = []
|
|
27
|
-
|
|
28
|
-
for (const integrationId in bot.integrations) {
|
|
29
|
-
const botIntegration = bot.integrations[integrationId]
|
|
30
|
-
if (botIntegration?.public && botIntegration?.enabled && botIntegration?.status === 'registered') {
|
|
31
|
-
try {
|
|
32
|
-
const { integration } = await client.getPublicIntegrationById({
|
|
33
|
-
id: botIntegration.id
|
|
34
|
-
})
|
|
35
|
-
|
|
36
|
-
const canListModels = Object.keys(integration.actions).includes(LLM_LIST_MODELS)
|
|
37
|
-
if (!canListModels) {
|
|
38
|
-
continue
|
|
39
|
-
}
|
|
40
|
-
|
|
41
|
-
const { output } = await client.callAction({
|
|
42
|
-
type: `${integration.name}:${LLM_LIST_MODELS}`,
|
|
43
|
-
input: {}
|
|
44
|
-
})
|
|
45
|
-
|
|
46
|
-
if (_.isArray(output?.models)) {
|
|
47
|
-
for (const model of output.models) {
|
|
48
|
-
models.push({
|
|
49
|
-
id: `${integration.name}__${model.id}`,
|
|
50
|
-
name: model.name,
|
|
51
|
-
integration: integration.name,
|
|
52
|
-
input: { maxTokens: model.input.maxTokens },
|
|
53
|
-
output: { maxTokens: model.output.maxTokens }
|
|
54
|
-
})
|
|
55
|
-
}
|
|
56
|
-
}
|
|
57
|
-
} catch (err: unknown) {
|
|
58
|
-
console.error('Error fetching integration:', err instanceof Error ? err.message : `${err}`)
|
|
59
|
-
}
|
|
60
|
-
}
|
|
61
|
-
}
|
|
62
|
-
|
|
63
|
-
const content = JSON.stringify(_.orderBy(models, ['integration', 'name']), null, 2)
|
|
64
|
-
|
|
65
|
-
fs.writeFileSync(
|
|
66
|
-
'./src/models.ts',
|
|
67
|
-
`
|
|
68
|
-
// This file is generated. Do not edit it manually.
|
|
69
|
-
// See 'scripts/update-models.ts'
|
|
70
|
-
|
|
71
|
-
/* eslint-disable */
|
|
72
|
-
/* tslint:disable */
|
|
73
|
-
|
|
74
|
-
export const Models = ${content} as const`,
|
|
75
|
-
'utf-8'
|
|
76
|
-
)
|
|
@@ -1,59 +0,0 @@
|
|
|
1
|
-
import { Client } from '@botpress/client'
|
|
2
|
-
import sdk from '@botpress/sdk'
|
|
3
|
-
|
|
4
|
-
import _ from 'lodash'
|
|
5
|
-
import fs from 'node:fs'
|
|
6
|
-
import path from 'node:path'
|
|
7
|
-
|
|
8
|
-
const { z } = sdk
|
|
9
|
-
|
|
10
|
-
const Interfaces = ['llm'] as const
|
|
11
|
-
|
|
12
|
-
const client = new Client({
|
|
13
|
-
apiUrl: process.env.CLOUD_API_ENDPOINT,
|
|
14
|
-
botId: process.env.CLOUD_BOT_ID,
|
|
15
|
-
token: process.env.CLOUD_PAT
|
|
16
|
-
})
|
|
17
|
-
|
|
18
|
-
for (const name of Interfaces) {
|
|
19
|
-
const { interfaces } = await client.listInterfaces({
|
|
20
|
-
name
|
|
21
|
-
})
|
|
22
|
-
|
|
23
|
-
const { interface: latest } = await client.getInterface({
|
|
24
|
-
id: _.maxBy(interfaces, 'version')!.id
|
|
25
|
-
})
|
|
26
|
-
|
|
27
|
-
for (const action of Object.keys(latest.actions)) {
|
|
28
|
-
const references = Object.keys(latest.entities).reduce((acc, key) => {
|
|
29
|
-
return { ...acc, [key]: z.fromJsonSchema(latest.entities?.[key]?.schema!) }
|
|
30
|
-
}, {})
|
|
31
|
-
const input = latest.actions[action]?.input.schema!
|
|
32
|
-
const output = latest.actions[action]?.output.schema!
|
|
33
|
-
|
|
34
|
-
const types = `
|
|
35
|
-
// This file is generated. Do not edit it manually.
|
|
36
|
-
// See 'scripts/update-models.ts'
|
|
37
|
-
|
|
38
|
-
/* eslint-disable */
|
|
39
|
-
/* tslint:disable */
|
|
40
|
-
|
|
41
|
-
export namespace ${name} {
|
|
42
|
-
export namespace ${action} {
|
|
43
|
-
export ${sdk.z
|
|
44
|
-
.fromJsonSchema(input)
|
|
45
|
-
.title('Input')
|
|
46
|
-
.dereference(references)
|
|
47
|
-
.toTypescript({ declaration: 'type' })};
|
|
48
|
-
export ${sdk.z
|
|
49
|
-
.fromJsonSchema(output)
|
|
50
|
-
.title('Output')
|
|
51
|
-
.dereference(references)
|
|
52
|
-
.toTypescript({ declaration: 'type' })};
|
|
53
|
-
}
|
|
54
|
-
}`
|
|
55
|
-
|
|
56
|
-
fs.mkdirSync(path.resolve(`./src/sdk-interfaces/${name}`), { recursive: true })
|
|
57
|
-
fs.writeFileSync(path.resolve(`./src/sdk-interfaces/${name}/${action}.ts`), types)
|
|
58
|
-
}
|
|
59
|
-
}
|
|
@@ -1,127 +0,0 @@
|
|
|
1
|
-
|
|
2
|
-
// This file is generated. Do not edit it manually.
|
|
3
|
-
// See 'scripts/update-models.ts'
|
|
4
|
-
|
|
5
|
-
/* eslint-disable */
|
|
6
|
-
/* tslint:disable */
|
|
7
|
-
|
|
8
|
-
// Generated typings for the `llm:generateContent` action (see
// 'scripts/update-models.ts'). Do not hand-edit the member types.
export namespace llm {
  export namespace generateContent {
    /** Input payload accepted by the generateContent action. */
    export type Input = {
      /** Model to use for content generation */
      model?: { id: string };
      /** Optional system prompt to guide the model */
      systemPrompt?: string;
      /** Array of messages for the model to process */
      messages: Array<{
        role: 'user' | 'assistant';
        type?: 'text' | 'tool_calls' | 'tool_result' | 'multipart';
        /** Required if `type` is "tool_calls" */
        toolCalls?: Array<{
          id: string;
          type: 'function';
          function: {
            name: string;
            /** Some LLMs may generate invalid JSON for a tool call, so this will be `null` when it happens. */
            arguments: { [key: string]: any } | null
          }
        }>;
        /** Required if `type` is "tool_result" */
        toolResultCallId?: string;
        /** Required unless `type` is "tool_call". If `type` is "multipart", this field must be an array of content objects. If `type` is "tool_result" then this field should be the result of the tool call (a plain string or a JSON-encoded array or object). If `type` is "tool_call" then the `toolCalls` field should be used instead. */
        content: string | Array<{
          type: 'text' | 'image';
          /** Indicates the MIME type of the content. If not provided it will be detected from the content-type header of the provided URL. */
          mimeType?: string;
          /** Required if part type is "text" */
          text?: string;
          /** Required if part type is "image" */
          url?: string
        }> | null
      }>;
      /** Response format expected from the model. If "json_object" is chosen, you must instruct the model to generate JSON either via the system prompt or a user message. */
      responseFormat?: 'text' | 'json_object';
      /** Maximum number of tokens allowed in the generated response */
      maxTokens?: number;
      /** Sampling temperature for the model. Higher values result in more random outputs. */
      temperature?: number;
      /** Top-p sampling parameter. Limits sampling to the smallest set of tokens with a cumulative probability above the threshold. */
      topP?: number;
      /** Sequences where the model should stop generating further tokens. */
      stopSequences?: string[];
      /** Tools (functions) the model may call. */
      tools?: Array<{
        type: 'function';
        function: {
          /** Function name */
          name: string;
          description?: string;
          /** JSON schema of the function arguments */
          argumentsSchema?: { }
        }
      }>;
      toolChoice?: {
        type?: 'auto' | 'specific' | 'any' | 'none' | '';
        /** Required if `type` is "specific" */
        functionName?: string
      };
      userId?: string;
      /** Set to `true` to output debug information to the bot logs */
      debug?: boolean;
      meta?: {
        /** Source of the prompt, e.g. agent/:id/:version cards/ai-generate, cards/ai-task, nodes/autonomous, etc. */
        promptSource?: string;
        promptCategory?: string;
        /** Name of the integration that originally received the message that initiated this action */
        integrationName?: string
      }
    };
    /** Output payload returned by the generateContent action. */
    export type Output = {
      /** Response ID from LLM provider */
      id: string;
      /** LLM provider name */
      provider: string;
      /** Model name */
      model: string;
      choices: Array<{
        type?: 'text' | 'tool_calls' | 'tool_result' | 'multipart';
        /** Required if `type` is "tool_calls" */
        toolCalls?: Array<{
          id: string;
          type: 'function';
          function: {
            name: string;
            /** Some LLMs may generate invalid JSON for a tool call, so this will be `null` when it happens. */
            arguments: { [key: string]: any } | null
          }
        }>;
        /** Required if `type` is "tool_result" */
        toolResultCallId?: string;
        /** Required unless `type` is "tool_call". If `type` is "multipart", this field must be an array of content objects. If `type` is "tool_result" then this field should be the result of the tool call (a plain string or a JSON-encoded array or object). If `type` is "tool_call" then the `toolCalls` field should be used instead. */
        content: string | Array<{
          type: 'text' | 'image';
          /** Indicates the MIME type of the content. If not provided it will be detected from the content-type header of the provided URL. */
          mimeType?: string;
          /** Required if part type is "text" */
          text?: string;
          /** Required if part type is "image" */
          url?: string
        }> | null;
        role: 'assistant';
        index: number;
        stopReason: 'stop' | 'max_tokens' | 'tool_calls' | 'content_filter' | 'other'
      }>;
      usage: {
        /** Number of input tokens used by the model */
        inputTokens: number;
        /** Cost of the input tokens received by the model, in U.S. dollars */
        inputCost: number;
        /** Number of output tokens used by the model */
        outputTokens: number;
        /** Cost of the output tokens generated by the model, in U.S. dollars */
        outputCost: number
      };
      botpress: {
        /** Total cost of the content generation, in U.S. dollars */
        cost: number
      }
    };
  }
}
|
|
@@ -1,19 +0,0 @@
|
|
|
1
|
-
|
|
2
|
-
// This file is generated. Do not edit it manually.
|
|
3
|
-
// See 'scripts/update-models.ts'
|
|
4
|
-
|
|
5
|
-
/* eslint-disable */
|
|
6
|
-
/* tslint:disable */
|
|
7
|
-
|
|
8
|
-
// Generated typings for the `llm:listLanguageModels` action (see
// 'scripts/update-models.ts'). Do not hand-edit the member types.
export namespace llm {
  export namespace listLanguageModels {
    /** The action takes no input. */
    export type Input = { };
    /** Catalog of available models with token limits and pricing. */
    export type Output = {
      models: Array<{
        id: string;
        name: string;
        description: string;
        tags: Array<'recommended' | 'deprecated' | 'general-purpose' | 'low-cost' | 'vision' | 'coding' | 'agents' | 'function-calling' | 'roleplay' | 'storytelling' | 'reasoning'>;
        input: {
          maxTokens: number;
          /** Cost per 1 million tokens, in U.S. dollars */
          costPer1MTokens: number
        };
        output: {
          maxTokens: number;
          /** Cost per 1 million tokens, in U.S. dollars */
          costPer1MTokens: number
        }
      // NOTE(review): intersection re-adds `id` — artifact of the generator.
      } & { id: string }>
    };
  }
}
|
package/src/task/compare.ts
DELETED
|
@@ -1,72 +0,0 @@
|
|
|
1
|
-
import { z } from '@botpress/sdk'
|
|
2
|
-
import { TestFunction } from 'vitest'
|
|
3
|
-
import { createTaskCollector, getCurrentSuite } from 'vitest/suite'
|
|
4
|
-
import { TestMetadata } from '../context'
|
|
5
|
-
import { Deferred } from '../utils/deferred'
|
|
6
|
-
|
|
7
|
-
// Schema for a scenario identifier: a trimmed string of 1–50 characters.
const scenarioId = z
  .string()
  .trim()
  .min(1, 'Scenario ID/name must not be empty')
  .max(50, 'Scenario ID/name is too long')
|
|
12
|
-
|
|
13
|
-
export type ScenarioLike = z.infer<typeof ScenarioLike>
// A scenario is either a bare id/name string, or an object carrying a `name`
// or an `id` key (extra keys are preserved via passthrough).
const ScenarioLike = z.union([
  scenarioId,
  z.object({ name: scenarioId }).passthrough(),
  z.object({ id: scenarioId }).passthrough()
])
|
|
19
|
-
|
|
20
|
-
const getScenarioName = (scenario: ScenarioLike) =>
|
|
21
|
-
(typeof scenario === 'string' ? scenario : 'name' in scenario ? scenario?.name : scenario?.id) as string
|
|
22
|
-
|
|
23
|
-
const scenarioArgs = z
|
|
24
|
-
.array(ScenarioLike)
|
|
25
|
-
.min(2, 'You need at least two scenarios to compare')
|
|
26
|
-
.max(10, 'You can only compare up to 10 scenarios')
|
|
27
|
-
.refine((scenarios) => {
|
|
28
|
-
const set = new Set<string>()
|
|
29
|
-
scenarios.forEach((scenario) => set.add(getScenarioName(scenario)))
|
|
30
|
-
return set.size === scenarios.length
|
|
31
|
-
}, 'Scenarios names must be unique')
|
|
32
|
-
|
|
33
|
-
/**
 * Registers the same test body once per scenario, so a single `compare(...)`
 * call produces one vitest task per scenario (tagged via `TestMetadata`).
 * The handler receives the regular vitest context extended with the frozen
 * current `scenario`.
 */
export function compare<T extends ReadonlyArray<ScenarioLike>>(
  name: string | Function,
  scenarios: T,
  fn?: TestFunction<{
    scenario: T[number]
  }>
) {
  // Throws if the list is invalid (count, name shape, uniqueness).
  scenarios = scenarioArgs.parse(scenarios) as unknown as T

  // NOTE(review): the collector callback's `fn` parameter shadows the outer
  // `fn` argument — they refer to the same value because the collector is
  // invoked with (name, fn) at the bottom of this function.
  return createTaskCollector((_name, fn, timeout) => {
    const currentSuite = getCurrentSuite()

    let completedCount = 0
    // Resolved once every scenario's task has reported finished.
    // NOTE(review): `finished` is neither awaited nor returned within this
    // block — confirm whether anything relies on it or it is dead code.
    const finished = new Deferred<void>()

    for (const scenario of scenarios) {
      const key = getScenarioName(scenario)

      currentSuite.task(key, {
        meta: {
          scenario: key,
          isVaiTest: true
        } satisfies TestMetadata,
        handler: async (context) => {
          // Expose the active scenario to the test body, read-only.
          const extendedContext = Object.freeze({
            scenario
          })
          context.onTestFinished(() => {
            if (++completedCount === scenarios.length) {
              finished.resolve()
            }
          })

          await fn({ ...context, ...extendedContext })
        },
        // Default per-scenario timeout: 10 seconds.
        timeout: timeout ?? 10_000
      })
    }
  })(name, fn)
}
|
|
@@ -1,40 +0,0 @@
|
|
|
1
|
-
import { Assertion, expect } from 'vitest'
|
|
2
|
-
import { getCurrentTest } from 'vitest/suite'
|
|
3
|
-
import { Context } from '../context'
|
|
4
|
-
import { Output } from './predictJson'
|
|
5
|
-
|
|
6
|
-
export class AsyncExpectError<T> extends Error {
|
|
7
|
-
constructor(message: string, public readonly output: Output<T>) {
|
|
8
|
-
super(message)
|
|
9
|
-
this.name = 'AsyncExpectError'
|
|
10
|
-
}
|
|
11
|
-
}
|
|
12
|
-
|
|
13
|
-
const getErrorMessages = (e: unknown): string => {
|
|
14
|
-
if (e instanceof Error) {
|
|
15
|
-
return e.message
|
|
16
|
-
} else if (typeof e === 'string') {
|
|
17
|
-
return e
|
|
18
|
-
} else if (typeof e === 'object' && e !== null) {
|
|
19
|
-
return JSON.stringify(e)
|
|
20
|
-
}
|
|
21
|
-
|
|
22
|
-
return `Unknown error: ${e}`
|
|
23
|
-
}
|
|
24
|
-
|
|
25
|
-
export const asyncExpect = <T>(output: Promise<Output<T>>, assertion: (assert: Assertion<T>) => void) => {
|
|
26
|
-
const promise = output.then((x) => {
|
|
27
|
-
try {
|
|
28
|
-
assertion(expect(x.result, x.reason))
|
|
29
|
-
} catch (e: unknown) {
|
|
30
|
-
if (Context.wrapError) {
|
|
31
|
-
return new AsyncExpectError<T>(getErrorMessages(e), x)
|
|
32
|
-
}
|
|
33
|
-
throw e
|
|
34
|
-
}
|
|
35
|
-
return x
|
|
36
|
-
})
|
|
37
|
-
getCurrentTest()!.promises ??= []
|
|
38
|
-
getCurrentTest()!.promises!.push(promise)
|
|
39
|
-
return promise
|
|
40
|
-
}
|
package/src/utils/deferred.ts
DELETED
|
@@ -1,20 +0,0 @@
|
|
|
1
|
-
export class Deferred<T> {
|
|
2
|
-
promise: Promise<T>
|
|
3
|
-
private _resolve!: (value: T | PromiseLike<T>) => void
|
|
4
|
-
private _reject!: (reason?: unknown) => void
|
|
5
|
-
|
|
6
|
-
constructor() {
|
|
7
|
-
this.promise = new Promise<T>((resolve, reject) => {
|
|
8
|
-
this._resolve = resolve
|
|
9
|
-
this._reject = reject
|
|
10
|
-
})
|
|
11
|
-
}
|
|
12
|
-
|
|
13
|
-
resolve(value: T | PromiseLike<T>): void {
|
|
14
|
-
this._resolve(value)
|
|
15
|
-
}
|
|
16
|
-
|
|
17
|
-
reject(reason?: unknown): void {
|
|
18
|
-
this._reject(reason)
|
|
19
|
-
}
|
|
20
|
-
}
|
package/src/utils/predictJson.ts
DELETED
|
@@ -1,114 +0,0 @@
|
|
|
1
|
-
import { z, ZodSchema } from '@botpress/sdk'
|
|
2
|
-
import JSON5 from 'json5'
|
|
3
|
-
import { Context } from '../context'
|
|
4
|
-
import { llm } from '../sdk-interfaces/llm/generateContent'
|
|
5
|
-
|
|
6
|
-
// Schema: a trimmed, non-empty string.
const nonEmptyString = z.string().trim().min(1)
// Schema: any object with at least one key (unknown keys pass through).
const nonEmptyObject = z
  .object({})
  .passthrough()
  .refine((value) => Object.keys(value).length > 0, {
    message: 'Expected a non-empty object'
  })

// Accepted prediction input: non-empty string, non-empty object, or any array.
export type Input = z.infer<typeof Input>
const Input = nonEmptyString.or(nonEmptyObject).or(z.array(z.any()))

// Shape every prediction must return: a reason plus the typed result.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type Output<T = any> = z.infer<typeof Output> & { result: T }
const Output = z.object({
  reason: nonEmptyString.describe('A human-readable explanation of the result'),
  result: z
    .any()
    .describe(
      'Your best guess at the output according to the instructions provided, rooted in the context of the input and the reason above'
    )
})

// A few-shot example: an input paired with the expected output.
type Example = z.infer<typeof Example>
const Example = z.object({
  input: Input,
  output: Output
})

// Caller-facing options; `outputSchema` keeps its precise zod type T.
type InputOptions<T extends ZodSchema = ZodSchema> = z.input<typeof Options> & { outputSchema: T }
type Options = z.infer<typeof Options>
const Options = z.object({
  systemMessage: z.string(),
  examples: z.array(Example).default([]),
  input: Input,
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  outputSchema: z.custom<ZodSchema<any>>((value) => value instanceof ZodSchema),
  model: z.string()
})

// Chat message shape sent to the generateContent action.
type Message = {
  role: 'user' | 'assistant' | 'system'
  content: string
}
|
|
49
|
-
|
|
50
|
-
const isValidExample =
|
|
51
|
-
(outputSchema: ZodSchema) =>
|
|
52
|
-
(example: Example): example is Example =>
|
|
53
|
-
Input.safeParse(example.input).success &&
|
|
54
|
-
Output.safeParse(example.output).success &&
|
|
55
|
-
outputSchema.safeParse(example.output.result).success
|
|
56
|
-
|
|
57
|
-
/**
 * Calls an LLM `generateContent` action and parses its JSON reply.
 *
 * `options.model` is expected in the form `<integration>__<modelId>`; the
 * integration part selects which `:generateContent` action is called. Valid
 * few-shot examples are sent as alternating user/assistant messages, and the
 * system prompt instructs the model to reply with JSON matching `Output`
 * where `result` conforms to `options.outputSchema`.
 *
 * @throws Error when the model id is missing, the response has no string
 *   content, the content is empty, or the parsed JSON fails validation.
 */
export async function predictJson<T extends ZodSchema>(_options: InputOptions<T>): Promise<Output<z.infer<T>>> {
  const options = Options.parse(_options)
  // Split '<integration>__<model>'; a missing '__' leaves `model` undefined.
  const [integration, model] = options.model.split('__')

  if (!model?.length) {
    throw new Error('Invalid model')
  }

  // Drop malformed examples, then flatten each into a user/assistant pair.
  const exampleMessages = options.examples
    .filter(isValidExample(options.outputSchema))
    .flatMap(({ input, output }) => [
      { role: 'user', content: JSON.stringify(input, null, 2) } satisfies Message,
      { role: 'assistant', content: JSON.stringify(output, null, 2) } satisfies Message
    ])

  // Specialize the generic Output schema with the caller's result schema.
  const outputSchema = Output.extend({
    result: options.outputSchema.describe(Output.shape.result.description!)
  })

  const result = await Context.client.callAction({
    type: `${integration}:generateContent`,
    input: {
      // Embed the expected TypeScript shape in the system prompt so the
      // model knows the exact JSON format to produce.
      systemPrompt: `
${options.systemMessage}

---
Please generate a JSON response with the following format:
\`\`\`typescript
${await outputSchema.toTypescriptAsync()}
\`\`\`
`.trim(),
      messages: [
        ...exampleMessages,
        {
          role: 'user',
          content: JSON.stringify(options.input, null, 2)
        }
      ],
      // Deterministic output, forced JSON mode.
      temperature: 0,
      responseFormat: 'json_object',
      model: { id: model! }
    } satisfies llm.generateContent.Input
  })

  const output = result.output as llm.generateContent.Output

  if (!output.choices.length || typeof output.choices?.[0]?.content !== 'string') {
    throw new Error('Invalid response from the model')
  }

  const json = output.choices[0].content.trim()

  if (!json.length) {
    throw new Error('No response from the model')
  }

  // JSON5 tolerates trailing commas/comments some models emit; the schema
  // parse then enforces the declared output shape.
  return outputSchema.parse(JSON5.parse(json)) as Output<z.infer<T>>
}
|