@botpress/zai 1.0.1 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/adapter.js +2 -0
- package/dist/adapters/botpress-table.js +168 -0
- package/dist/adapters/memory.js +12 -0
- package/dist/index.d.ts +99 -98
- package/dist/index.js +9 -1873
- package/dist/models.js +387 -0
- package/dist/operations/check.js +141 -0
- package/dist/operations/constants.js +2 -0
- package/dist/operations/errors.js +15 -0
- package/dist/operations/extract.js +212 -0
- package/dist/operations/filter.js +179 -0
- package/dist/operations/label.js +237 -0
- package/dist/operations/rewrite.js +111 -0
- package/dist/operations/summarize.js +132 -0
- package/dist/operations/text.js +46 -0
- package/dist/utils.js +43 -0
- package/dist/zai.js +140 -0
- package/package.json +21 -19
- package/src/adapters/adapter.ts +35 -0
- package/src/adapters/botpress-table.ts +210 -0
- package/src/adapters/memory.ts +13 -0
- package/src/index.ts +11 -0
- package/src/models.ts +394 -0
- package/src/operations/__tests/botpress_docs.txt +26040 -0
- package/src/operations/__tests/cache.jsonl +101 -0
- package/src/operations/__tests/index.ts +87 -0
- package/src/operations/check.ts +187 -0
- package/src/operations/constants.ts +2 -0
- package/src/operations/errors.ts +9 -0
- package/src/operations/extract.ts +291 -0
- package/src/operations/filter.ts +231 -0
- package/src/operations/label.ts +332 -0
- package/src/operations/rewrite.ts +148 -0
- package/src/operations/summarize.ts +193 -0
- package/src/operations/text.ts +63 -0
- package/src/sdk-interfaces/llm/generateContent.ts +127 -0
- package/src/sdk-interfaces/llm/listLanguageModels.ts +19 -0
- package/src/utils.ts +61 -0
- package/src/zai.ts +193 -0
- package/tsconfig.json +2 -2
- package/dist/index.cjs +0 -1903
- package/dist/index.cjs.map +0 -1
- package/dist/index.d.cts +0 -916
- package/dist/index.js.map +0 -1
- package/tsup.config.ts +0 -16
- package/vitest.config.ts +0 -9
- package/vitest.setup.ts +0 -24

package/src/adapters/botpress-table.ts
ADDED

@@ -0,0 +1,210 @@
+import { type Client } from '@botpress/client'
+import { z } from '@bpinternal/zui'
+
+import { BotpressClient, GenerationMetadata } from '../utils'
+import { Adapter, GetExamplesProps, SaveExampleProps } from './adapter'
+
+const CRITICAL_TAGS = {
+  system: 'true',
+  'schema-purpose': 'active-learning',
+  'schema-version': 'Oct-2024'
+} as const
+
+const OPTIONAL_TAGS = {
+  'x-studio-title': 'Active Learning',
+  'x-studio-description': 'Table for storing active learning tasks and examples',
+  'x-studio-readonly': 'true',
+  'x-studio-icon': 'lucide://atom',
+  'x-studio-color': 'green'
+} as const
+
+const FACTOR = 30
+
+const Props = z.object({
+  client: BotpressClient,
+  tableName: z
+    .string()
+    .regex(
+      /^[a-zA-Z0-9_]{1,45}Table$/,
+      'Table name must be lowercase and contain only letters, numbers and underscores'
+    )
+})
+
+export type TableSchema = z.input<typeof TableSchema>
+const TableSchema = z.object({
+  taskType: z.string().describe('The type of the task (filter, extract, etc.)'),
+  taskId: z.string(),
+  key: z.string().describe('A unique key for the task (e.g. a hash of the input, taskId, taskType and instructions)'),
+  instructions: z.string(),
+  input: z.object({}).passthrough().describe('The input to the task'),
+  output: z.object({}).passthrough().describe('The expected output'),
+  explanation: z.string().nullable(),
+  metadata: GenerationMetadata,
+  status: z.enum(['pending', 'rejected', 'approved']),
+  feedback: z
+    .object({
+      rating: z.enum(['very-bad', 'bad', 'good', 'very-good']),
+      comment: z.string().nullable()
+    })
+    .nullable()
+    .default(null)
+})
+
+const searchableColumns = ['input'] as const satisfies Array<keyof typeof TableSchema.shape> as string[]
+
+const TableJsonSchema = Object.entries(TableSchema.shape).reduce((acc, [key, value]) => {
+  acc[key] = value.toJsonSchema()
+  acc[key]['x-zui'] ??= {}
+  acc[key]['x-zui'].searchable = searchableColumns.includes(key)
+  return acc
+}, {})
+
+export class TableAdapter extends Adapter {
+  private client: Client
+  private tableName: string
+
+  private status: 'initialized' | 'ready' | 'error'
+
+  constructor(props: z.input<typeof Props>) {
+    super()
+    props = Props.parse(props)
+    this.client = props.client
+    this.tableName = props.tableName
+    this.status = 'ready'
+  }
+
+  public async getExamples<TInput, TOutput>({ taskType, taskId, input }: GetExamplesProps<TInput>) {
+    await this.assertTableExists()
+
+    const { rows } = await this.client
+      .findTableRows({
+        table: this.tableName,
+        search: JSON.stringify({ value: input }).substring(0, 1023), // Search is limited to 1024 characters
+        limit: 10, // TODO
+        filter: {
+          // Proximity match of approved examples
+          taskType,
+          taskId,
+          status: 'approved'
+        } satisfies Partial<TableSchema>
+      })
+      .catch((err) => {
+        // TODO: handle error
+        console.error(`Error fetching examples: ${err.message}`)
+        return { rows: [] }
+      })
+
+    return rows.map((row) => ({
+      key: row.key,
+      input: row.input.value as TInput,
+      output: row.output.value as TOutput,
+      explanation: row.explanation,
+      similarity: row.similarity ?? 0
+    }))
+  }
+
+  public async saveExample<TInput, TOutput>({
+    key,
+    taskType,
+    taskId,
+    instructions,
+    input,
+    output,
+    explanation,
+    metadata,
+    status = 'pending'
+  }: SaveExampleProps<TInput, TOutput>) {
+    await this.assertTableExists()
+
+    await this.client
+      .upsertTableRows({
+        table: this.tableName,
+        keyColumn: 'key',
+        rows: [
+          {
+            key,
+            taskType,
+            taskId,
+            instructions,
+            input: { value: input },
+            output: { value: output },
+            explanation: explanation ?? null,
+            status,
+            metadata
+          } satisfies TableSchema
+        ]
+      })
+      .catch(() => {
+        // TODO: handle error
+      })
+  }
+
+  private async assertTableExists() {
+    if (this.status !== 'ready') {
+      return
+    }
+
+    const { table, created } = await this.client
+      .getOrCreateTable({
+        table: this.tableName,
+        factor: FACTOR,
+        frozen: true,
+        isComputeEnabled: false,
+        tags: {
+          ...CRITICAL_TAGS,
+          ...OPTIONAL_TAGS
+        },
+        schema: TableJsonSchema
+      })
+      .catch(() => {
+        this.status = 'error'
+        return { table: null, created: false }
+      })
+
+    if (!table) {
+      return
+    }
+
+    if (!created) {
+      const issues: string[] = []
+
+      if (table.factor !== FACTOR) {
+        issues.push(`Factor is ${table.factor} instead of ${FACTOR}`)
+      }
+
+      if (table.frozen !== true) {
+        issues.push('Table is not frozen')
+      }
+
+      for (const [key, value] of Object.entries(CRITICAL_TAGS)) {
+        if (table.tags?.[key] !== value) {
+          issues.push(`Tag ${key} is ${table.tags?.[key]} instead of ${value}`)
+        }
+      }
+
+      for (const key of Object.keys(TableJsonSchema)) {
+        const column = table.schema?.properties[key]
+        const expected = TableJsonSchema[key] as { type: string }
+
+        if (!column) {
+          issues.push(`Column ${key} is missing`)
+          continue
+        }
+
+        if (column.type !== expected.type) {
+          issues.push(`Column ${key} has type ${column.type} instead of ${expected.type}`)
+        }
+
+        if (expected['x-zui'].searchable && !column['x-zui'].searchable) {
+          issues.push(`Column ${key} is not searchable but should be`)
+        }
+      }
+
+      if (issues.length) {
+        this.status = 'error'
+      }
+    }
+
+    this.status = 'initialized'
+  }
+}
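For orientation only (not part of the diff): a minimal sketch of how the new TableAdapter might be used. The credentials and task identifiers are placeholders, and it assumes GetExamplesProps needs no fields beyond the three destructured in getExamples above; its full definition lives in package/src/adapters/adapter.ts, which is not reproduced here.

```ts
// Illustrative sketch, not package code. Credentials and ids are placeholders.
import { Client } from '@botpress/client'
import { TableAdapter } from './adapters/botpress-table'

async function main() {
  const client = new Client({ token: '<token>', botId: '<bot-id>' }) // placeholders

  // The table name must match /^[a-zA-Z0-9_]{1,45}Table$/ per the Props schema above
  const adapter = new TableAdapter({ client, tableName: 'ActiveLearningTable' })

  // Proximity search over approved examples for this task (limit 10, see getExamples)
  const examples = await adapter.getExamples<string, boolean>({
    taskType: 'check',
    taskId: 'my-task', // placeholder
    input: 'Is this message spam?'
  })

  console.log(examples.map((e) => ({ key: e.key, similarity: e.similarity })))
}

void main()
```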
package/src/index.ts
ADDED

@@ -0,0 +1,11 @@
+import { Zai } from './zai'
+
+import './operations/text'
+import './operations/rewrite'
+import './operations/summarize'
+import './operations/check'
+import './operations/filter'
+import './operations/extract'
+import './operations/label'
+
+export { Zai }
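The operation modules above are imported only for their side effects. A common pattern for this (assumed here, since the operations/*.ts files are not reproduced in this excerpt) is for each module to register its method on Zai.prototype via declaration merging, roughly:

```ts
// Hypothetical sketch of the side-effect registration pattern; the actual
// method names and signatures are defined in package/src/operations/*.ts.
import { Zai } from './zai'

declare module './zai' {
  interface Zai {
    summarize(text: string, options?: { length?: number }): Promise<string>
  }
}

Zai.prototype.summarize = async function (text, options) {
  // A real implementation would call the configured LLM; this stub only truncates.
  return text.slice(0, options?.length ?? 100)
}
```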
package/src/models.ts
ADDED

@@ -0,0 +1,394 @@
+
+// This file is generated. Do not edit it manually.
+// See 'scripts/update-models.ts'
+
+/* eslint-disable */
+/* tslint:disable */
+
+export const Models = [
+  {
+    "id": "anthropic__claude-3-haiku-20240307",
+    "name": "Claude 3 Haiku",
+    "integration": "anthropic",
+    "input": {
+      "maxTokens": 200000
+    },
+    "output": {
+      "maxTokens": 4096
+    }
+  },
+  {
+    "id": "anthropic__claude-3-5-sonnet-20240620",
+    "name": "Claude 3.5 Sonnet",
+    "integration": "anthropic",
+    "input": {
+      "maxTokens": 200000
+    },
+    "output": {
+      "maxTokens": 4096
+    }
+  },
+  {
+    "id": "cerebras__llama3.1-70b",
+    "name": "Llama 3.1 70B",
+    "integration": "cerebras",
+    "input": {
+      "maxTokens": 8192
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "cerebras__llama3.1-8b",
+    "name": "Llama 3.1 8B",
+    "integration": "cerebras",
+    "input": {
+      "maxTokens": 8192
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/deepseek-coder-v2-instruct",
+    "name": "DeepSeek Coder V2 Instruct",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 131072
+    },
+    "output": {
+      "maxTokens": 131072
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/deepseek-coder-v2-lite-instruct",
+    "name": "DeepSeek Coder V2 Lite",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 163840
+    },
+    "output": {
+      "maxTokens": 163840
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/firellava-13b",
+    "name": "FireLLaVA-13B",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 4096
+    },
+    "output": {
+      "maxTokens": 4096
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/firefunction-v2",
+    "name": "Firefunction V2",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 8192
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/gemma2-9b-it",
+    "name": "Gemma 2 9B Instruct",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 8192
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/llama-v3p1-405b-instruct",
+    "name": "Llama 3.1 405B Instruct",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 131072
+    },
+    "output": {
+      "maxTokens": 131072
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/llama-v3p1-70b-instruct",
+    "name": "Llama 3.1 70B Instruct",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 131072
+    },
+    "output": {
+      "maxTokens": 131072
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/llama-v3p1-8b-instruct",
+    "name": "Llama 3.1 8B Instruct",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 131072
+    },
+    "output": {
+      "maxTokens": 131072
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/mixtral-8x22b-instruct",
+    "name": "Mixtral MoE 8x22B Instruct",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 65536
+    },
+    "output": {
+      "maxTokens": 65536
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/mixtral-8x7b-instruct",
+    "name": "Mixtral MoE 8x7B Instruct",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 32768
+    },
+    "output": {
+      "maxTokens": 32768
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/mythomax-l2-13b",
+    "name": "MythoMax L2 13b",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 4096
+    },
+    "output": {
+      "maxTokens": 4096
+    }
+  },
+  {
+    "id": "fireworks-ai__accounts/fireworks/models/qwen2-72b-instruct",
+    "name": "Qwen2 72b Instruct",
+    "integration": "fireworks-ai",
+    "input": {
+      "maxTokens": 32768
+    },
+    "output": {
+      "maxTokens": 32768
+    }
+  },
+  {
+    "id": "groq__gemma2-9b-it",
+    "name": "Gemma2 9B",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 8192
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "groq__llama3-70b-8192",
+    "name": "LLaMA 3 70B",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 8192
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "groq__llama3-8b-8192",
+    "name": "LLaMA 3 8B",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 8192
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "groq__llama-3.1-70b-versatile",
+    "name": "LLaMA 3.1 70B",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "groq__llama-3.1-8b-instant",
+    "name": "LLaMA 3.1 8B",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "groq__llama-3.2-11b-vision-preview",
+    "name": "LLaMA 3.2 11B Vision",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "groq__llama-3.2-1b-preview",
+    "name": "LLaMA 3.2 1B",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "groq__llama-3.2-3b-preview",
+    "name": "LLaMA 3.2 3B",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "groq__llama-3.2-90b-vision-preview",
+    "name": "LLaMA 3.2 90B Vision",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 8192
+    }
+  },
+  {
+    "id": "groq__llama-3.3-70b-versatile",
+    "name": "LLaMA 3.3 70B",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 32768
+    }
+  },
+  {
+    "id": "groq__mixtral-8x7b-32768",
+    "name": "Mixtral 8x7B",
+    "integration": "groq",
+    "input": {
+      "maxTokens": 32768
+    },
+    "output": {
+      "maxTokens": 32768
+    }
+  },
+  {
+    "id": "openai__o1-2024-12-17",
+    "name": "GPT o1",
+    "integration": "openai",
+    "input": {
+      "maxTokens": 200000
+    },
+    "output": {
+      "maxTokens": 100000
+    }
+  },
+  {
+    "id": "openai__o1-mini-2024-09-12",
+    "name": "GPT o1-mini",
+    "integration": "openai",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 65536
+    }
+  },
+  {
+    "id": "openai__gpt-3.5-turbo-0125",
+    "name": "GPT-3.5 Turbo",
+    "integration": "openai",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 4096
+    }
+  },
+  {
+    "id": "openai__gpt-4-turbo-2024-04-09",
+    "name": "GPT-4 Turbo",
+    "integration": "openai",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 4096
+    }
+  },
+  {
+    "id": "openai__gpt-4o-2024-08-06",
+    "name": "GPT-4o (August 2024)",
+    "integration": "openai",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 16384
+    }
+  },
+  {
+    "id": "openai__gpt-4o-2024-05-13",
+    "name": "GPT-4o (May 2024)",
+    "integration": "openai",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 4096
+    }
+  },
+  {
+    "id": "openai__gpt-4o-2024-11-20",
+    "name": "GPT-4o (November 2024)",
+    "integration": "openai",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 16384
+    }
+  },
+  {
+    "id": "openai__gpt-4o-mini-2024-07-18",
+    "name": "GPT-4o Mini",
+    "integration": "openai",
+    "input": {
+      "maxTokens": 128000
+    },
+    "output": {
+      "maxTokens": 16384
+    }
+  }
+] as const
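models.ts is a generated catalog of the language models the package knows about. Because the array is declared `as const`, the model ids form a literal union that downstream code can use for typed lookups; a small illustrative helper (not part of the package) is sketched below.

```ts
import { Models } from './models'

// Literal union of every known model id, e.g. 'openai__gpt-4o-mini-2024-07-18'
type ModelId = (typeof Models)[number]['id']

// Illustrative helper: look up a model's token limits by id
function getModel(id: ModelId) {
  return Models.find((m) => m.id === id)
}

const model = getModel('openai__gpt-4o-mini-2024-07-18')
console.log(model?.input.maxTokens, model?.output.maxTokens) // 128000 16384
```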