@botpress/cognitive 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +9 -9
- package/.turbo/turbo-generate.log +1 -1
- package/dist/index.cjs +7 -4
- package/dist/index.cjs.map +2 -2
- package/dist/index.d.ts +14 -13
- package/dist/index.mjs +7 -4
- package/dist/index.mjs.map +2 -2
- package/e2e/client.test.ts +126 -0
- package/e2e/client.ts +13 -0
- package/e2e/models.json +562 -0
- package/e2e/models.test.ts +131 -0
- package/package.json +4 -4
- package/tsconfig.build.json +9 -0
package/e2e/client.test.ts
ADDED
@@ -0,0 +1,126 @@
+import { describe, test, expect, vi, beforeEach } from 'vitest'
+import { Cognitive } from '../src/client'
+import { getTestClient } from './client'
+import MODELS from './models.json'
+import { RemoteModelProvider } from '../src/models'
+import { GenerateContentOutput } from '../src/gen'
+
+const RandomResponse = {
+  output: {
+    botpress: { cost: 123 },
+    choices: [{ role: 'assistant', content: 'This is the LLM response', stopReason: 'stop', index: 1 }],
+    id: '123456',
+    model: '',
+    provider: '',
+    usage: { inputCost: 1, inputTokens: 2, outputCost: 3, outputTokens: 4 },
+  } satisfies GenerateContentOutput,
+  meta: {},
+} as const
+
+// Simple mock for the provider
+class MockProvider extends RemoteModelProvider {
+  fetchModelPreferences = vi.fn().mockResolvedValue(null)
+  fetchInstalledModels = vi.fn().mockResolvedValue(MODELS)
+  saveModelPreferences = vi.fn().mockResolvedValue(void 0)
+}
+
+class TestClient {
+  callAction = vi.fn().mockImplementation(() => {
+    if (this.axiosInstance.defaults?.signal?.aborted) {
+      throw this.axiosInstance.defaults?.signal.reason ?? 'Aborted'
+    }
+    return Promise.resolve(RandomResponse)
+  })
+  getBot = vi.fn()
+  getFile = vi.fn()
+  axiosInstance = {
+    defaults: { signal: new AbortController().signal },
+  }
+  config = { headers: { 'x-bot-id': 'test' } }
+  clone = () => this
+}
+
+describe('constructor', () => {
+  test('valid client', () => {
+    // Just check that no error is thrown
+    const provider = new MockProvider(getTestClient())
+    expect(() => new Cognitive({ client: getTestClient(), provider })).not.toThrow()
+  })
+})
+
+describe('client', () => {
+  let bp: TestClient
+  let client: Cognitive
+  let provider: MockProvider
+
+  beforeEach(() => {
+    vi.clearAllMocks()
+    bp = new TestClient()
+    provider = new MockProvider(bp)
+    client = new Cognitive({ client: bp, provider })
+  })
+
+  describe('predict (request)', () => {
+    test('fetches models when preferences are not available and saves the preferences', async () => {
+      await client.generateContent({ messages: [], model: 'best' })
+      expect(provider.fetchModelPreferences).toHaveBeenCalled()
+      expect(provider.fetchInstalledModels).toHaveBeenCalled()
+      expect(provider.saveModelPreferences).toHaveBeenCalled()
+    })
+
+    test('fetches model preferences the first time generateContent is called', async () => {
+      await client.generateContent({ messages: [], model: 'fast' })
+      // fetchInstalledModels is called because fetchModelPreferences returned null
+      expect(provider.fetchInstalledModels).toHaveBeenCalledTimes(1)
+      // A second call won't fetch again if preferences are cached
+      await client.generateContent({ messages: [], model: 'fast' })
+      expect(provider.fetchInstalledModels).toHaveBeenCalledTimes(1)
+    })
+  })
+
+  describe('predict (fallback)', () => {
+    test('when model is unavailable, registers the downtime, saves it, and selects another model', async () => {
+      client = new Cognitive({ client: bp, provider })
+
+      bp.callAction.mockRejectedValueOnce({
+        isApiError: true,
+        code: 400,
+        id: '123',
+        type: 'UPSTREAM_PROVIDER_FAILED',
+        subtype: 'UPSTREAM_PROVIDER_FAILED',
+      })
+
+      provider.fetchModelPreferences.mockResolvedValue({
+        best: ['a:a', 'b:b'],
+      })
+
+      // First generate call triggers fallback
+      await client.generateContent({ messages: [], model: 'a:a' })
+
+      expect(bp.callAction).toHaveBeenCalledTimes(2)
+      expect(provider.saveModelPreferences).toHaveBeenCalledOnce()
+      expect(provider.saveModelPreferences.mock.calls[0]?.[0].best).toMatchObject(['a:a', 'b:b'])
+      expect(provider.saveModelPreferences.mock.calls[0]?.[0].downtimes[0].ref).toBe('a:a')
+    })
+  })
+
+  describe('predict (abort)', () => {
+    test('abort request', async () => {
+      const ac = new AbortController()
+      ac.abort('Manual abort')
+
+      await expect(client.generateContent({ messages: [], signal: ac.signal })).rejects.toMatch('Manual abort')
+    })
+  })
+
+  describe('predict (response)', () => {
+    test('request cost and metrics are returned', async () => {
+      const resp = await client.generateContent({ messages: [] })
+      expect(resp.meta.cost.input).toBe(1)
+      expect(resp.meta.cost.output).toBe(3)
+      expect(resp.meta.tokens.input).toBe(2)
+      expect(resp.meta.tokens.output).toBe(4)
+      expect(resp.output.choices[0]?.content).toBe('This is the LLM response')
+    })
+  })
+})
package/e2e/client.ts
ADDED
@@ -0,0 +1,13 @@
+import 'dotenv/config'
+
+import { Client } from '@botpress/client'
+import { getExtendedClient } from '../src/bp-client'
+
+export const getTestClient = () =>
+  getExtendedClient(
+    new Client({
+      apiUrl: process.env.CLOUD_API_ENDPOINT ?? 'https://api.botpress.dev',
+      botId: process.env.CLOUD_BOT_ID,
+      token: process.env.CLOUD_PAT,
+    })
+  )
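
For orientation, a minimal sketch of how the added e2e helpers fit together: the env-configured client from package/e2e/client.ts feeding a Cognitive instance and a generateContent call, mirroring the tests above. It is not part of the published diff; the direct construction of RemoteModelProvider and the 'best' model alias are assumptions for illustration, since the tests only exercise a mocked provider subclass.

// Usage sketch (assumption: CLOUD_API_ENDPOINT, CLOUD_BOT_ID and CLOUD_PAT are set,
// as required by package/e2e/client.ts above)
import { Cognitive } from '../src/client'
import { RemoteModelProvider } from '../src/models'
import { getTestClient } from './client'

const main = async () => {
  const bp = getTestClient()
  // Assumption: RemoteModelProvider can be constructed directly from the extended client,
  // the same way MockProvider is instantiated in client.test.ts.
  const cognitive = new Cognitive({ client: bp, provider: new RemoteModelProvider(bp) })

  // 'best' and 'fast' are the model aliases exercised by the tests above.
  const res = await cognitive.generateContent({ messages: [], model: 'best' })

  console.log(res.output.choices[0]?.content)
  console.log(res.meta.cost, res.meta.tokens)
}

void main()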