@forge/llm 0.2.0-next.2-experimental-f76634b → 0.2.0-next.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +56 -0
- package/out/fetch-wrapper.d.ts.map +1 -1
- package/out/fetch-wrapper.js +5 -1
- package/out/utils/error-handling.d.ts.map +1 -1
- package/out/utils/error-handling.js +1 -1
- package/package.json +2 -5
- package/out/__test__/error-handling.test.d.ts +0 -2
- package/out/__test__/error-handling.test.d.ts.map +0 -1
- package/out/__test__/error-handling.test.js +0 -105
- package/out/__test__/fetch-wrapper-test.d.ts +0 -2
- package/out/__test__/fetch-wrapper-test.d.ts.map +0 -1
- package/out/__test__/fetch-wrapper-test.js +0 -14
- package/out/__test__/index.test.d.ts +0 -2
- package/out/__test__/index.test.d.ts.map +0 -1
- package/out/__test__/index.test.js +0 -37
- package/out/__test__/llm-api.test.d.ts +0 -2
- package/out/__test__/llm-api.test.d.ts.map +0 -1
- package/out/__test__/llm-api.test.js +0 -104
- package/out/__test__/response-mapper.test.d.ts +0 -2
- package/out/__test__/response-mapper.test.d.ts.map +0 -1
- package/out/__test__/response-mapper.test.js +0 -73
- package/out/__test__/test-helpers.d.ts +0 -20
- package/out/__test__/test-helpers.d.ts.map +0 -1
- package/out/__test__/test-helpers.js +0 -48
- package/out/__test__/validators.test.d.ts +0 -2
- package/out/__test__/validators.test.d.ts.map +0 -1
- package/out/__test__/validators.test.js +0 -95
package/CHANGELOG.md
ADDED
@@ -0,0 +1,56 @@
+# @forge/llm
+
+## 0.2.0-next.4
+
+### Patch Changes
+
+- 9e49ce9: Excluded test files
+
+## 0.2.0-next.3
+
+### Minor Changes
+
+- 4adf05c: Add trace Id
+
+## 0.2.0-next.2
+
+### Patch Changes
+
+- Updated dependencies [def4ec3]
+  - @forge/api@6.3.0-next.2
+
+## 0.2.0-next.1
+
+### Minor Changes
+
+- 4f53a62: Add support for 'usage' attribute in the chat's response object
+
+## 0.1.1-next.0
+
+### Patch Changes
+
+- @forge/api@6.2.1-next.0
+
+## 0.1.0
+
+### Minor Changes
+
+- dd34523: Forge LLM SDK added
+- abe9c21: Forge LLM SDK core implementation
+
+### Patch Changes
+
+- Updated dependencies [0af355f]
+  - @forge/api@6.2.0
+
+## 0.1.0-next.1
+
+### Minor Changes
+
+- abe9c21: Forge LLM SDK core implementation
+
+## 0.1.0-next.0
+
+### Minor Changes
+
+- dd34523: Forge LLM SDK added
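For context on the 0.2.0-next.1 entry: a minimal sketch of calling the SDK's exported `chat` function and reading the new `usage` attribute. The prompt shape, model id, and the `input_token`/`output_token`/`total_token` field names come from the removed test files further down in this diff; the exact `ChatResponse` typing is otherwise an assumption.

```ts
import { chat } from '@forge/llm';

async function example() {
  const response = await chat({
    model: 'claude-sonnet-4-20250514',
    messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello, how are you?' }] }],
    temperature: 0.7,
    max_completion_tokens: 1000
  });
  console.log(response.choices[0]?.message);
  // Field names as asserted in the removed response-mapper.test.js below.
  console.log(response.usage); // e.g. { input_token: 10, output_token: 20, total_token: 30 }
}
```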
package/out/fetch-wrapper.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"fetch-wrapper.d.ts","sourceRoot":"","sources":["../src/fetch-wrapper.ts"],"names":[],"mappings":"AAAA,OAAO,
+{"version":3,"file":"fetch-wrapper.d.ts","sourceRoot":"","sources":["../src/fetch-wrapper.ts"],"names":[],"mappings":"AAAA,OAAO,EAA6B,WAAW,EAAe,MAAM,YAAY,CAAC;AAEjF,wBAAgB,eAAe,IAAI,WAAW,CAc7C"}
package/out/fetch-wrapper.js
CHANGED
@@ -1,14 +1,18 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.getFetchWrapper = void 0;
+const api_1 = require("@forge/api");
 function getFetchWrapper() {
+    const { tracing } = (0, api_1.__getRuntime)();
     return async function (path, options) {
         const model = path?.split('/').pop();
         return await global.__forge_fetch__({ type: 'llm', model: model }, path, {
             ...options,
             headers: {
                 ...options?.headers,
-                'Content-Type': 'application/json'
+                'Content-Type': 'application/json',
+                'x-b3-traceid': tracing.traceId,
+                'x-b3-spanid': tracing.spanId
             }
         });
     };
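The change above (presumably the 0.2.0-next.3 "Add trace Id" changeset) threads B3 trace headers from the Forge runtime into every LLM request. A rough TypeScript sketch of the source this compiles from: `__getRuntime`, the header names, and the `__forge_fetch__` call are taken directly from the emitted JavaScript, while the option and global typings here are assumptions.

```ts
import { __getRuntime } from '@forge/api';

// Assumed typings; the real ones live in the package's type declarations.
type FetchOptions = { method?: string; body?: string; headers?: Record<string, string> };
type FetchWrapper = (path: string, options?: FetchOptions) => Promise<Response>;

declare global {
  // Injected by the Forge runtime, as seen in the compiled output above.
  var __forge_fetch__: (
    ctx: { type: 'llm'; model?: string },
    path: string,
    options?: FetchOptions
  ) => Promise<Response>;
}

export function getFetchWrapper(): FetchWrapper {
  const { tracing } = __getRuntime();
  return async function (path, options) {
    // The model id is the last path segment, e.g. 'https://llm/<model-id>'.
    const model = path?.split('/').pop();
    return await global.__forge_fetch__({ type: 'llm', model }, path, {
      ...options,
      headers: {
        ...options?.headers,
        'Content-Type': 'application/json',
        // New: propagate the caller's trace context on every LLM call.
        'x-b3-traceid': tracing.traceId,
        'x-b3-spanid': tracing.spanId
      }
    });
  };
}
```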
package/out/utils/error-handling.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"error-handling.d.ts","sourceRoot":"","sources":["../../src/utils/error-handling.ts"],"names":[],"mappings":"AAAA,OAAO,EAAuC,UAAU,EAAoB,MAAM,WAAW,CAAC;AAE9F,OAAO,EAAE,WAAW,EAAE,MAAM,YAAY,CAAC;
+{"version":3,"file":"error-handling.d.ts","sourceRoot":"","sources":["../../src/utils/error-handling.ts"],"names":[],"mappings":"AAAA,OAAO,EAAuC,UAAU,EAAoB,MAAM,WAAW,CAAC;AAE9F,OAAO,EAAE,WAAW,EAAE,MAAM,YAAY,CAAC;AAQzC,wBAAgB,YAAY,CAAC,IAAI,EAAE,OAAO,GAAG,IAAI,IAAI,UAAU,CAE9D;AAED,wBAAsB,kBAAkB,CAAC,QAAQ,EAAE,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC,CAwB7E;AAED,wBAAgB,iBAAiB,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,GAAG,SAAS,CAMnE"}
package/out/utils/error-handling.js
CHANGED
@@ -4,7 +4,7 @@ exports.safeGetParsedBody = exports.checkResponseError = exports.isForgeError =
 const errors_1 = require("../errors");
 const text_1 = require("../text");
 function extractTraceId(response) {
-    return response.headers.get('x-b3-traceid') || response.headers.get('x-trace-id');
+    return (response.headers.get('x-b3-traceid') || response.headers.get('x-trace-id') || response.headers.get('atl-traceid'));
 }
 function isForgeError(body) {
     return typeof body === 'object' && body !== null && 'code' in body && 'message' in body;
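In source terms, this hunk just adds a third header fallback when extracting a trace id from an error response; a sketch, with the `Response` parameter typing assumed (the compiled module runs against whatever the Forge fetch returns):

```ts
function extractTraceId(response: Response): string | null {
  // The diff adds 'atl-traceid' as a third fallback after the existing
  // B3 and x-trace-id lookups.
  return (
    response.headers.get('x-b3-traceid') ||
    response.headers.get('x-trace-id') ||
    response.headers.get('atl-traceid')
  );
}
```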
package/package.json
CHANGED
@@ -1,16 +1,13 @@
 {
   "name": "@forge/llm",
-  "version": "0.2.0-next.
+  "version": "0.2.0-next.4",
   "description": "Forge LLM SDK",
   "main": "out/index.js",
   "types": "out/index.d.ts",
-  "files": [
-    "out"
-  ],
   "author": "Atlassian",
   "license": "SEE LICENSE IN LICENSE.txt",
   "dependencies": {
-    "@forge/api": "^6.3.0-next.2
+    "@forge/api": "^6.3.0-next.2"
   },
   "devDependencies": {
     "@types/node": "20.19.1",
package/out/__test__/error-handling.test.d.ts.map
REMOVED
@@ -1 +0,0 @@
-{"version":3,"file":"error-handling.test.d.ts","sourceRoot":"","sources":["../../src/__test__/error-handling.test.ts"],"names":[],"mappings":""}
package/out/__test__/error-handling.test.js
REMOVED
@@ -1,105 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const errors_1 = require("../errors");
-const error_handling_1 = require("../utils/error-handling");
-describe('Error Handling', () => {
-    it('should handle a 400 response and return a ForgeLlmAPIError', async () => {
-        const code = 'UNSUPPORTED_AI_MODEL';
-        const message = 'Requested claude-3-5-sonnet-v2@20241022 not available for vendor: Claude';
-        const traceId = 'trace-123';
-        const apiResponse = new Response(JSON.stringify({
-            code: code,
-            message: message
-        }), {
-            status: 400,
-            statusText: 'Bad Request',
-            headers: {
-                'x-trace-id': traceId,
-                'Forge-Proxy-Error': 'UNSUPPORTED_AI_MODEL'
-            }
-        });
-        await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
-            status: 400,
-            statusText: 'Bad Request',
-            traceId
-        }, { code, message }));
-    });
-    it('should handle an Unknown error, that is a Forge error, with empty body and return a ForgeLlmAPIError', async () => {
-        const code = 'UNKNOWN_ERROR';
-        const traceId = 'trace-123';
-        const apiResponse = new Response(undefined, {
-            status: 500,
-            statusText: 'Internal Server Error',
-            headers: {
-                'x-trace-id': traceId
-            }
-        });
-        await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
-            status: 500,
-            statusText: 'Internal Server Error',
-            traceId
-        }, { code, message: 'Unexpected error in Forge LLM API', context: { responseText: '' } }));
-    });
-    it('should handle an Unknown error response, that is not a Forge error, with a json body and return a ForgeLlmAPIError', async () => {
-        const code = 'UNKNOWN_ERROR';
-        const body = JSON.stringify({
-            unknownError: 'Unexpected error with a non-json body'
-        });
-        const traceId = 'trace-123';
-        const apiResponse = new Response(body, {
-            status: 500,
-            statusText: 'Internal Server Error',
-            headers: {
-                'x-trace-id': traceId
-            }
-        });
-        await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
-            status: 500,
-            statusText: 'Internal Server Error',
-            traceId
-        }, {
-            message: 'Unexpected error in Forge LLM API',
-            code: code,
-            context: {
-                responseText: body
-            }
-        }));
-    });
-    it('should include context fields in the error when provided by the API', async () => {
-        const code = 'QUOTA_EXCEEDED';
-        const message = 'You have exceeded your quota';
-        const context = { quota: '1000', usage: '1500' };
-        const traceId = 'trace-123';
-        const apiResponse = new Response(JSON.stringify({ code, message, context }), {
-            status: 500,
-            statusText: 'Internal Server Error',
-            headers: {
-                'x-trace-id': traceId
-            }
-        });
-        await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
-            status: 500,
-            statusText: 'Internal Server Error',
-            traceId
-        }, { code, message, context }));
-    });
-    it('should include any extra fields in the error context when provided by the API', async () => {
-        const code = 'QUOTA_EXCEEDED';
-        const message = 'You have exceeded your quota';
-        const context = { quota: '1000', usage: '1500' };
-        const extraFields = { retryAfter: 30, plan: 'pro' };
-        const traceId = 'trace-123';
-        const apiResponse = new Response(JSON.stringify({ code, message, context, ...extraFields }), {
-            status: 500,
-            statusText: 'Internal Server Error',
-            headers: {
-                'x-trace-id': traceId
-            }
-        });
-        await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
-            status: 500,
-            statusText: 'Internal Server Error',
-            traceId
-        }, { code, message, context, ...extraFields }));
-    });
-});
package/out/__test__/fetch-wrapper-test.d.ts.map
REMOVED
@@ -1 +0,0 @@
-{"version":3,"file":"fetch-wrapper-test.d.ts","sourceRoot":"","sources":["../../src/__test__/fetch-wrapper-test.ts"],"names":[],"mappings":""}
package/out/__test__/fetch-wrapper-test.js
REMOVED
@@ -1,14 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const fetch_wrapper_1 = require("../fetch-wrapper");
-describe('FetchWrapper', () => {
-    it('should construct the args correctly', async () => {
-        const mockFetch = jest.fn().mockResolvedValue({ status: 200 });
-        global.__forge_fetch__ = mockFetch;
-        const fetchWrapper = (0, fetch_wrapper_1.getFetchWrapper)();
-        await fetchWrapper('https://llm/model-id', { method: 'POST' });
-        expect(mockFetch).toHaveBeenCalledTimes(1);
-        const [firstArg] = mockFetch.mock.calls[0];
-        expect(firstArg).toEqual({ type: 'llm', model: 'model-id' });
-    });
-});
package/out/__test__/index.test.d.ts.map
REMOVED
@@ -1 +0,0 @@
-{"version":3,"file":"index.test.d.ts","sourceRoot":"","sources":["../../src/__test__/index.test.ts"],"names":[],"mappings":""}
package/out/__test__/index.test.js
REMOVED
@@ -1,37 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const index_1 = require("../index");
-const test_helpers_1 = require("./test-helpers");
-const llm_api_1 = require("../llm-api");
-const errors_1 = require("../errors");
-jest.mock('../../src/fetch-wrapper', () => ({
-    getFetchWrapper: jest.fn(() => 'FAKE_FETCH_WRAPPER')
-}));
-jest.mock('../../src/llm-api', () => {
-    return {
-        LlmApiImpl: jest.fn().mockImplementation(() => ({
-            chat: jest.fn(async (...promptArgs) => ({ inputArgs: promptArgs }))
-        }))
-    };
-});
-describe('Chat', () => {
-    it('should call LlmApiImpl.chat with provided prompt payload', async () => {
-        const promptPayload = (0, test_helpers_1.constructPrompt)();
-        const result = await (0, index_1.chat)(promptPayload);
-        const llmApi = llm_api_1.LlmApiImpl.mock.results[0].value;
-        expect(llmApi.chat).toHaveBeenCalledWith(promptPayload);
-        expect(result).toEqual({ inputArgs: [promptPayload] });
-    });
-    it('should propagate Forge errors returned', async () => {
-        const prompt = (0, test_helpers_1.constructPrompt)();
-        const llmApi = llm_api_1.LlmApiImpl.mock.results[0].value;
-        llmApi.chat.mockRejectedValueOnce(new errors_1.ForgeLlmAPIError({ status: 400, statusText: 'bad request' }, { code: 'INVALID_BODY', message: 'Bad request' }));
-        await expect((0, index_1.chat)(prompt)).rejects.toThrow(errors_1.ForgeLlmAPIError);
-    });
-    it('should propagate validation errors', async () => {
-        const prompt = 'invalid_prompt';
-        const llmApi = llm_api_1.LlmApiImpl.mock.results[0].value;
-        llmApi.chat.mockRejectedValueOnce(new errors_1.PromptValidationError('The payload is invalid: Prompt is required and must be a valid object.'));
-        await expect((0, index_1.chat)(prompt)).rejects.toThrow(errors_1.PromptValidationError);
-    });
-});
package/out/__test__/llm-api.test.d.ts.map
REMOVED
@@ -1 +0,0 @@
-{"version":3,"file":"llm-api.test.d.ts","sourceRoot":"","sources":["../../src/__test__/llm-api.test.ts"],"names":[],"mappings":""}
package/out/__test__/llm-api.test.js
REMOVED
@@ -1,104 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const tslib_1 = require("tslib");
-const fetch_wrapper_1 = require("../fetch-wrapper");
-const llm_api_1 = require("../llm-api");
-const errors_1 = require("../errors");
-const test_helpers_1 = require("./test-helpers");
-const responseMapper = tslib_1.__importStar(require("../response-mapper"));
-function setupEnvironment(response) {
-    const mockedFetch = jest.fn().mockResolvedValue(response || new Response(null, { status: 200 }));
-    global.__forge_fetch__ = jest.fn((_ctx, path, options) => mockedFetch(path, options));
-}
-describe('llm api', () => {
-    it('should make ForgeLlm request', async () => {
-        const llmResponse = (0, test_helpers_1.constructLlmResponse)();
-        const apiResponse = new Response(JSON.stringify(llmResponse), {
-            status: 200
-        });
-        const mockedFetch = jest.fn().mockResolvedValue(apiResponse);
-        global.__forge_fetch__ = jest.fn((_ctx, path, options) => mockedFetch(path, options));
-        const llmAPI = new llm_api_1.LlmApiImpl((0, fetch_wrapper_1.getFetchWrapper)());
-        const promptBody = {
-            model: 'claude-sonnet-4-20250514',
-            messages: [
-                { role: 'user', content: [{ type: 'text', text: 'Hello, how are you?' }] },
-                { role: 'system', content: [{ type: 'text', text: 'You are a creative agent' }] }
-            ],
-            temperature: 0.7,
-            max_completion_tokens: 1000,
-            top_p: 0.9
-        };
-        const prompt = (0, test_helpers_1.constructPrompt)(promptBody);
-        await llmAPI.chat(prompt);
-        const [[path, options]] = mockedFetch.mock.calls;
-        expect(path).toBe('https://llm/claude-sonnet-4-20250514');
-        expect(options.method).toBe('POST');
-        expect(options.headers).toEqual({ 'Content-Type': 'application/json' });
-        expect(JSON.parse(options.body)).toEqual({
-            messages: [
-                {
-                    role: 'user',
-                    content: [
-                        {
-                            type: 'text',
-                            text: 'Hello, how are you?'
-                        }
-                    ]
-                },
-                {
-                    role: 'system',
-                    content: [
-                        {
-                            type: 'text',
-                            text: 'You are a creative agent'
-                        }
-                    ]
-                }
-            ],
-            temperature: 0.7,
-            max_completion_tokens: 1000,
-            top_p: 0.9
-        });
-    });
-    it('should throw a validation error when prompt is invalid', async () => {
-        const promptWithEmptyMessages = (0, test_helpers_1.constructPrompt)({ messages: [] });
-        const llmAPI = new llm_api_1.LlmApiImpl((0, fetch_wrapper_1.getFetchWrapper)());
-        await expect(llmAPI.chat(promptWithEmptyMessages)).rejects.toThrow(errors_1.PromptValidationError);
-    });
-    it('should return a ChatResponse for a 200 success response', async () => {
-        const llmResponse = (0, test_helpers_1.constructLlmResponse)({
-            finish_reason: 'tool_calls',
-            index: 19,
-            message: {
-                role: 'assistant',
-                content: [
-                    {
-                        type: 'text',
-                        text: "It's 20 degrees celsius"
-                    }
-                ]
-            }
-        });
-        const apiResponse = new Response(JSON.stringify(llmResponse), {
-            status: 200
-        });
-        setupEnvironment(apiResponse);
-        const llmAPI = new llm_api_1.LlmApiImpl((0, fetch_wrapper_1.getFetchWrapper)());
-        const prompt = (0, test_helpers_1.constructPrompt)();
-        const mapResponseSpy = jest.spyOn(responseMapper, 'mapForgeLLMResponse');
-        const response = await llmAPI.chat(prompt);
-        expect(mapResponseSpy).toHaveBeenCalledWith(apiResponse);
-        expect(response.choices).toEqual(llmResponse.choices);
-        expect(response.usage).toEqual(llmResponse.usage);
-    });
-    it('should throw an error for a non-200 response', async () => {
-        const apiResponse = new Response(JSON.stringify({}), {
-            status: 400
-        });
-        setupEnvironment(apiResponse);
-        const llmAPI = new llm_api_1.LlmApiImpl((0, fetch_wrapper_1.getFetchWrapper)());
-        const prompt = (0, test_helpers_1.constructPrompt)();
-        await expect(llmAPI.chat(prompt)).rejects.toThrow(errors_1.ForgeLlmAPIError);
-    });
-});
package/out/__test__/response-mapper.test.d.ts.map
REMOVED
@@ -1 +0,0 @@
-{"version":3,"file":"response-mapper.test.d.ts","sourceRoot":"","sources":["../../src/__test__/response-mapper.test.ts"],"names":[],"mappings":""}
package/out/__test__/response-mapper.test.js
REMOVED
@@ -1,73 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const response_mapper_1 = require("../response-mapper");
-const test_helpers_1 = require("./test-helpers");
-describe('response mapper', () => {
-    it('should map the Forge LLM response to a user response (choices empty)', async () => {
-        const llmResponseBody = { choices: [] };
-        const apiResponse = (0, test_helpers_1.makeApiResponse)({ json: () => Promise.resolve(llmResponseBody) });
-        const mappedResponse = await (0, response_mapper_1.mapForgeLLMResponse)(apiResponse);
-        expect(mappedResponse).toEqual({ choices: [], usage: undefined });
-    });
-    it('should map the Forge LLM response', async () => {
-        const llmResponseBody = {
-            choices: [
-                {
-                    finish_reason: 'tool_calls',
-                    index: 19,
-                    message: {
-                        role: 'assistant',
-                        content: [
-                            { type: 'text', text: 'It is warm outside' },
-                            { type: 'text', text: "It's 20 degrees celsius" }
-                        ],
-                        tool_calls: [
-                            {
-                                id: 'tool-call-123',
-                                type: 'function',
-                                function: {
-                                    name: 'WeatherTool',
-                                    arguments: {
-                                        location: 'San Francisco, CA',
-                                        unit: 'celsius'
-                                    }
-                                }
-                            }
-                        ]
-                    }
-                }
-            ]
-        };
-        const apiResponse = (0, test_helpers_1.makeApiResponse)({ json: () => Promise.resolve(llmResponseBody) });
-        const mappedResponse = await (0, response_mapper_1.mapForgeLLMResponse)(apiResponse);
-        expect(mappedResponse).toEqual({ choices: llmResponseBody.choices, usage: undefined });
-    });
-    it('should map the Forge LLM response with usage attribute', async () => {
-        const llmResponseBody = {
-            choices: [
-                {
-                    finish_reason: 'stop',
-                    message: {
-                        role: 'assistant',
-                        content: [{ type: 'text', text: 'Hello!' }]
-                    }
-                }
-            ],
-            usage: {
-                input_token: 10,
-                output_token: 20,
-                total_token: 30
-            }
-        };
-        const apiResponse = (0, test_helpers_1.makeApiResponse)({ json: () => Promise.resolve(llmResponseBody) });
-        const mappedResponse = await (0, response_mapper_1.mapForgeLLMResponse)(apiResponse);
-        expect(mappedResponse).toEqual({
-            choices: llmResponseBody.choices,
-            usage: {
-                input_token: 10,
-                output_token: 20,
-                total_token: 30
-            }
-        });
-    });
-});
package/out/__test__/test-helpers.d.ts
REMOVED
@@ -1,20 +0,0 @@
-import { Prompt } from '../interfaces/types';
-import { APIResponse } from '@forge/api';
-import { Choice } from '../interfaces/llm-api';
-export declare const constructPrompt: (overrides?: {}) => Prompt;
-export declare const makeApiResponse: (overrides: Partial<APIResponse>) => APIResponse;
-export declare const constructLlmResponse: (overrides?: Partial<Choice>) => {
-    choices: {
-        finish_reason: string;
-        index: number;
-        message: import("../interfaces/llm-api").AssistantMessage | {
-            role: string;
-            content: {
-                type: string;
-                text: string;
-            }[];
-        };
-    }[];
-    usage: {};
-};
-//# sourceMappingURL=test-helpers.d.ts.map
package/out/__test__/test-helpers.d.ts.map
REMOVED
@@ -1 +0,0 @@
-{"version":3,"file":"test-helpers.d.ts","sourceRoot":"","sources":["../../src/__test__/test-helpers.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,MAAM,qBAAqB,CAAC;AAC7C,OAAO,EAAE,WAAW,EAAE,MAAM,YAAY,CAAC;AACzC,OAAO,EAAE,MAAM,EAAE,MAAM,uBAAuB,CAAC;AAE/C,eAAO,MAAM,eAAe,sBAAqB,MAU/C,CAAC;AAEH,eAAO,MAAM,eAAe,cAAe,QAAQ,WAAW,CAAC,KAAG,WAWjE,CAAC;AAEF,eAAO,MAAM,oBAAoB,eAAe,QAAQ,MAAM,CAAC;;;;;;;;;;;;;CAkB7D,CAAC"}
package/out/__test__/test-helpers.js
REMOVED
@@ -1,48 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.constructLlmResponse = exports.makeApiResponse = exports.constructPrompt = void 0;
-const constructPrompt = (overrides = {}) => ({
-    model: 'claude-sonnet-4-20250514',
-    messages: [
-        { role: 'user', content: [{ type: 'text', text: 'Hello, how are you?' }] },
-        { role: 'system', content: [{ type: 'text', text: 'You are a creative agent' }] }
-    ],
-    temperature: 0.7,
-    max_completion_tokens: 1000,
-    top_p: 0.9,
-    ...overrides
-});
-exports.constructPrompt = constructPrompt;
-const makeApiResponse = (overrides) => {
-    return {
-        status: 200,
-        statusText: 'OK',
-        headers: new Headers({
-            'Content-Type': 'application/json'
-        }),
-        json: async () => Promise.resolve({}),
-        text: async () => JSON.stringify({}),
-        ...overrides
-    };
-};
-exports.makeApiResponse = makeApiResponse;
-const constructLlmResponse = (overrides = {}) => ({
-    choices: [
-        {
-            finish_reason: 'tool_calls',
-            message: {
-                role: 'assistant',
-                content: [
-                    {
-                        type: 'text',
-                        text: "It's 20 degrees celsius"
-                    }
-                ]
-            },
-            index: 19,
-            ...overrides
-        }
-    ],
-    usage: {}
-});
-exports.constructLlmResponse = constructLlmResponse;
package/out/__test__/validators.test.d.ts.map
REMOVED
@@ -1 +0,0 @@
-{"version":3,"file":"validators.test.d.ts","sourceRoot":"","sources":["../../src/__test__/validators.test.ts"],"names":[],"mappings":""}
package/out/__test__/validators.test.js
REMOVED
@@ -1,95 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-const validators_1 = require("../validators");
-const errors_1 = require("../errors");
-const test_helpers_1 = require("./test-helpers");
-describe('Validators', () => {
-    describe('required fields', () => {
-        it('should validate a correct prompt', () => {
-            const validPrompt = (0, test_helpers_1.constructPrompt)();
-            expect(() => (0, validators_1.validatePrompt)(validPrompt)).not.toThrow();
-        });
-        it('should fail if no prompt is provided', () => {
-            const emptyPrompt = undefined;
-            expect(() => (0, validators_1.validatePrompt)(emptyPrompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Prompt is required and must be a valid object.'));
-        });
-        describe('model validation', () => {
-            it('should throw an error for a missing model', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ model: undefined });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Model is required.'));
-            });
-        });
-        describe('messages validation', () => {
-            it('should throw an error when no messages are provided', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ messages: undefined });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: No messages were provided. Provide at least one message.'));
-            });
-            it('should throw an error when a message body is missing the role field', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ messages: [{ content: 'Hello' }] });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid role at index 0: . Role must be present and one of [system, user, assistant, tool].'));
-            });
-            it('should throw an error when a message body has an invalid role type', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ messages: [{ content: 'Hello', role: 'unknown-role' }] });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid role at index 0: unknown-role. Role must be present and one of [system, user, assistant, tool].'));
-            });
-            it('should throw an error when a message body is missing the content field', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ messages: [{ role: 'user' }] });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Message content at index 0 must be a non-empty string.'));
-            });
-        });
-    });
-    describe('optional fields', () => {
-        describe('temperature validation', () => {
-            it('should throw an error for a temperature under 0', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ temperature: -1 });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid temperature -1: Temperature must be between 0 and 1.'));
-            });
-            it('should throw an error for a temperature above 1', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ temperature: 2 });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid temperature 2: Temperature must be between 0 and 1.'));
-            });
-            it('should not throw an error for a non-numeric temperature', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ temperature: undefined });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
-            });
-            it('should throw an error for a non-finite temperature', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ temperature: 'NaN' });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid temperature NaN: Temperature must be between 0 and 1.'));
-            });
-        });
-        describe('max_completion_tokens validation', () => {
-            const invalidValues = [NaN, 0, -5, 10.5];
-            invalidValues.forEach((value) => {
-                it(`should throw an error for invalid max_completion_tokens value ${value}`, () => {
-                    const prompt = (0, test_helpers_1.constructPrompt)({ max_completion_tokens: value });
-                    expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError(`The payload is invalid: Invalid max_completion_tokens ${value}: max_completion_tokens must be a positive integer.`));
-                });
-            });
-            it('should not throw when max_completion_tokens is undefined', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ max_completion_tokens: undefined });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
-            });
-            it('should not throw when max_completion_tokens is a valid positive integer', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ max_completion_tokens: 256 });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
-            });
-        });
-        describe('top_p validation', () => {
-            it('should not throw an error when top_p is not provided', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ top_p: undefined });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
-            });
-            it('should not throw an error for a valid top_p', () => {
-                const prompt = (0, test_helpers_1.constructPrompt)({ top_p: 0.9 });
-                expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
-            });
-            const invalidTopPValues = [-0.1, 1.5, NaN, 'string'];
-            invalidTopPValues.forEach((value) => {
-                it(`should throw an error for invalid top_p value ${value}`, () => {
-                    const prompt = (0, test_helpers_1.constructPrompt)({ top_p: value });
-                    expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError(`The payload is invalid: Invalid top_p ${value}: top_p must be between 0 and 1.`));
-                });
-            });
-        });
-    });
-});