@forge/llm 0.0.1-experimental-994fcd3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. package/LICENSE.txt +7 -0
  2. package/README.md +98 -0
  3. package/out/__test__/error-handling.test.d.ts +2 -0
  4. package/out/__test__/error-handling.test.d.ts.map +1 -0
  5. package/out/__test__/error-handling.test.js +105 -0
  6. package/out/__test__/fetch-wrapper-test.d.ts +2 -0
  7. package/out/__test__/fetch-wrapper-test.d.ts.map +1 -0
  8. package/out/__test__/fetch-wrapper-test.js +14 -0
  9. package/out/__test__/index.test.d.ts +2 -0
  10. package/out/__test__/index.test.d.ts.map +1 -0
  11. package/out/__test__/index.test.js +37 -0
  12. package/out/__test__/llm-api.test.d.ts +2 -0
  13. package/out/__test__/llm-api.test.d.ts.map +1 -0
  14. package/out/__test__/llm-api.test.js +112 -0
  15. package/out/__test__/response-mapper.test.d.ts +2 -0
  16. package/out/__test__/response-mapper.test.d.ts.map +1 -0
  17. package/out/__test__/response-mapper.test.js +45 -0
  18. package/out/__test__/test-helpers.d.ts +19 -0
  19. package/out/__test__/test-helpers.d.ts.map +1 -0
  20. package/out/__test__/test-helpers.js +47 -0
  21. package/out/__test__/validators.test.d.ts +2 -0
  22. package/out/__test__/validators.test.d.ts.map +1 -0
  23. package/out/__test__/validators.test.js +95 -0
  24. package/out/errors.d.ts +28 -0
  25. package/out/errors.d.ts.map +1 -0
  26. package/out/errors.js +37 -0
  27. package/out/fetch-wrapper.d.ts +3 -0
  28. package/out/fetch-wrapper.d.ts.map +1 -0
  29. package/out/fetch-wrapper.js +16 -0
  30. package/out/index.d.ts +4 -0
  31. package/out/index.d.ts.map +1 -0
  32. package/out/index.js +11 -0
  33. package/out/interfaces/llm-api.d.ts +78 -0
  34. package/out/interfaces/llm-api.d.ts.map +1 -0
  35. package/out/interfaces/llm-api.js +2 -0
  36. package/out/interfaces/types.d.ts +10 -0
  37. package/out/interfaces/types.d.ts.map +1 -0
  38. package/out/interfaces/types.js +6 -0
  39. package/out/llm-api.d.ts +10 -0
  40. package/out/llm-api.d.ts.map +1 -0
  41. package/out/llm-api.js +28 -0
  42. package/out/response-mapper.d.ts +4 -0
  43. package/out/response-mapper.d.ts.map +1 -0
  44. package/out/response-mapper.js +9 -0
  45. package/out/text.d.ts +15 -0
  46. package/out/text.d.ts.map +1 -0
  47. package/out/text.js +17 -0
  48. package/out/utils/error-handling.d.ts +6 -0
  49. package/out/utils/error-handling.d.ts.map +1 -0
  50. package/out/utils/error-handling.js +44 -0
  51. package/out/validators.d.ts +4 -0
  52. package/out/validators.d.ts.map +1 -0
  53. package/out/validators.js +90 -0
  54. package/package.json +28 -0
package/LICENSE.txt ADDED
@@ -0,0 +1,7 @@
1
+ Copyright (c) 2025 Atlassian
2
+ Permission is hereby granted to use this software in accordance with the terms
3
+ and conditions outlined in the Atlassian Developer Terms, which can be found
4
+ at the following URL:
5
+ https://developer.atlassian.com/platform/marketplace/atlassian-developer-terms/
6
+ By using this software, you agree to comply with these terms and conditions.
7
+ If you do not agree with these terms, you are not permitted to use this software.
package/README.md ADDED
@@ -0,0 +1,98 @@
1
+ # @forge/llm
2
+
3
+ Library for Forge LLM
4
+
5
+
6
+ ## Usage Example
7
+
8
+
9
+ ### Sending a Prompt
10
+
11
+ ```typescript
12
+ import { chat } from '@forge/llm';
13
+
14
+ const prompt = {
15
+ model: 'claude-sonnet-4-20250514',
16
+ messages: [
17
+ {
18
+ role: 'user',
19
+ content: [
20
+ { type: 'text', text: 'What is the weather like in Melbourne?'}
21
+ ]
22
+ },
23
+ {
24
+ role: 'system',
25
+ content: [
26
+ { type: 'text', text: 'You are a helpful assistant.' }
27
+ ]
28
+ }
29
+ ],
30
+ temperature: 0.7,
31
+ max_completion_tokens: 1000,
32
+ top_p: 0.9,
33
+ tools: [
34
+ {
35
+ type: 'function',
36
+ function: {
37
+ name: 'get_current_weather',
38
+ description: 'Get the current weather in a given location',
39
+ parameters: {
40
+ type: 'object',
41
+ properties: {
42
+ location: {
43
+ type: 'string',
44
+ description: 'The city and state, e.g. San Francisco, CA'
45
+ },
46
+ unit: {
47
+ type: 'string',
48
+ enum: ['celsius', 'fahrenheit']
49
+ }
50
+ },
51
+ required: ['location']
52
+ }
53
+ }
54
+ }
55
+ ],
56
+ tool_choice: 'auto'
57
+ };
58
+
59
+ const response = await chat(prompt);
60
+ ```
61
+
62
+ ### Response Structure
63
+ The response is a `ChatResponse` object:
64
+
65
+ ```json
66
+ {
67
+ "choices": [
68
+ {
69
+ "finish_reason": "tool_use",
70
+ "message": {
71
+ "role": "assistant",
72
+ "content": [
73
+ {
74
+ "type": "text",
75
+ "text": "I'll help you check the weather in Melbourne using the WeatherTool."
76
+ }
77
+ ],
78
+ "tool_calls": [
79
+ {
80
+ "id": "toolu_bdrk_019EF362R6946pMuFhpLHLif",
81
+ "type": "function",
82
+ "function": {
83
+ "name": "get_current_weather",
84
+ "arguments": {
85
+ "location": "San Francisco, CA",
86
+ "unit": "celsius"
87
+ }
88
+ }
89
+ }
90
+ ]
91
+ }
92
+ }
93
+ ]
94
+ }
95
+ ```
96
+
97
+ ### Errors
98
+ The SDK throws validation and Forge Errors.
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=error-handling.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"error-handling.test.d.ts","sourceRoot":"","sources":["../../src/__test__/error-handling.test.ts"],"names":[],"mappings":""}
@@ -0,0 +1,105 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const errors_1 = require("../errors");
4
+ const error_handling_1 = require("../utils/error-handling");
5
+ describe('Error Handling', () => {
6
+ it('should handle a 400 response and return a ForgeLlmAPIError', async () => {
7
+ const code = 'UNSUPPORTED_AI_MODEL';
8
+ const message = 'Requested claude-3-5-sonnet-v2@20241022 not available for vendor: Claude';
9
+ const traceId = 'trace-123';
10
+ const apiResponse = new Response(JSON.stringify({
11
+ code: code,
12
+ message: message
13
+ }), {
14
+ status: 400,
15
+ statusText: 'Bad Request',
16
+ headers: {
17
+ 'x-trace-id': traceId,
18
+ 'Forge-Proxy-Error': 'UNSUPPORTED_AI_MODEL'
19
+ }
20
+ });
21
+ await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
22
+ status: 400,
23
+ statusText: 'Bad Request',
24
+ traceId
25
+ }, { code, message }));
26
+ });
27
+ it('should handle an Unknown error, that is a Forge error, with empty body and return a ForgeLlmAPIError', async () => {
28
+ const code = 'UNKNOWN_ERROR';
29
+ const traceId = 'trace-123';
30
+ const apiResponse = new Response(undefined, {
31
+ status: 500,
32
+ statusText: 'Internal Server Error',
33
+ headers: {
34
+ 'x-trace-id': traceId
35
+ }
36
+ });
37
+ await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
38
+ status: 500,
39
+ statusText: 'Internal Server Error',
40
+ traceId
41
+ }, { code, message: 'Unexpected error in Forge LLM API', context: { responseText: '' } }));
42
+ });
43
+ it('should handle an Unknown error response, that is not a Forge error, with a json body and return a ForgeLlmAPIError', async () => {
44
+ const code = 'UNKNOWN_ERROR';
45
+ const body = JSON.stringify({
46
+ unknownError: 'Unexpected error with a non-json body'
47
+ });
48
+ const traceId = 'trace-123';
49
+ const apiResponse = new Response(body, {
50
+ status: 500,
51
+ statusText: 'Internal Server Error',
52
+ headers: {
53
+ 'x-trace-id': traceId
54
+ }
55
+ });
56
+ await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
57
+ status: 500,
58
+ statusText: 'Internal Server Error',
59
+ traceId
60
+ }, {
61
+ message: 'Unexpected error in Forge LLM API',
62
+ code: code,
63
+ context: {
64
+ responseText: body
65
+ }
66
+ }));
67
+ });
68
+ it('should include context fields in the error when provided by the API', async () => {
69
+ const code = 'QUOTA_EXCEEDED';
70
+ const message = 'You have exceeded your quota';
71
+ const context = { quota: '1000', usage: '1500' };
72
+ const traceId = 'trace-123';
73
+ const apiResponse = new Response(JSON.stringify({ code, message, context }), {
74
+ status: 500,
75
+ statusText: 'Internal Server Error',
76
+ headers: {
77
+ 'x-trace-id': traceId
78
+ }
79
+ });
80
+ await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
81
+ status: 500,
82
+ statusText: 'Internal Server Error',
83
+ traceId
84
+ }, { code, message, context }));
85
+ });
86
+ it('should include any extra fields in the error context when provided by the API', async () => {
87
+ const code = 'QUOTA_EXCEEDED';
88
+ const message = 'You have exceeded your quota';
89
+ const context = { quota: '1000', usage: '1500' };
90
+ const extraFields = { retryAfter: 30, plan: 'pro' };
91
+ const traceId = 'trace-123';
92
+ const apiResponse = new Response(JSON.stringify({ code, message, context, ...extraFields }), {
93
+ status: 500,
94
+ statusText: 'Internal Server Error',
95
+ headers: {
96
+ 'x-trace-id': traceId
97
+ }
98
+ });
99
+ await expect(async () => (0, error_handling_1.checkResponseError)(apiResponse)).rejects.toMatchError(new errors_1.ForgeLlmAPIError({
100
+ status: 500,
101
+ statusText: 'Internal Server Error',
102
+ traceId
103
+ }, { code, message, context, ...extraFields }));
104
+ });
105
+ });
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=fetch-wrapper-test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"fetch-wrapper-test.d.ts","sourceRoot":"","sources":["../../src/__test__/fetch-wrapper-test.ts"],"names":[],"mappings":""}
@@ -0,0 +1,14 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const fetch_wrapper_1 = require("../fetch-wrapper");
4
+ describe('FetchWrapper', () => {
5
+ it('should construct the args correctly', async () => {
6
+ const mockFetch = jest.fn().mockResolvedValue({ status: 200 });
7
+ global.__forge_fetch__ = mockFetch;
8
+ const fetchWrapper = (0, fetch_wrapper_1.getFetchWrapper)();
9
+ await fetchWrapper('https://llm/model-id', { method: 'POST' });
10
+ expect(mockFetch).toHaveBeenCalledTimes(1);
11
+ const [firstArg] = mockFetch.mock.calls[0];
12
+ expect(firstArg).toEqual({ type: 'llm', model: 'model-id' });
13
+ });
14
+ });
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=index.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.test.d.ts","sourceRoot":"","sources":["../../src/__test__/index.test.ts"],"names":[],"mappings":""}
@@ -0,0 +1,37 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const index_1 = require("../index");
4
+ const test_helpers_1 = require("./test-helpers");
5
+ const llm_api_1 = require("../llm-api");
6
+ const errors_1 = require("../errors");
7
+ jest.mock('../../src/fetch-wrapper', () => ({
8
+ getFetchWrapper: jest.fn(() => 'FAKE_FETCH_WRAPPER')
9
+ }));
10
+ jest.mock('../../src/llm-api', () => {
11
+ return {
12
+ LlmApiImpl: jest.fn().mockImplementation(() => ({
13
+ chat: jest.fn(async (...promptArgs) => ({ inputArgs: promptArgs }))
14
+ }))
15
+ };
16
+ });
17
+ describe('Chat', () => {
18
+ it('should call LlmApiImpl.chat with provided prompt payload', async () => {
19
+ const promptPayload = (0, test_helpers_1.constructPrompt)();
20
+ const result = await (0, index_1.chat)(promptPayload);
21
+ const llmApi = llm_api_1.LlmApiImpl.mock.results[0].value;
22
+ expect(llmApi.chat).toHaveBeenCalledWith(promptPayload);
23
+ expect(result).toEqual({ inputArgs: [promptPayload] });
24
+ });
25
+ it('should propagate Forge errors returned', async () => {
26
+ const prompt = (0, test_helpers_1.constructPrompt)();
27
+ const llmApi = llm_api_1.LlmApiImpl.mock.results[0].value;
28
+ llmApi.chat.mockRejectedValueOnce(new errors_1.ForgeLlmAPIError({ status: 400, statusText: 'bad request' }, { code: 'INVALID_BODY', message: 'Bad request' }));
29
+ await expect((0, index_1.chat)(prompt)).rejects.toThrow(errors_1.ForgeLlmAPIError);
30
+ });
31
+ it('should propagate validation errors', async () => {
32
+ const prompt = 'invalid_prompt';
33
+ const llmApi = llm_api_1.LlmApiImpl.mock.results[0].value;
34
+ llmApi.chat.mockRejectedValueOnce(new errors_1.PromptValidationError('The payload is invalid: Prompt is required and must be a valid object.'));
35
+ await expect((0, index_1.chat)(prompt)).rejects.toThrow(errors_1.PromptValidationError);
36
+ });
37
+ });
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=llm-api.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm-api.test.d.ts","sourceRoot":"","sources":["../../src/__test__/llm-api.test.ts"],"names":[],"mappings":""}
@@ -0,0 +1,112 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const fetch_wrapper_1 = require("../fetch-wrapper");
4
+ const llm_api_1 = require("../llm-api");
5
+ const errors_1 = require("../errors");
6
+ const test_helpers_1 = require("./test-helpers");
7
+ function setupEnvironment(response) {
8
+ const mockedFetch = jest.fn().mockResolvedValue(response || new Response(null, { status: 200 }));
9
+ global.__forge_fetch__ = jest.fn((_ctx, path, options) => mockedFetch(path, options));
10
+ }
11
+ describe('llm api', () => {
12
+ it('should make ForgeLlm request', async () => {
13
+ const llmResponse = (0, test_helpers_1.constructLlmResponse)();
14
+ const apiResponse = new Response(JSON.stringify(llmResponse), {
15
+ status: 200
16
+ });
17
+ const mockedFetch = jest.fn().mockResolvedValue(apiResponse);
18
+ global.__forge_fetch__ = jest.fn((_ctx, path, options) => mockedFetch(path, options));
19
+ const llmAPI = new llm_api_1.LlmApiImpl((0, fetch_wrapper_1.getFetchWrapper)());
20
+ const promptBody = {
21
+ model: 'claude-sonnet-4-20250514',
22
+ messages: [
23
+ { role: 'user', content: [{ type: 'text', text: 'Hello, how are you?' }] },
24
+ { role: 'system', content: [{ type: 'text', text: 'You are a creative agent' }] }
25
+ ],
26
+ temperature: 0.7,
27
+ max_completion_tokens: 1000,
28
+ top_p: 0.9
29
+ };
30
+ const prompt = (0, test_helpers_1.constructPrompt)(promptBody);
31
+ await llmAPI.chat(prompt);
32
+ const [[path, options]] = mockedFetch.mock.calls;
33
+ expect(path).toBe('https://llm/claude-sonnet-4-20250514');
34
+ expect(options.method).toBe('POST');
35
+ expect(options.headers).toEqual({ 'Content-Type': 'application/json' });
36
+ expect(JSON.parse(options.body)).toEqual({
37
+ messages: [
38
+ {
39
+ role: 'user',
40
+ content: [
41
+ {
42
+ type: 'text',
43
+ text: 'Hello, how are you?'
44
+ }
45
+ ]
46
+ },
47
+ {
48
+ role: 'system',
49
+ content: [
50
+ {
51
+ type: 'text',
52
+ text: 'You are a creative agent'
53
+ }
54
+ ]
55
+ }
56
+ ],
57
+ temperature: 0.7,
58
+ max_completion_tokens: 1000,
59
+ top_p: 0.9
60
+ });
61
+ });
62
+ it('should throw a validation error when prompt is invalid', async () => {
63
+ const promptWithEmptyMessages = (0, test_helpers_1.constructPrompt)({ messages: [] });
64
+ const llmAPI = new llm_api_1.LlmApiImpl((0, fetch_wrapper_1.getFetchWrapper)());
65
+ await expect(llmAPI.chat(promptWithEmptyMessages)).rejects.toThrow(errors_1.PromptValidationError);
66
+ });
67
+ it('should return a ChatResponse for a 200 success response', async () => {
68
+ const llmResponse = (0, test_helpers_1.constructLlmResponse)({
69
+ finish_reason: 'tool_calls',
70
+ index: 19,
71
+ message: {
72
+ role: 'assistant',
73
+ content: [
74
+ {
75
+ type: 'text',
76
+ text: "It's 20 degrees celsius"
77
+ }
78
+ ]
79
+ }
80
+ });
81
+ const apiResponse = new Response(JSON.stringify(llmResponse), {
82
+ status: 200
83
+ });
84
+ setupEnvironment(apiResponse);
85
+ const llmAPI = new llm_api_1.LlmApiImpl((0, fetch_wrapper_1.getFetchWrapper)());
86
+ const prompt = (0, test_helpers_1.constructPrompt)();
87
+ const response = await llmAPI.chat(prompt);
88
+ expect(response.choices[0]).toEqual({
89
+ finish_reason: 'tool_calls',
90
+ message: {
91
+ content: [
92
+ {
93
+ type: 'text',
94
+ text: "It's 20 degrees celsius"
95
+ }
96
+ ],
97
+ role: 'assistant',
98
+ tool_calls: undefined
99
+ },
100
+ index: 19
101
+ });
102
+ });
103
+ it('should throw an error for a non-200 response', async () => {
104
+ const apiResponse = new Response(JSON.stringify({}), {
105
+ status: 400
106
+ });
107
+ setupEnvironment(apiResponse);
108
+ const llmAPI = new llm_api_1.LlmApiImpl((0, fetch_wrapper_1.getFetchWrapper)());
109
+ const prompt = (0, test_helpers_1.constructPrompt)();
110
+ await expect(llmAPI.chat(prompt)).rejects.toThrow(errors_1.ForgeLlmAPIError);
111
+ });
112
+ });
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=response-mapper.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"response-mapper.test.d.ts","sourceRoot":"","sources":["../../src/__test__/response-mapper.test.ts"],"names":[],"mappings":""}
@@ -0,0 +1,45 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const response_mapper_1 = require("../response-mapper");
4
+ const test_helpers_1 = require("./test-helpers");
5
+ describe('response mapper', () => {
6
+ it('should map the Forge LLM response to a user response (choices empty)', async () => {
7
+ const llmResponseBody = { choices: [] };
8
+ const apiResponse = (0, test_helpers_1.makeApiResponse)({ json: () => Promise.resolve(llmResponseBody) });
9
+ const mappedResponse = await (0, response_mapper_1.mapForgeLLMResponse)(apiResponse);
10
+ expect(mappedResponse).toEqual({ choices: [] });
11
+ });
12
+ it('should map the Forge LLM response', async () => {
13
+ const llmResponseBody = {
14
+ choices: [
15
+ {
16
+ finish_reason: 'tool_calls',
17
+ index: 19,
18
+ message: {
19
+ role: 'assistant',
20
+ content: [
21
+ { type: 'text', text: 'It is warm outside' },
22
+ { type: 'text', text: "It's 20 degrees celsius" }
23
+ ],
24
+ tool_calls: [
25
+ {
26
+ id: 'tool-call-123',
27
+ type: 'function',
28
+ function: {
29
+ name: 'WeatherTool',
30
+ arguments: {
31
+ location: 'San Francisco, CA',
32
+ unit: 'celsius'
33
+ }
34
+ }
35
+ }
36
+ ]
37
+ }
38
+ }
39
+ ]
40
+ };
41
+ const apiResponse = (0, test_helpers_1.makeApiResponse)({ json: () => Promise.resolve(llmResponseBody) });
42
+ const mappedResponse = await (0, response_mapper_1.mapForgeLLMResponse)(apiResponse);
43
+ expect(mappedResponse).toEqual({ choices: llmResponseBody.choices });
44
+ });
45
+ });
@@ -0,0 +1,19 @@
1
+ import { Prompt } from '../interfaces/types';
2
+ import { APIResponse } from '@forge/api';
3
+ import { Choice } from '../interfaces/llm-api';
4
+ export declare const constructPrompt: (overrides?: {}) => Prompt;
5
+ export declare const makeApiResponse: (overrides: Partial<APIResponse>) => APIResponse;
6
+ export declare const constructLlmResponse: (overrides?: Partial<Choice>) => {
7
+ choices: {
8
+ finish_reason: string;
9
+ index: number;
10
+ message: import("../interfaces/llm-api").AssistantMessage | {
11
+ role: string;
12
+ content: {
13
+ type: string;
14
+ text: string;
15
+ }[];
16
+ };
17
+ }[];
18
+ };
19
+ //# sourceMappingURL=test-helpers.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"test-helpers.d.ts","sourceRoot":"","sources":["../../src/__test__/test-helpers.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,MAAM,qBAAqB,CAAC;AAC7C,OAAO,EAAE,WAAW,EAAE,MAAM,YAAY,CAAC;AACzC,OAAO,EAAE,MAAM,EAAE,MAAM,uBAAuB,CAAC;AAE/C,eAAO,MAAM,eAAe,sBAAqB,MAU/C,CAAC;AAEH,eAAO,MAAM,eAAe,cAAe,QAAQ,WAAW,CAAC,KAAG,WAWjE,CAAC;AAEF,eAAO,MAAM,oBAAoB,eAAe,QAAQ,MAAM,CAAC;;;;;;;;;;;;CAiB7D,CAAC"}
@@ -0,0 +1,47 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.constructLlmResponse = exports.makeApiResponse = exports.constructPrompt = void 0;
4
+ const constructPrompt = (overrides = {}) => ({
5
+ model: 'claude-sonnet-4-20250514',
6
+ messages: [
7
+ { role: 'user', content: [{ type: 'text', text: 'Hello, how are you?' }] },
8
+ { role: 'system', content: [{ type: 'text', text: 'You are a creative agent' }] }
9
+ ],
10
+ temperature: 0.7,
11
+ max_completion_tokens: 1000,
12
+ top_p: 0.9,
13
+ ...overrides
14
+ });
15
+ exports.constructPrompt = constructPrompt;
16
+ const makeApiResponse = (overrides) => {
17
+ return {
18
+ status: 200,
19
+ statusText: 'OK',
20
+ headers: new Headers({
21
+ 'Content-Type': 'application/json'
22
+ }),
23
+ json: async () => Promise.resolve({}),
24
+ text: async () => JSON.stringify({}),
25
+ ...overrides
26
+ };
27
+ };
28
+ exports.makeApiResponse = makeApiResponse;
29
+ const constructLlmResponse = (overrides = {}) => ({
30
+ choices: [
31
+ {
32
+ finish_reason: 'tool_calls',
33
+ message: {
34
+ role: 'assistant',
35
+ content: [
36
+ {
37
+ type: 'text',
38
+ text: "It's 20 degrees celsius"
39
+ }
40
+ ]
41
+ },
42
+ index: 19,
43
+ ...overrides
44
+ }
45
+ ]
46
+ });
47
+ exports.constructLlmResponse = constructLlmResponse;
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=validators.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"validators.test.d.ts","sourceRoot":"","sources":["../../src/__test__/validators.test.ts"],"names":[],"mappings":""}
@@ -0,0 +1,95 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const validators_1 = require("../validators");
4
+ const errors_1 = require("../errors");
5
+ const test_helpers_1 = require("./test-helpers");
6
+ describe('Validators', () => {
7
+ describe('required fields', () => {
8
+ it('should validate a correct prompt', () => {
9
+ const validPrompt = (0, test_helpers_1.constructPrompt)();
10
+ expect(() => (0, validators_1.validatePrompt)(validPrompt)).not.toThrow();
11
+ });
12
+ it('should fail if no prompt is provided', () => {
13
+ const emptyPrompt = undefined;
14
+ expect(() => (0, validators_1.validatePrompt)(emptyPrompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Prompt is required and must be a valid object.'));
15
+ });
16
+ describe('model validation', () => {
17
+ it('should throw an error for a missing model', () => {
18
+ const prompt = (0, test_helpers_1.constructPrompt)({ model: undefined });
19
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Model is required.'));
20
+ });
21
+ });
22
+ describe('messages validation', () => {
23
+ it('should throw an error when no messages are provided', () => {
24
+ const prompt = (0, test_helpers_1.constructPrompt)({ messages: undefined });
25
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: No messages were provided. Please provide at least one message.'));
26
+ });
27
+ it('should throw an error when a message body is missing the role field', () => {
28
+ const prompt = (0, test_helpers_1.constructPrompt)({ messages: [{ content: 'Hello' }] });
29
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid role at index 0: . Role must be present and one of [system, user, assistant, tool].'));
30
+ });
31
+ it('should throw an error when a message body has an invalid role type', () => {
32
+ const prompt = (0, test_helpers_1.constructPrompt)({ messages: [{ content: 'Hello', role: 'unknown-role' }] });
33
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid role at index 0: unknown-role. Role must be present and one of [system, user, assistant, tool].'));
34
+ });
35
+ it('should throw an error when a message body is missing the content field', () => {
36
+ const prompt = (0, test_helpers_1.constructPrompt)({ messages: [{ role: 'user' }] });
37
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Message content at index 0 must be a non-empty string.'));
38
+ });
39
+ });
40
+ });
41
+ describe('optional fields', () => {
42
+ describe('temperature validation', () => {
43
+ it('should throw an error for a temperature under 0', () => {
44
+ const prompt = (0, test_helpers_1.constructPrompt)({ temperature: -1 });
45
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid temperature -1: Temperature must be between 0 and 1.'));
46
+ });
47
+ it('should throw an error for a temperature above 1', () => {
48
+ const prompt = (0, test_helpers_1.constructPrompt)({ temperature: 2 });
49
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid temperature 2: Temperature must be between 0 and 1.'));
50
+ });
51
+ it('should not throw an error for a non-numeric temperature', () => {
52
+ const prompt = (0, test_helpers_1.constructPrompt)({ temperature: undefined });
53
+ expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
54
+ });
55
+ it('should throw an error for a non-finite temperature', () => {
56
+ const prompt = (0, test_helpers_1.constructPrompt)({ temperature: 'NaN' });
57
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError('The payload is invalid: Invalid temperature NaN: Temperature must be between 0 and 1.'));
58
+ });
59
+ });
60
+ describe('max_completion_tokens validation', () => {
61
+ const invalidValues = [NaN, 0, -5, 10.5];
62
+ invalidValues.forEach((value) => {
63
+ it(`should throw an error for invalid max_completion_tokens value ${value}`, () => {
64
+ const prompt = (0, test_helpers_1.constructPrompt)({ max_completion_tokens: value });
65
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError(`The payload is invalid: Invalid max_completion_tokens ${value}: max_completion_tokens must be a positive integer.`));
66
+ });
67
+ });
68
+ it('should not throw when max_completion_tokens is undefined', () => {
69
+ const prompt = (0, test_helpers_1.constructPrompt)({ max_completion_tokens: undefined });
70
+ expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
71
+ });
72
+ it('should not throw when max_completion_tokens is a valid positive integer', () => {
73
+ const prompt = (0, test_helpers_1.constructPrompt)({ max_completion_tokens: 256 });
74
+ expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
75
+ });
76
+ });
77
+ describe('top_p validation', () => {
78
+ it('should not throw an error when top_p is not provided', () => {
79
+ const prompt = (0, test_helpers_1.constructPrompt)({ top_p: undefined });
80
+ expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
81
+ });
82
+ it('should not throw an error for a valid top_p', () => {
83
+ const prompt = (0, test_helpers_1.constructPrompt)({ top_p: 0.9 });
84
+ expect(() => (0, validators_1.validatePrompt)(prompt)).not.toThrow();
85
+ });
86
+ const invalidTopPValues = [-0.1, 1.5, NaN, 'string'];
87
+ invalidTopPValues.forEach((value) => {
88
+ it(`should throw an error for invalid top_p value ${value}`, () => {
89
+ const prompt = (0, test_helpers_1.constructPrompt)({ top_p: value });
90
+ expect(() => (0, validators_1.validatePrompt)(prompt)).toMatchError(new errors_1.PromptValidationError(`The payload is invalid: Invalid top_p ${value}: top_p must be between 0 and 1.`));
91
+ });
92
+ });
93
+ });
94
+ });
95
+ });
@@ -0,0 +1,28 @@
1
+ export declare const errorCodes: {
2
+ readonly FORGE_LLM_API_ERROR: "FORGE_API_ERROR";
3
+ readonly UNKNOWN_ERROR: "UNKNOWN_ERROR";
4
+ };
5
+ export interface ForgeError {
6
+ code: string;
7
+ message: string;
8
+ context?: Record<string, unknown>;
9
+ }
10
+ export declare class ForgeLlmError extends Error {
11
+ constructor(message: string);
12
+ }
13
+ export declare class PromptValidationError extends ForgeLlmError {
14
+ constructor(message: string);
15
+ }
16
+ export interface APIErrorResponseDetails {
17
+ status: number;
18
+ statusText: string;
19
+ traceId?: string | null;
20
+ }
21
+ export declare class ForgeLlmAPIError extends ForgeLlmError {
22
+ responseDetails: APIErrorResponseDetails;
23
+ code: string;
24
+ message: string;
25
+ context: Record<string, unknown>;
26
+ constructor(responseDetails: APIErrorResponseDetails, forgeError: ForgeError);
27
+ }
28
+ //# sourceMappingURL=errors.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"errors.d.ts","sourceRoot":"","sources":["../src/errors.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,UAAU;;;CAGb,CAAC;AAEX,MAAM,WAAW,UAAU;IACzB,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACnC;AAED,qBAAa,aAAc,SAAQ,KAAK;gBAC1B,OAAO,EAAE,MAAM;CAI5B;AAED,qBAAa,qBAAsB,SAAQ,aAAa;gBAC1C,OAAO,EAAE,MAAM;CAI5B;AAED,MAAM,WAAW,uBAAuB;IACtC,MAAM,EAAE,MAAM,CAAC;IACf,UAAU,EAAE,MAAM,CAAC;IACnB,OAAO,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;CACzB;AAED,qBAAa,gBAAiB,SAAQ,aAAa;IACjD,eAAe,EAAE,uBAAuB,CAAC;IACzC,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;gBAErB,eAAe,EAAE,uBAAuB,EAAE,UAAU,EAAE,UAAU;CAU7E"}
package/out/errors.js ADDED
@@ -0,0 +1,37 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ForgeLlmAPIError = exports.PromptValidationError = exports.ForgeLlmError = exports.errorCodes = void 0;
4
+ exports.errorCodes = {
5
+ FORGE_LLM_API_ERROR: 'FORGE_API_ERROR',
6
+ UNKNOWN_ERROR: 'UNKNOWN_ERROR'
7
+ };
8
+ class ForgeLlmError extends Error {
9
+ constructor(message) {
10
+ super(message);
11
+ this.name = 'ForgeLlmError';
12
+ }
13
+ }
14
+ exports.ForgeLlmError = ForgeLlmError;
15
+ class PromptValidationError extends ForgeLlmError {
16
+ constructor(message) {
17
+ super(message);
18
+ this.name = 'PromptValidationError';
19
+ }
20
+ }
21
+ exports.PromptValidationError = PromptValidationError;
22
+ class ForgeLlmAPIError extends ForgeLlmError {
23
+ responseDetails;
24
+ code;
25
+ message;
26
+ context;
27
+ constructor(responseDetails, forgeError) {
28
+ super(forgeError.message);
29
+ const { status, statusText, traceId } = responseDetails;
30
+ this.responseDetails = { status, statusText, traceId };
31
+ const { code, message, context, ...bodyData } = forgeError;
32
+ this.code = code || exports.errorCodes.FORGE_LLM_API_ERROR;
33
+ this.message = message;
34
+ this.context = { ...context, ...bodyData };
35
+ }
36
+ }
37
+ exports.ForgeLlmAPIError = ForgeLlmAPIError;
@@ -0,0 +1,3 @@
1
+ import { FetchMethod } from '@forge/api';
2
+ export declare function getFetchWrapper(): FetchMethod;
3
+ //# sourceMappingURL=fetch-wrapper.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"fetch-wrapper.d.ts","sourceRoot":"","sources":["../src/fetch-wrapper.ts"],"names":[],"mappings":"AAAA,OAAO,EAAe,WAAW,EAAe,MAAM,YAAY,CAAC;AAEnE,wBAAgB,eAAe,IAAI,WAAW,CAW7C"}
@@ -0,0 +1,16 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.getFetchWrapper = void 0;
4
+ function getFetchWrapper() {
5
+ return async function (path, options) {
6
+ const model = path?.split('/').pop();
7
+ return await global.__forge_fetch__({ type: 'llm', model: model }, path, {
8
+ ...options,
9
+ headers: {
10
+ ...options?.headers,
11
+ 'Content-Type': 'application/json'
12
+ }
13
+ });
14
+ };
15
+ }
16
+ exports.getFetchWrapper = getFetchWrapper;
package/out/index.d.ts ADDED
@@ -0,0 +1,4 @@
1
+ export * from './interfaces/types';
2
+ export declare const chat: (prompt: import("./interfaces/types").Prompt) => Promise<import("./interfaces/llm-api").LlmResponse>;
3
+ export default chat;
4
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAGA,cAAc,oBAAoB,CAAC;AAInC,eAAO,MAAM,IAAI,sGAAoE,CAAC;AAEtF,eAAe,IAAI,CAAC"}
package/out/index.js ADDED
@@ -0,0 +1,11 @@
1
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.chat = void 0;
const tslib_1 = require("tslib");
const llm_api_1 = require("./llm-api");
const fetch_wrapper_1 = require("./fetch-wrapper");
// Re-export the public request/response types alongside the API surface.
tslib_1.__exportStar(require("./interfaces/types"), exports);
// Single module-level client instance, wired to the Forge runtime fetch bridge.
const llmApi = new llm_api_1.LlmApiImpl((0, fetch_wrapper_1.getFetchWrapper)());
// `chat` forwards to the shared client; it is both a named export and the
// default export of this package.
const chat = (...args) => llmApi.chat(...args);
exports.chat = chat;
exports.default = exports.chat;
@@ -0,0 +1,78 @@
1
import { ChatResponse, Prompt } from './types';
/** Public chat API surface implemented by the SDK client. */
export interface LlmApi {
    chat(prompt: Prompt): Promise<ChatResponse>;
}
/** Request body sent to the LLM endpoint (chat-completion style fields). */
export interface LlmRequest {
    messages: Message[];
    /** Sampling temperature; validated by the SDK to lie in [0, 1]. */
    temperature?: number;
    /** Upper bound on generated tokens; validated to be a positive integer. */
    max_completion_tokens?: number;
    /** Nucleus-sampling parameter; validated by the SDK to lie in [0, 1]. */
    top_p?: number;
    tools?: Tool[] | undefined;
    tool_choice?: ToolChoice;
}
/** Any chat message, discriminated by its `role` field. */
export declare type Message = SystemMessage | UserMessage | AssistantMessage | ToolMessage;
/** A function tool the model may call; `parameters` is a JSON-schema-like object. */
export interface Tool {
    type: 'function';
    function: {
        name: string;
        description: string;
        parameters: Record<string, unknown>;
    };
}
/** Tool selection: a keyword mode or a pin to one named function. */
export declare type ToolChoice = 'auto' | 'none' | 'required' | {
    type: 'function';
    function: {
        name: string;
    };
};
/** A tool invocation as it appears in a response, with parsed arguments. */
export interface LlmResponseTool {
    id: string;
    type: 'function';
    function: {
        name: string;
        arguments: Record<string, unknown>;
    };
}
export interface SystemMessage {
    content: Content;
    role: 'system';
}
export interface UserMessage {
    content: Content;
    role: 'user';
}
/** Result of a tool call fed back to the model; linked via `tool_call_id`. */
export interface ToolMessage {
    content: Content;
    role: 'tool';
    tool_call_id?: string;
    name?: string;
}
/** Model output message; may request tool invocations via `tool_calls`. */
export interface AssistantMessage {
    content: Content;
    role: 'assistant';
    tool_calls?: ToolCall[];
}
/** A tool call requested by the assistant; `index` orders multiple calls. */
export interface ToolCall {
    id: string;
    type: 'function';
    index: number;
    function: {
        name: string;
        arguments: object;
    };
}
/** Raw LLM response shape; only `choices` is surfaced by the SDK mapper. */
export interface LlmResponse {
    choices: Choice[];
}
/** One candidate completion with the reason generation stopped. */
export interface Choice {
    finish_reason: string;
    index?: number;
    message: AssistantMessage;
}
/** Message content: a plain string or a list of typed parts. */
export declare type Content = string | ContentPart[];
/** Currently only text parts are supported. */
export declare type ContentPart = TextPart;
export interface TextPart {
    type: 'text';
    text: string;
}
//# sourceMappingURL=llm-api.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm-api.d.ts","sourceRoot":"","sources":["../../src/interfaces/llm-api.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,EAAE,MAAM,SAAS,CAAC;AAE/C,MAAM,WAAW,MAAM;IACrB,IAAI,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,YAAY,CAAC,CAAC;CAC7C;AAED,MAAM,WAAW,UAAU;IACzB,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,qBAAqB,CAAC,EAAE,MAAM,CAAC;IAC/B,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,KAAK,CAAC,EAAE,IAAI,EAAE,GAAG,SAAS,CAAC;IAC3B,WAAW,CAAC,EAAE,UAAU,CAAC;CAC1B;AAED,oBAAY,OAAO,GAAG,aAAa,GAAG,WAAW,GAAG,gBAAgB,GAAG,WAAW,CAAC;AAEnF,MAAM,WAAW,IAAI;IACnB,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE;QACR,IAAI,EAAE,MAAM,CAAC;QACb,WAAW,EAAE,MAAM,CAAC;QACpB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;KACrC,CAAC;CACH;AAED,oBAAY,UAAU,GAClB,MAAM,GACN,MAAM,GACN,UAAU,GACV;IACE,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE;QACR,IAAI,EAAE,MAAM,CAAC;KACd,CAAC;CACH,CAAC;AAEN,MAAM,WAAW,eAAe;IAC9B,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,UAAU,CAAC;IACjB,QAAQ,EAAE;QACR,IAAI,EAAE,MAAM,CAAC;QACb,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;KACpC,CAAC;CACH;AAED,MAAM,WAAW,aAAa;IAC5B,OAAO,EAAE,OAAO,CAAC;IACjB,IAAI,EAAE,QAAQ,CAAC;CAChB;AAED,MAAM,WAAW,WAAW;IAC1B,OAAO,EAAE,OAAO,CAAC;IACjB,IAAI,EAAE,MAAM,CAAC;CACd;AACD,MAAM,WAAW,WAAW;IAC1B,OAAO,EAAE,OAAO,CAAC;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB,IAAI,CAAC,EAAE,MAAM,CAAC;CACf;AAED,MAAM,WAAW,gBAAgB;IAC/B,OAAO,EAAE,OAAO,CAAC;IACjB,IAAI,EAAE,WAAW,CAAC;IAClB,UAAU,CAAC,EAAE,QAAQ,EAAE,CAAC;CACzB;AAED,MAAM,WAAW,QAAQ;IACvB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,UAAU,CAAC;IACjB,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE;QACR,IAAI,EAAE,MAAM,CAAC;QACb,SAAS,EAAE,MAAM,CAAC;KACnB,CAAC;CACH;AAED,MAAM,WAAW,WAAW;IAC1B,OAAO,EAAE,MAAM,EAAE,CAAC;CACnB;AAED,MAAM,WAAW,MAAM;IACrB,aAAa,EAAE,MAAM,CAAC;IACtB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,gBAAgB,CAAC;CAC3B;AAED,oBAAY,OAAO,GAAG,MAAM,GAAG,WAAW,EAAE,CAAC;AAE7C,oBAAY,WAAW,GAAG,QAAQ,CAAC;AAEnC,MAAM,WAAW,QAAQ;IACvB,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;CACd"}
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,10 @@
1
import { ForgeLlmError, PromptValidationError } from '../errors';
import { LlmRequest, LlmResponse, Message, Tool, ToolCall } from './llm-api';
/** A chat request plus the model identifier used to route it. */
export declare type Prompt = LlmRequest & {
    model: string;
};
/** Alias: chat responses use the raw LLM response shape. */
export declare type ChatResponse = LlmResponse;
/** Alias for the function-tool definition shape. */
export declare type ToolSchema = Tool;
export type { Message, ToolCall };
export { ForgeLlmError, PromptValidationError };
//# sourceMappingURL=types.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/interfaces/types.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,aAAa,EAAE,qBAAqB,EAAE,MAAM,WAAW,CAAC;AACjE,OAAO,EAAE,UAAU,EAAE,WAAW,EAAE,OAAO,EAAE,IAAI,EAAE,QAAQ,EAAE,MAAM,WAAW,CAAC;AAE7E,oBAAY,MAAM,GAAG,UAAU,GAAG;IAChC,KAAK,EAAE,MAAM,CAAC;CACf,CAAC;AAEF,oBAAY,YAAY,GAAG,WAAW,CAAC;AAEvC,oBAAY,UAAU,GAAG,IAAI,CAAC;AAE9B,YAAY,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC;AAElC,OAAO,EAAE,aAAa,EAAE,qBAAqB,EAAE,CAAC"}
@@ -0,0 +1,6 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.PromptValidationError = exports.ForgeLlmError = void 0;
4
+ const errors_1 = require("../errors");
5
+ Object.defineProperty(exports, "ForgeLlmError", { enumerable: true, get: function () { return errors_1.ForgeLlmError; } });
6
+ Object.defineProperty(exports, "PromptValidationError", { enumerable: true, get: function () { return errors_1.PromptValidationError; } });
@@ -0,0 +1,10 @@
1
import { FetchMethod } from '@forge/api';
import { ChatResponse, Prompt } from './interfaces/types';
import { LlmApi } from './interfaces/llm-api';
/** Default LlmApi implementation backed by an injected fetch function. */
export declare class LlmApiImpl implements LlmApi {
    private readonly apiClient;
    constructor(apiClient: FetchMethod);
    /** Validates the prompt, POSTs it to the model's endpoint, maps the response. */
    chat: (prompt: Prompt) => Promise<ChatResponse>;
    private buildForgeLlmUrl;
}
//# sourceMappingURL=llm-api.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm-api.d.ts","sourceRoot":"","sources":["../src/llm-api.ts"],"names":[],"mappings":"AAAA,OAAO,EAAe,WAAW,EAAE,MAAM,YAAY,CAAC;AACtD,OAAO,EAAE,YAAY,EAAE,MAAM,EAAE,MAAM,oBAAoB,CAAC;AAE1D,OAAO,EAAE,MAAM,EAAE,MAAM,sBAAsB,CAAC;AAI9C,qBAAa,UAAW,YAAW,MAAM;IAC3B,OAAO,CAAC,QAAQ,CAAC,SAAS;gBAAT,SAAS,EAAE,WAAW;IAEnD,IAAI,WAAkB,MAAM,KAAG,QAAQ,YAAY,CAAC,CAelD;IAEF,OAAO,CAAC,gBAAgB;CAIzB"}
package/out/llm-api.js ADDED
@@ -0,0 +1,28 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.LlmApiImpl = void 0;
4
+ const validators_1 = require("./validators");
5
+ const response_mapper_1 = require("./response-mapper");
6
+ const error_handling_1 = require("./utils/error-handling");
7
/**
 * Default LlmApi implementation: validates the prompt, POSTs it to the
 * Forge LLM endpoint for the requested model, and maps the raw response
 * into the public ChatResponse shape.
 */
class LlmApiImpl {
    apiClient;
    /** @param apiClient - fetch-style function used to reach the LLM service. */
    constructor(apiClient) {
        this.apiClient = apiClient;
    }
    /**
     * Sends a chat prompt and resolves with the mapped chat response.
     * Throws a PromptValidationError for invalid prompts and a
     * ForgeLlmAPIError for non-OK HTTP responses.
     */
    chat = async (prompt) => {
        (0, validators_1.validatePrompt)(prompt);
        // The model selects the endpoint; everything else is the request body.
        const { model, ...requestBody } = prompt;
        const url = this.buildForgeLlmUrl(model);
        const apiResponse = await this.apiClient(url, {
            method: 'POST',
            body: JSON.stringify(requestBody)
        });
        await (0, error_handling_1.checkResponseError)(apiResponse);
        return await (0, response_mapper_1.mapForgeLLMResponse)(apiResponse);
    };
    // Builds the internal LLM URL, URL-encoding the model path segment.
    buildForgeLlmUrl(model) {
        return `https://llm/${encodeURIComponent(model)}`;
    }
}
exports.LlmApiImpl = LlmApiImpl;
@@ -0,0 +1,4 @@
1
+ import { APIResponse } from '@forge/api';
2
+ import { ChatResponse } from './interfaces/types';
3
+ export declare const mapForgeLLMResponse: (response: APIResponse) => Promise<ChatResponse>;
4
+ //# sourceMappingURL=response-mapper.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"response-mapper.d.ts","sourceRoot":"","sources":["../src/response-mapper.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,MAAM,YAAY,CAAC;AACzC,OAAO,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAGlD,eAAO,MAAM,mBAAmB,aAAoB,WAAW,KAAG,QAAQ,YAAY,CAIrF,CAAC"}
@@ -0,0 +1,9 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.mapForgeLLMResponse = void 0;
4
/**
 * Maps a raw Forge LLM HTTP response to the public ChatResponse shape,
 * keeping only the `choices` array (empty when the body has none).
 */
const mapForgeLLMResponse = async (response) => {
    const body = await response.json();
    return { choices: body.choices == null ? [] : body.choices };
};
9
+ exports.mapForgeLLMResponse = mapForgeLLMResponse;
package/out/text.d.ts ADDED
@@ -0,0 +1,15 @@
1
+ export declare const Text: {
2
+ error: {
3
+ missingModel: string;
4
+ invalidTemperature: (temp: number) => string;
5
+ invalidTopP: (topP: number) => string;
6
+ invalidMaxCompletionTokens: (maxCompletionTokens: number) => string;
7
+ invalidRole: (index: number, role: string | undefined, expectedRoles: string) => string;
8
+ emptyMessageContent: (index: number) => string;
9
+ promptRequired: string;
10
+ noMessagesProvided: string;
11
+ invalidPayload: string;
12
+ unKnownError: string;
13
+ };
14
+ };
15
+ //# sourceMappingURL=text.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"text.d.ts","sourceRoot":"","sources":["../src/text.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,IAAI;;;mCAGc,MAAM,KAAG,MAAM;4BACtB,MAAM,KAAG,MAAM;0DACe,MAAM,KAAG,MAAM;6BAE5C,MAAM,QAAQ,MAAM,GAAG,SAAS,iBAAiB,MAAM,KAAG,MAAM;qCAExD,MAAM,KAAG,MAAM;;;;;;CAM/C,CAAC"}
package/out/text.js ADDED
@@ -0,0 +1,17 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.Text = void 0;
4
+ exports.Text = {
5
+ error: {
6
+ missingModel: `Model is required.`,
7
+ invalidTemperature: (temp) => `Invalid temperature ${temp}: Temperature must be between 0 and 1.`,
8
+ invalidTopP: (topP) => `Invalid top_p ${topP}: top_p must be between 0 and 1.`,
9
+ invalidMaxCompletionTokens: (maxCompletionTokens) => `Invalid max_completion_tokens ${maxCompletionTokens}: max_completion_tokens must be a positive integer.`,
10
+ invalidRole: (index, role, expectedRoles) => `Invalid role at index ${index}: ${role || ''}. Role must be present and one of [${expectedRoles}].`,
11
+ emptyMessageContent: (index) => `Message content at index ${index} must be a non-empty string.`,
12
+ promptRequired: 'Prompt is required and must be a valid object.',
13
+ noMessagesProvided: 'No messages were provided. Please provide at least one message.',
14
+ invalidPayload: 'The payload is invalid:',
15
+ unKnownError: 'Unexpected error in Forge LLM API'
16
+ }
17
+ };
@@ -0,0 +1,6 @@
1
+ import { ForgeError } from '../errors';
2
+ import { APIResponse } from '@forge/api';
3
+ export declare function isForgeError(body: unknown): body is ForgeError;
4
+ export declare function checkResponseError(response: APIResponse): Promise<void>;
5
+ export declare function safeGetParsedBody(text: string): unknown | undefined;
6
+ //# sourceMappingURL=error-handling.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"error-handling.d.ts","sourceRoot":"","sources":["../../src/utils/error-handling.ts"],"names":[],"mappings":"AAAA,OAAO,EAAuC,UAAU,EAAoB,MAAM,WAAW,CAAC;AAE9F,OAAO,EAAE,WAAW,EAAE,MAAM,YAAY,CAAC;AAMzC,wBAAgB,YAAY,CAAC,IAAI,EAAE,OAAO,GAAG,IAAI,IAAI,UAAU,CAE9D;AAED,wBAAsB,kBAAkB,CAAC,QAAQ,EAAE,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC,CAwB7E;AAED,wBAAgB,iBAAiB,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,GAAG,SAAS,CAMnE"}
@@ -0,0 +1,44 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.safeGetParsedBody = exports.checkResponseError = exports.isForgeError = void 0;
4
+ const errors_1 = require("../errors");
5
+ const text_1 = require("../text");
6
// Pulls a trace identifier from the response headers, preferring the
// B3 header and falling back to the generic trace header.
function extractTraceId(response) {
    const b3TraceId = response.headers.get('x-b3-traceid');
    if (b3TraceId) {
        return b3TraceId;
    }
    return response.headers.get('x-trace-id');
}
9
// Type guard: a body qualifies as a ForgeError when it is a non-null
// object carrying both `code` and `message` keys.
function isForgeError(body) {
    if (body === null || typeof body !== 'object') {
        return false;
    }
    return 'code' in body && 'message' in body;
}
12
+ exports.isForgeError = isForgeError;
13
/**
 * Resolves silently for OK responses; otherwise reads the body and throws
 * a ForgeLlmAPIError built from the parsed error payload (or from a
 * generic unknown-error payload when the body is not a ForgeError).
 */
async function checkResponseError(response) {
    if (response.ok) {
        return;
    }
    const responseText = await response.text();
    const parsedBody = safeGetParsedBody(responseText);
    const responseDetails = {
        status: response.status,
        statusText: response.statusText,
        traceId: extractTraceId(response)
    };
    if (parsedBody && isForgeError(parsedBody)) {
        throw new errors_1.ForgeLlmAPIError(responseDetails, parsedBody);
    }
    // Body was not a structured ForgeError: surface the raw text as context.
    throw new errors_1.ForgeLlmAPIError(responseDetails, {
        code: errors_1.errorCodes.UNKNOWN_ERROR,
        message: text_1.Text.error.unKnownError,
        context: { responseText }
    });
}
exports.checkResponseError = checkResponseError;
36
// Attempts JSON.parse, returning undefined instead of throwing on
// malformed input.
function safeGetParsedBody(text) {
    let parsed;
    try {
        parsed = JSON.parse(text);
    }
    catch {
        parsed = undefined;
    }
    return parsed;
}
44
+ exports.safeGetParsedBody = safeGetParsedBody;
@@ -0,0 +1,4 @@
1
+ import { Prompt } from './interfaces/types';
2
+ export declare const ChatRoles: readonly ["system", "user", "assistant", "tool"];
3
+ export declare const validatePrompt: (prompt: Prompt) => void;
4
+ //# sourceMappingURL=validators.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"validators.d.ts","sourceRoot":"","sources":["../src/validators.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,MAAM,oBAAoB,CAAC;AAK5C,eAAO,MAAM,SAAS,kDAAmD,CAAC;AA4F1E,eAAO,MAAM,cAAc,WAAY,MAAM,KAAG,IAQ/C,CAAC"}
@@ -0,0 +1,90 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.validatePrompt = exports.ChatRoles = void 0;
4
+ const errors_1 = require("./errors");
5
+ const text_1 = require("./text");
6
+ exports.ChatRoles = ['system', 'user', 'assistant', 'tool'];
7
// A prompt is "present" when it is any non-null object.
function promptIsPresent(value) {
    if (value === null) {
        return false;
    }
    return typeof value === 'object';
}
10
// Returns error messages for the model field (empty when valid).
// Any falsy model (missing, empty string, null) is rejected.
function validateModel(model) {
    return model ? [] : [text_1.Text.error.missingModel];
}
17
// temperature is optional; when given it must be a finite number in [0, 1].
function validateTemperature(temperature) {
    if (temperature === undefined) {
        return [];
    }
    const inRange = Number.isFinite(temperature) && temperature >= 0 && temperature <= 1;
    return inRange ? [] : [text_1.Text.error.invalidTemperature(temperature)];
}
24
// max_completion_tokens is optional; when given it must be a positive integer.
function validateMaxCompletionTokens(maxCompletionTokens) {
    if (maxCompletionTokens === undefined) {
        return [];
    }
    if (Number.isInteger(maxCompletionTokens) && maxCompletionTokens > 0) {
        return [];
    }
    return [text_1.Text.error.invalidMaxCompletionTokens(maxCompletionTokens)];
}
31
// Validates the messages array: it must be a non-empty array, and every
// entry must be an object with a recognised role and valid content.
// Returns the accumulated error messages (empty when valid).
function validateMessages(messages) {
    if (!Array.isArray(messages) || messages.length === 0) {
        return [text_1.Text.error.noMessagesProvided];
    }
    const errors = [];
    for (const [index, message] of messages.entries()) {
        if (!isRequestMessage(message)) {
            // Non-object entries are reported with the generic message.
            errors.push(text_1.Text.error.noMessagesProvided);
            continue;
        }
        if (!isValidChatRole(message)) {
            errors.push(text_1.Text.error.invalidRole(index, message.role, getChatRoles()));
        }
        if (!isValidChatContent(message)) {
            errors.push(text_1.Text.error.emptyMessageContent(index));
        }
    }
    return errors;
}
52
// A message entry must be a non-null object before field checks apply.
function isRequestMessage(value) {
    if (value === null) {
        return false;
    }
    return typeof value === 'object';
}
55
// Role must be a non-blank string matching one of the supported ChatRoles.
function isValidChatRole(msg) {
    if (!('role' in msg) || typeof msg.role !== 'string') {
        return false;
    }
    if (msg.role.trim().length === 0) {
        return false;
    }
    return exports.ChatRoles.includes(msg.role);
}
58
/**
 * Checks that a message carries usable content.
 *
 * Bug fix: the original compared `typeof msg.content !== undefined`,
 * which is always true (`typeof` yields a string, never `undefined`),
 * so every message — including ones with missing or empty content —
 * passed validation. Per the associated validation message ("must be a
 * non-empty string"), missing, null, empty/blank-string, and empty-array
 * content are now rejected.
 */
function isValidChatContent(msg) {
    if (!('content' in msg) || msg.content === null || msg.content === undefined) {
        return false;
    }
    if (typeof msg.content === 'string') {
        return msg.content.trim().length > 0;
    }
    if (Array.isArray(msg.content)) {
        return msg.content.length > 0;
    }
    // Other shapes are left to the type system (Content is string | ContentPart[]).
    return true;
}
61
// Human-readable list of supported roles, used in error messages.
function getChatRoles() {
    return [...exports.ChatRoles].join(', ');
}
64
// top_p is optional; when given it must be a finite number in [0, 1].
function validateTopP(topP) {
    if (topP === undefined) {
        return [];
    }
    const inRange = Number.isFinite(topP) && topP >= 0 && topP <= 1;
    return inRange ? [] : [text_1.Text.error.invalidTopP(topP)];
}
71
// Field validators run in a fixed order; their messages are concatenated
// in this order when a PromptValidationError is reported.
const validatorFns = [
    (prompt) => validateModel(prompt.model),
    (prompt) => validateTemperature(prompt.temperature),
    (prompt) => validateMaxCompletionTokens(prompt.max_completion_tokens),
    (prompt) => validateMessages(prompt.messages),
    (prompt) => validateTopP(prompt.top_p)
];
// Collects every validation message across all field validators.
function getValidationErrors(prompt) {
    const collected = [];
    for (const validator of validatorFns) {
        collected.push(...validator(prompt));
    }
    return collected;
}
/**
 * Validates a chat prompt, throwing a PromptValidationError with a
 * combined message when the prompt is missing or any field check fails.
 */
const validatePrompt = (prompt) => {
    if (!promptIsPresent(prompt)) {
        throw new errors_1.PromptValidationError(`${text_1.Text.error.invalidPayload} ${text_1.Text.error.promptRequired}`);
    }
    const errors = getValidationErrors(prompt);
    if (errors.length > 0) {
        throw new errors_1.PromptValidationError(`${text_1.Text.error.invalidPayload} ${errors.join('; ')}`);
    }
};
exports.validatePrompt = validatePrompt;
package/package.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "name": "@forge/llm",
3
+ "version": "0.0.1-experimental-994fcd3",
4
+ "description": "Forge LLM SDK",
5
+ "main": "out/index.js",
6
+ "types": "out/index.d.ts",
7
+ "files": [
8
+ "out"
9
+ ],
10
+ "author": "Atlassian",
11
+ "license": "SEE LICENSE IN LICENSE.txt",
12
+ "dependencies": {
13
+ "@forge/api": "^6.2.0-next.1-experimental-994fcd3"
14
+ },
15
+ "devDependencies": {
16
+ "@types/node": "20.19.1",
17
+ "expect-type": "^0.17.3",
18
+ "jest-when": "^3.6.0"
19
+ },
20
+ "scripts": {
21
+ "build": "yarn run clean && yarn run compile",
22
+ "clean": "rm -rf ./out && rm -f tsconfig.tsbuildinfo",
23
+ "compile": "tsc -b -v"
24
+ },
25
+ "publishConfig": {
26
+ "registry": "https://packages.atlassian.com/api/npm/npm-public/"
27
+ }
28
+ }