@inference-gateway/sdk 0.1.5 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,21 @@
+ ## [0.2.0](https://github.com/inference-gateway/typescript-sdk/compare/v0.1.6...v0.2.0) (2025-01-28)
+
+ ### ✨ Features
+
+ * add listModelsByProvider method and update README with new model listing features ([a8d7cd9](https://github.com/inference-gateway/typescript-sdk/commit/a8d7cd9e9332f6455271f4d8f2832631b46d2c3d))
+
+ ### 📚 Documentation
+
+ * add Contributing section to README with reference to CONTRIBUTING.md ([322baae](https://github.com/inference-gateway/typescript-sdk/commit/322baae9110f270615597e647835ed22e4fdbc65))
+ * add CONTRIBUTING.md with guidelines for contributing to the project ([d36b08f](https://github.com/inference-gateway/typescript-sdk/commit/d36b08f1647500795d279dcd5612d5a81c9c4a74))
+ * **openapi:** Download the latest openapi spec from inference-gateway ([733ee1e](https://github.com/inference-gateway/typescript-sdk/commit/733ee1e57d9fc6669bb2ec0197db1c2c772a0283))
+
+ ## [0.1.6](https://github.com/inference-gateway/typescript-sdk/compare/v0.1.5...v0.1.6) (2025-01-23)
+
+ ### 🐛 Bug Fixes
+
+ * update main and types paths in package.json ([f1faad3](https://github.com/inference-gateway/typescript-sdk/commit/f1faad3e257891ae8f2a10729c396e1d30d1af96))
+
  ## [0.1.5](https://github.com/inference-gateway/typescript-sdk/compare/v0.1.4...v0.1.5) (2025-01-23)

  ### 🐛 Bug Fixes
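The headline feature in 0.2.0 is the new `listModelsByProvider` method announced above. As a quick orientation before the file-by-file diff, here is a minimal usage sketch; the local gateway URL mirrors the README examples and is an assumption about your own deployment, not something shipped in the package:

```typescript
import { InferenceGatewayClient, Provider } from '@inference-gateway/sdk';

// Assumes a gateway reachable locally, as in the README examples.
const client = new InferenceGatewayClient('http://localhost:8080');

async function main() {
  // New in 0.2.0: list models for a single provider instead of all providers.
  const providerModels = await client.listModelsByProvider(Provider.OpenAI);
  providerModels.models.forEach((model) => {
    console.log(`${providerModels.provider}: ${model.name}`);
  });
}

main().catch(console.error);
```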
package/README.md CHANGED
@@ -6,9 +6,11 @@ An SDK written in Typescript for the [Inference Gateway](https://github.com/eden
  - [Installation](#installation)
  - [Usage](#usage)
    - [Creating a Client](#creating-a-client)
-   - [Listing Models](#listing-models)
+   - [Listing All Models](#listing-all-models)
+   - [List Models by Provider](#list-models-by-provider)
    - [Generating Content](#generating-content)
    - [Health Check](#health-check)
+ - [Contributing](#contributing)
  - [License](#license)

  ## Installation
@@ -39,16 +41,17 @@ async function main() {
      });
    });

+   // Generate content
    const response = await client.generateContent({
      provider: Provider.Ollama,
      model: 'llama2',
      messages: [
        {
-         role: 'system',
+         role: MessageRole.System,
          content: 'You are a helpful llama',
        },
        {
-         role: 'user',
+         role: MessageRole.User,
          content: 'Tell me a joke',
        },
      ],
@@ -63,9 +66,9 @@ async function main() {
  main();
  ```

- ### Listing Models
+ ### Listing All Models

- To list available models, use the `listModels` method:
+ To list all available models from all providers, use the `listModels` method:

  ```typescript
  try {
@@ -81,22 +84,46 @@ try {
  }
  ```

+ ### List Models by Provider
+
+ To list all available models from a specific provider, use the `listModelsByProvider` method:
+
+ ```typescript
+ try {
+   const providerModels = await client.listModelsByProvider(Provider.OpenAI);
+   console.log(`Provider: ${providerModels.provider}`);
+   providerModels.models.forEach((model) => {
+     console.log(`Model: ${model.name}`);
+   });
+ } catch (error) {
+   console.error('Error:', error);
+ }
+ ```
+
  ### Generating Content

  To generate content using a model, use the `generateContent` method:

  ```typescript
- try {
+ import {
+   InferenceGatewayClient,
+   Message,
+   MessageRole,
+   Provider,
+ } from '@inference-gateway/sdk';
+
+ const client = new InferenceGatewayClient('http://localhost:8080');
+
  const response = await client.generateContent({
    provider: Provider.Ollama,
    model: 'llama2',
    messages: [
      {
-       role: 'system',
+       role: MessageRole.System,
        content: 'You are a helpful llama',
      },
      {
-       role: 'user',
+       role: MessageRole.User,
        content: 'Tell me a joke',
      },
    ],
@@ -122,6 +149,10 @@ try {
  }
  ```

+ ## Contributing
+
+ Please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for information about how to get involved. We welcome issues, questions, and pull requests.
+
  ## License

  This SDK is distributed under the MIT License, see [LICENSE](LICENSE) for more information.
@@ -1,10 +1,11 @@
- import { GenerateContentRequest, GenerateContentResponse, ProviderModels } from './types';
+ import { GenerateContentRequest, GenerateContentResponse, Provider, ProviderModels } from './types';
  export declare class InferenceGatewayClient {
      private baseUrl;
      private authToken?;
      constructor(baseUrl: string, authToken?: string);
      private request;
      listModels(): Promise<ProviderModels[]>;
+     listModelsByProvider(provider: Provider): Promise<ProviderModels>;
      generateContent(params: GenerateContentRequest): Promise<GenerateContentResponse>;
      healthCheck(): Promise<boolean>;
  }
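The declaration file now imports `Provider` so the new method's parameter is typed as the enum rather than a plain string. A hedged illustration of what that buys consumers at compile time (the invalid call is hypothetical and left commented out):

```typescript
import { InferenceGatewayClient, Provider } from '@inference-gateway/sdk';

const client = new InferenceGatewayClient('http://localhost:8080');

async function listAnthropicModelNames() {
  // Accepted: the argument is a Provider enum member, and the result
  // is typed as ProviderModels per the declaration above.
  const providerModels = await client.listModelsByProvider(Provider.Anthropic);
  return providerModels.models.map((model) => model.name);
}

// Rejected by the compiler: a bare string is not assignable to Provider.
// client.listModelsByProvider('not-a-provider');
```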
@@ -29,6 +29,9 @@ class InferenceGatewayClient {
      async listModels() {
          return this.request('/llms');
      }
+     async listModelsByProvider(provider) {
+         return this.request(`/llms/${provider}`);
+     }
      async generateContent(params) {
          return this.request(`/llms/${params.provider}/generate`, {
              method: 'POST',
@@ -7,15 +7,17 @@ export declare enum Provider {
      Cohere = "cohere",
      Anthropic = "anthropic"
  }
+ export declare enum MessageRole {
+     System = "system",
+     User = "user",
+     Assistant = "assistant"
+ }
  export interface Message {
-     role: 'system' | 'user' | 'assistant';
+     role: MessageRole;
      content: string;
  }
  export interface Model {
-     id: string;
-     object: string;
-     owned_by: string;
-     created: number;
+     name: string;
  }
  export interface ProviderModels {
      provider: Provider;
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.Provider = void 0;
+ exports.MessageRole = exports.Provider = void 0;
  var Provider;
  (function (Provider) {
      Provider["Ollama"] = "ollama";
@@ -11,3 +11,9 @@ var Provider;
      Provider["Cohere"] = "cohere";
      Provider["Anthropic"] = "anthropic";
  })(Provider || (exports.Provider = Provider = {}));
+ var MessageRole;
+ (function (MessageRole) {
+     MessageRole["System"] = "system";
+     MessageRole["User"] = "user";
+     MessageRole["Assistant"] = "assistant";
+ })(MessageRole || (exports.MessageRole = MessageRole = {}));
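Two of the type changes above are breaking for callers: message roles move from string literals to the `MessageRole` enum, and `Model` shrinks to a single `name` field. A rough migration sketch, assuming code that previously read `model.id` (the model parameter is typed structurally here because the diff does not show whether `Model` is re-exported from the package root):

```typescript
import { Message, MessageRole } from '@inference-gateway/sdk';

// Roles are now enum members rather than raw 'system' | 'user' | 'assistant' strings.
const systemPrompt: Message = {
  role: MessageRole.System,
  content: 'You are a helpful llama',
};

// Model now carries only `name`; `id`, `object`, `owned_by`, and `created` are gone.
function describeModel(model: { name: string }): string {
  return `model: ${model.name}`; // previously this would have used model.id
}
```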
@@ -16,10 +16,7 @@ describe('InferenceGatewayClient', () => {
                    provider: types_1.Provider.Ollama,
                    models: [
                        {
-                           id: 'llama2',
-                           object: 'model',
-                           owned_by: 'ollama',
-                           created: 1234567890,
+                           name: 'llama2',
                        },
                    ],
                },
@@ -35,20 +32,50 @@ describe('InferenceGatewayClient', () => {
              }));
          });
      });
+     describe('listModelsByProvider', () => {
+         it('should fetch models for a specific provider', async () => {
+             const mockResponse = {
+                 provider: types_1.Provider.OpenAI,
+                 models: [
+                     {
+                         name: 'gpt-4',
+                     },
+                 ],
+             };
+             global.fetch.mockResolvedValueOnce({
+                 ok: true,
+                 json: () => Promise.resolve(mockResponse),
+             });
+             const result = await client.listModelsByProvider(types_1.Provider.OpenAI);
+             expect(result).toEqual(mockResponse);
+             expect(global.fetch).toHaveBeenCalledWith(`${mockBaseUrl}/llms/${types_1.Provider.OpenAI}`, expect.objectContaining({
+                 headers: expect.any(Headers),
+             }));
+         });
+         it('should throw error when provider request fails', async () => {
+             const errorMessage = 'Provider not found';
+             global.fetch.mockResolvedValueOnce({
+                 ok: false,
+                 status: 404,
+                 json: () => Promise.resolve({ error: errorMessage }),
+             });
+             await expect(client.listModelsByProvider(types_1.Provider.OpenAI)).rejects.toThrow(errorMessage);
+         });
+     });
      describe('generateContent', () => {
          it('should generate content with the specified provider', async () => {
              const mockRequest = {
                  provider: types_1.Provider.Ollama,
                  model: 'llama2',
                  messages: [
-                     { role: 'system', content: 'You are a helpful assistant' },
-                     { role: 'user', content: 'Hello' },
+                     { role: types_1.MessageRole.System, content: 'You are a helpful assistant' },
+                     { role: types_1.MessageRole.User, content: 'Hello' },
                  ],
              };
              const mockResponse = {
                  provider: types_1.Provider.Ollama,
                  response: {
-                     role: 'assistant',
+                     role: types_1.MessageRole.Assistant,
                      model: 'llama2',
                      content: 'Hi there!',
                  },
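The new tests follow the suite's existing pattern of stubbing `global.fetch` and asserting on the request URL. For anyone testing code built on the SDK the same way, here is a condensed TypeScript sketch of that pattern; the `mockBaseUrl` name mirrors the test file above, while the Jest setup and the fetch cast are assumptions about your own test environment:

```typescript
import { InferenceGatewayClient, Provider } from '@inference-gateway/sdk';

const mockBaseUrl = 'http://localhost:8080';
const client = new InferenceGatewayClient(mockBaseUrl);

it('requests /llms/<provider> for provider-scoped listings', async () => {
  const mockResponse = { provider: Provider.OpenAI, models: [{ name: 'gpt-4' }] };

  // Stub fetch so the test never talks to a real gateway.
  global.fetch = jest.fn().mockResolvedValueOnce({
    ok: true,
    json: () => Promise.resolve(mockResponse),
  }) as unknown as typeof fetch;

  await expect(client.listModelsByProvider(Provider.OpenAI)).resolves.toEqual(mockResponse);
  expect(global.fetch).toHaveBeenCalledWith(
    `${mockBaseUrl}/llms/${Provider.OpenAI}`,
    expect.objectContaining({ headers: expect.any(Headers) })
  );
});
```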
package/package.json CHANGED
@@ -1,9 +1,9 @@
  {
    "name": "@inference-gateway/sdk",
-   "version": "0.1.5",
+   "version": "0.2.0",
    "description": "An SDK written in Typescript for the [Inference Gateway](https://github.com/inference-gateway/inference-gateway).",
-   "main": "dist/index.js",
-   "types": "dist/index.d.ts",
+   "main": "dist/src/index.js",
+   "types": "dist/src/index.d.ts",
    "type": "commonjs",
    "private": false,
    "keywords": [