@azure-rest/ai-inference 1.0.0-alpha.20241111.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +550 -0
- package/dist/browser/clientDefinitions.d.ts +47 -0
- package/dist/browser/clientDefinitions.d.ts.map +1 -0
- package/dist/browser/clientDefinitions.js +4 -0
- package/dist/browser/clientDefinitions.js.map +1 -0
- package/dist/browser/constants.d.ts +2 -0
- package/dist/browser/constants.d.ts.map +1 -0
- package/dist/browser/constants.js +4 -0
- package/dist/browser/constants.js.map +1 -0
- package/dist/browser/index.d.ts +10 -0
- package/dist/browser/index.d.ts.map +1 -0
- package/dist/browser/index.js +12 -0
- package/dist/browser/index.js.map +1 -0
- package/dist/browser/isUnexpected.d.ts +6 -0
- package/dist/browser/isUnexpected.d.ts.map +1 -0
- package/dist/browser/isUnexpected.js +73 -0
- package/dist/browser/isUnexpected.js.map +1 -0
- package/dist/browser/logger.d.ts +2 -0
- package/dist/browser/logger.d.ts.map +1 -0
- package/dist/browser/logger.js +5 -0
- package/dist/browser/logger.js.map +1 -0
- package/dist/browser/modelClient.d.ts +16 -0
- package/dist/browser/modelClient.d.ts.map +1 -0
- package/dist/browser/modelClient.js +61 -0
- package/dist/browser/modelClient.js.map +1 -0
- package/dist/browser/models.d.ts +179 -0
- package/dist/browser/models.d.ts.map +1 -0
- package/dist/browser/models.js +4 -0
- package/dist/browser/models.js.map +1 -0
- package/dist/browser/outputModels.d.ts +140 -0
- package/dist/browser/outputModels.d.ts.map +1 -0
- package/dist/browser/outputModels.js +4 -0
- package/dist/browser/outputModels.js.map +1 -0
- package/dist/browser/package.json +3 -0
- package/dist/browser/parameters.d.ts +82 -0
- package/dist/browser/parameters.d.ts.map +1 -0
- package/dist/browser/parameters.js +4 -0
- package/dist/browser/parameters.js.map +1 -0
- package/dist/browser/responses.d.ts +60 -0
- package/dist/browser/responses.d.ts.map +1 -0
- package/dist/browser/responses.js +4 -0
- package/dist/browser/responses.js.map +1 -0
- package/dist/browser/tracingHelper.d.ts +9 -0
- package/dist/browser/tracingHelper.d.ts.map +1 -0
- package/dist/browser/tracingHelper.js +198 -0
- package/dist/browser/tracingHelper.js.map +1 -0
- package/dist/browser/tracingPolicy.d.ts +12 -0
- package/dist/browser/tracingPolicy.d.ts.map +1 -0
- package/dist/browser/tracingPolicy.js +68 -0
- package/dist/browser/tracingPolicy.js.map +1 -0
- package/dist/commonjs/clientDefinitions.d.ts +47 -0
- package/dist/commonjs/clientDefinitions.d.ts.map +1 -0
- package/dist/commonjs/clientDefinitions.js +5 -0
- package/dist/commonjs/clientDefinitions.js.map +1 -0
- package/dist/commonjs/constants.d.ts +2 -0
- package/dist/commonjs/constants.d.ts.map +1 -0
- package/dist/commonjs/constants.js +7 -0
- package/dist/commonjs/constants.js.map +1 -0
- package/dist/commonjs/index.d.ts +10 -0
- package/dist/commonjs/index.d.ts.map +1 -0
- package/dist/commonjs/index.js +15 -0
- package/dist/commonjs/index.js.map +1 -0
- package/dist/commonjs/isUnexpected.d.ts +6 -0
- package/dist/commonjs/isUnexpected.d.ts.map +1 -0
- package/dist/commonjs/isUnexpected.js +76 -0
- package/dist/commonjs/isUnexpected.js.map +1 -0
- package/dist/commonjs/logger.d.ts +2 -0
- package/dist/commonjs/logger.d.ts.map +1 -0
- package/dist/commonjs/logger.js +8 -0
- package/dist/commonjs/logger.js.map +1 -0
- package/dist/commonjs/modelClient.d.ts +16 -0
- package/dist/commonjs/modelClient.d.ts.map +1 -0
- package/dist/commonjs/modelClient.js +64 -0
- package/dist/commonjs/modelClient.js.map +1 -0
- package/dist/commonjs/models.d.ts +179 -0
- package/dist/commonjs/models.d.ts.map +1 -0
- package/dist/commonjs/models.js +5 -0
- package/dist/commonjs/models.js.map +1 -0
- package/dist/commonjs/outputModels.d.ts +140 -0
- package/dist/commonjs/outputModels.d.ts.map +1 -0
- package/dist/commonjs/outputModels.js +5 -0
- package/dist/commonjs/outputModels.js.map +1 -0
- package/dist/commonjs/package.json +3 -0
- package/dist/commonjs/parameters.d.ts +82 -0
- package/dist/commonjs/parameters.d.ts.map +1 -0
- package/dist/commonjs/parameters.js +5 -0
- package/dist/commonjs/parameters.js.map +1 -0
- package/dist/commonjs/responses.d.ts +60 -0
- package/dist/commonjs/responses.d.ts.map +1 -0
- package/dist/commonjs/responses.js +5 -0
- package/dist/commonjs/responses.js.map +1 -0
- package/dist/commonjs/tracingHelper.d.ts +9 -0
- package/dist/commonjs/tracingHelper.d.ts.map +1 -0
- package/dist/commonjs/tracingHelper.js +205 -0
- package/dist/commonjs/tracingHelper.js.map +1 -0
- package/dist/commonjs/tracingPolicy.d.ts +12 -0
- package/dist/commonjs/tracingPolicy.d.ts.map +1 -0
- package/dist/commonjs/tracingPolicy.js +72 -0
- package/dist/commonjs/tracingPolicy.js.map +1 -0
- package/dist/commonjs/tsdoc-metadata.json +11 -0
- package/dist/esm/clientDefinitions.d.ts +47 -0
- package/dist/esm/clientDefinitions.d.ts.map +1 -0
- package/dist/esm/clientDefinitions.js +4 -0
- package/dist/esm/clientDefinitions.js.map +1 -0
- package/dist/esm/constants.d.ts +2 -0
- package/dist/esm/constants.d.ts.map +1 -0
- package/dist/esm/constants.js +4 -0
- package/dist/esm/constants.js.map +1 -0
- package/dist/esm/index.d.ts +10 -0
- package/dist/esm/index.d.ts.map +1 -0
- package/dist/esm/index.js +12 -0
- package/dist/esm/index.js.map +1 -0
- package/dist/esm/isUnexpected.d.ts +6 -0
- package/dist/esm/isUnexpected.d.ts.map +1 -0
- package/dist/esm/isUnexpected.js +73 -0
- package/dist/esm/isUnexpected.js.map +1 -0
- package/dist/esm/logger.d.ts +2 -0
- package/dist/esm/logger.d.ts.map +1 -0
- package/dist/esm/logger.js +5 -0
- package/dist/esm/logger.js.map +1 -0
- package/dist/esm/modelClient.d.ts +16 -0
- package/dist/esm/modelClient.d.ts.map +1 -0
- package/dist/esm/modelClient.js +61 -0
- package/dist/esm/modelClient.js.map +1 -0
- package/dist/esm/models.d.ts +179 -0
- package/dist/esm/models.d.ts.map +1 -0
- package/dist/esm/models.js +4 -0
- package/dist/esm/models.js.map +1 -0
- package/dist/esm/outputModels.d.ts +140 -0
- package/dist/esm/outputModels.d.ts.map +1 -0
- package/dist/esm/outputModels.js +4 -0
- package/dist/esm/outputModels.js.map +1 -0
- package/dist/esm/package.json +3 -0
- package/dist/esm/parameters.d.ts +82 -0
- package/dist/esm/parameters.d.ts.map +1 -0
- package/dist/esm/parameters.js +4 -0
- package/dist/esm/parameters.js.map +1 -0
- package/dist/esm/responses.d.ts +60 -0
- package/dist/esm/responses.d.ts.map +1 -0
- package/dist/esm/responses.js +4 -0
- package/dist/esm/responses.js.map +1 -0
- package/dist/esm/tracingHelper.d.ts +9 -0
- package/dist/esm/tracingHelper.d.ts.map +1 -0
- package/dist/esm/tracingHelper.js +198 -0
- package/dist/esm/tracingHelper.js.map +1 -0
- package/dist/esm/tracingPolicy.d.ts +12 -0
- package/dist/esm/tracingPolicy.d.ts.map +1 -0
- package/dist/esm/tracingPolicy.js +68 -0
- package/dist/esm/tracingPolicy.js.map +1 -0
- package/dist/react-native/clientDefinitions.d.ts +47 -0
- package/dist/react-native/clientDefinitions.d.ts.map +1 -0
- package/dist/react-native/clientDefinitions.js +4 -0
- package/dist/react-native/clientDefinitions.js.map +1 -0
- package/dist/react-native/constants.d.ts +2 -0
- package/dist/react-native/constants.d.ts.map +1 -0
- package/dist/react-native/constants.js +4 -0
- package/dist/react-native/constants.js.map +1 -0
- package/dist/react-native/index.d.ts +10 -0
- package/dist/react-native/index.d.ts.map +1 -0
- package/dist/react-native/index.js +12 -0
- package/dist/react-native/index.js.map +1 -0
- package/dist/react-native/isUnexpected.d.ts +6 -0
- package/dist/react-native/isUnexpected.d.ts.map +1 -0
- package/dist/react-native/isUnexpected.js +73 -0
- package/dist/react-native/isUnexpected.js.map +1 -0
- package/dist/react-native/logger.d.ts +2 -0
- package/dist/react-native/logger.d.ts.map +1 -0
- package/dist/react-native/logger.js +5 -0
- package/dist/react-native/logger.js.map +1 -0
- package/dist/react-native/modelClient.d.ts +16 -0
- package/dist/react-native/modelClient.d.ts.map +1 -0
- package/dist/react-native/modelClient.js +61 -0
- package/dist/react-native/modelClient.js.map +1 -0
- package/dist/react-native/models.d.ts +179 -0
- package/dist/react-native/models.d.ts.map +1 -0
- package/dist/react-native/models.js +4 -0
- package/dist/react-native/models.js.map +1 -0
- package/dist/react-native/outputModels.d.ts +140 -0
- package/dist/react-native/outputModels.d.ts.map +1 -0
- package/dist/react-native/outputModels.js +4 -0
- package/dist/react-native/outputModels.js.map +1 -0
- package/dist/react-native/package.json +3 -0
- package/dist/react-native/parameters.d.ts +82 -0
- package/dist/react-native/parameters.d.ts.map +1 -0
- package/dist/react-native/parameters.js +4 -0
- package/dist/react-native/parameters.js.map +1 -0
- package/dist/react-native/responses.d.ts +60 -0
- package/dist/react-native/responses.d.ts.map +1 -0
- package/dist/react-native/responses.js +4 -0
- package/dist/react-native/responses.js.map +1 -0
- package/dist/react-native/tracingHelper.d.ts +9 -0
- package/dist/react-native/tracingHelper.d.ts.map +1 -0
- package/dist/react-native/tracingHelper.js +198 -0
- package/dist/react-native/tracingHelper.js.map +1 -0
- package/dist/react-native/tracingPolicy.d.ts +12 -0
- package/dist/react-native/tracingPolicy.d.ts.map +1 -0
- package/dist/react-native/tracingPolicy.js +68 -0
- package/dist/react-native/tracingPolicy.js.map +1 -0
- package/package.json +141 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
The MIT License (MIT)
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 Microsoft
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,550 @@
|
|
|
1
|
+
# Azure Inference REST client library for JavaScript
|
|
2
|
+
|
|
3
|
+
Inference API for Azure-supported AI models
|
|
4
|
+
|
|
5
|
+
**Please rely heavily on our [REST client docs](https://github.com/Azure/azure-sdk-for-js/blob/main/documentation/rest-clients.md) to use this library**
|
|
6
|
+
|
|
7
|
+
Key links:
|
|
8
|
+
|
|
9
|
+
- [Source code](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/ai/ai-inference-rest)
|
|
10
|
+
- [Package (NPM)](https://aka.ms/npm-azure-rest-ai-inference)
|
|
11
|
+
- [API reference documentation](https://aka.ms/AAp1kxa)
|
|
12
|
+
- [Samples](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/ai/ai-inference-rest/samples)
|
|
13
|
+
|
|
14
|
+
## Getting started
|
|
15
|
+
|
|
16
|
+
```javascript
|
|
17
|
+
import ModelClient, { isUnexpected } from "@azure-rest/ai-inference";
|
|
18
|
+
import { AzureKeyCredential } from "@azure/core-auth";
|
|
19
|
+
const client = new ModelClient(
|
|
20
|
+
"https://<Azure Model endpoint>",
|
|
21
|
+
new AzureKeyCredential("<Azure API key>")
|
|
22
|
+
);
|
|
23
|
+
|
|
24
|
+
const response = await client.path("/chat/completions").post({
|
|
25
|
+
body: {
|
|
26
|
+
messages: [
|
|
27
|
+
{role: "user", content: "How many feet are in a mile?"},
|
|
28
|
+
],
|
|
29
|
+
}
|
|
30
|
+
});
|
|
31
|
+
|
|
32
|
+
if(isUnexpected(response)) {
|
|
33
|
+
throw response.body.error;
|
|
34
|
+
}
|
|
35
|
+
console.log(response.body.choices[0].message.content);
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
### Currently supported environments
|
|
39
|
+
|
|
40
|
+
- LTS versions of Node.js
|
|
41
|
+
|
|
42
|
+
### Prerequisites
|
|
43
|
+
|
|
44
|
+
- You must have an [Azure subscription](https://azure.microsoft.com/free/) to use this package.
|
|
45
|
+
|
|
46
|
+
### Install the `@azure-rest/ai-inference` package
|
|
47
|
+
|
|
48
|
+
Install the Azure ModelClient REST client library for JavaScript with `npm`:
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
npm install @azure-rest/ai-inference
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
### Create and authenticate a `ModelClient`
|
|
55
|
+
#### Using an API Key from Azure
|
|
56
|
+
|
|
57
|
+
You can authenticate with an Azure API key using the [Azure Core Auth library][azure_core_auth]. To use the AzureKeyCredential provider shown below, please install the `@azure/core-auth` package:
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
npm install @azure/core-auth
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
Use the [Azure Portal][azure_portal] to browse to your Model deployment and retrieve an API key.
|
|
64
|
+
|
|
65
|
+
**Note:** Sometimes the API key is referred to as a "subscription key" or "subscription API key."
|
|
66
|
+
|
|
67
|
+
Once you have an API key and endpoint, you can use the `AzureKeyCredential` class to authenticate the client as follows:
|
|
68
|
+
|
|
69
|
+
```javascript
|
|
70
|
+
import ModelClient from "@azure-rest/ai-inference";
|
|
71
|
+
import { AzureKeyCredential } from "@azure/core-auth";
|
|
72
|
+
|
|
73
|
+
const client = new ModelClient("<endpoint>", new AzureKeyCredential("<API key>"));
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
#### Using an Azure Active Directory Credential
|
|
77
|
+
|
|
78
|
+
You can also authenticate with Azure Active Directory using the [Azure Identity library][azure_identity]. To use the [DefaultAzureCredential][defaultazurecredential] provider shown below,
|
|
79
|
+
or other credential providers provided with the Azure SDK, please install the `@azure/identity` package:
|
|
80
|
+
|
|
81
|
+
```bash
|
|
82
|
+
npm install @azure/identity
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables: `AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`.
|
|
86
|
+
|
|
87
|
+
```javascript
|
|
88
|
+
import ModelClient from "@azure-rest/ai-inference";
|
|
89
|
+
import { DefaultAzureCredential } from "@azure/identity";
|
|
90
|
+
|
|
91
|
+
const client = new ModelClient("<endpoint>", new DefaultAzureCredential());
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
## Key concepts
|
|
95
|
+
|
|
96
|
+
The main concept to understand is [Completions][azure_openai_completions_docs]. Briefly explained, completions provides its functionality in the form of a text prompt, which by using a specific [model](https://learn.microsoft.com/azure/cognitive-services/openai/concepts/models), will then attempt to match the context and patterns, providing an output text. The following code snippet provides a rough overview:
|
|
97
|
+
|
|
98
|
+
```javascript
|
|
99
|
+
import ModelClient, { isUnexpected } from "@azure-rest/ai-inference";
|
|
100
|
+
import { AzureKeyCredential } from "@azure/core-auth";
|
|
101
|
+
|
|
102
|
+
async function main(){
|
|
103
|
+
const client = new ModelClient(
|
|
104
|
+
"https://your-model-endpoint/",
|
|
105
|
+
new AzureKeyCredential("your-model-api-key"));
|
|
106
|
+
|
|
107
|
+
const response = await client.path("/chat/completions").post({
|
|
108
|
+
body: {
|
|
109
|
+
messages: [
|
|
110
|
+
{role: "user", content: "Hello, world!"},
|
|
111
|
+
],
|
|
112
|
+
}
|
|
113
|
+
});
|
|
114
|
+
|
|
115
|
+
if(isUnexpected(response)) {
|
|
116
|
+
throw response.body.error;
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
console.log(response.body.choices[0].message.content);
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
main().catch((err) => {
|
|
123
|
+
console.error("The sample encountered an error:", err);
|
|
124
|
+
});
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
## Examples
|
|
128
|
+
|
|
129
|
+
### Generate Chatbot Response
|
|
130
|
+
|
|
131
|
+
Streaming chat with the Inference SDK requires core streaming support; to enable this support, please install the `@azure/core-sse` package:
|
|
132
|
+
|
|
133
|
+
```bash
|
|
134
|
+
npm install @azure/core-sse
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
This example authenticates using a DefaultAzureCredential, then generates chat responses to input chat question and messages.
|
|
138
|
+
|
|
139
|
+
```javascript
|
|
140
|
+
import ModelClient from "@azure-rest/ai-inference";
|
|
141
|
+
import { DefaultAzureCredential } from "@azure/identity";
|
|
142
|
+
import { createSseStream } from "@azure/core-sse";
|
|
143
|
+
|
|
144
|
+
async function main(){
|
|
145
|
+
const endpoint = "https://myaccount.openai.azure.com/";
|
|
146
|
+
const client = new ModelClient(endpoint, new DefaultAzureCredential());
|
|
147
|
+
|
|
148
|
+
const messages = [
|
|
149
|
+
// NOTE: "system" role is not supported on all Azure Models
|
|
150
|
+
{ role: "system", content: "You are a helpful assistant. You will talk like a pirate." },
|
|
151
|
+
{ role: "user", content: "Can you help me?" },
|
|
152
|
+
{ role: "assistant", content: "Arrrr! Of course, me hearty! What can I do for ye?" },
|
|
153
|
+
{ role: "user", content: "What's the best way to train a parrot?" },
|
|
154
|
+
];
|
|
155
|
+
|
|
156
|
+
console.log(`Messages: ${messages.map((m) => m.content).join("\n")}`);
|
|
157
|
+
|
|
158
|
+
const response = await client.path("/chat/completions").post({
|
|
159
|
+
body: {
|
|
160
|
+
messages,
|
|
161
|
+
stream: true,
|
|
162
|
+
max_tokens: 128
|
|
163
|
+
}
|
|
164
|
+
}).asNodeStream();
|
|
165
|
+
|
|
166
|
+
const stream = response.body;
|
|
167
|
+
if (!stream) {
|
|
168
|
+
throw new Error("The response stream is undefined");
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
if (response.status !== "200") {
|
|
172
|
+
throw new Error(`Failed to get chat completions: ${response.body.error}`);
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
const sses = createSseStream(stream);
|
|
176
|
+
|
|
177
|
+
for await (const event of sses) {
|
|
178
|
+
if (event.data === "[DONE]") {
|
|
179
|
+
return;
|
|
180
|
+
}
|
|
181
|
+
for (const choice of (JSON.parse(event.data)).choices) {
|
|
182
|
+
console.log(choice.delta?.content ?? "");
|
|
183
|
+
}
|
|
184
|
+
}
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
main().catch((err) => {
|
|
188
|
+
console.error("The sample encountered an error:", err);
|
|
189
|
+
});
|
|
190
|
+
```
|
|
191
|
+
|
|
192
|
+
### Generate Multiple Completions With Subscription Key
|
|
193
|
+
|
|
194
|
+
This example generates text responses to input prompts using an Azure subscription key
|
|
195
|
+
|
|
196
|
+
```javascript
|
|
197
|
+
import ModelClient from "@azure-rest/ai-inference";
|
|
198
|
+
import { AzureKeyCredential } from "@azure/core-auth";
|
|
199
|
+
|
|
200
|
+
async function main(){
|
|
201
|
+
// Replace with your Model API key
|
|
202
|
+
const key = "YOUR_MODEL_API_KEY";
|
|
203
|
+
const endpoint = "https://your-model-endpoint/";
|
|
204
|
+
const client = new ModelClient(endpoint, new AzureKeyCredential(key));
|
|
205
|
+
|
|
206
|
+
const messages = [
|
|
207
|
+
{ role: "user", content: "How are you today?" },
|
|
208
|
+
{ role: "user", content: "What is inference in the context of AI?" },
|
|
209
|
+
{ role: "user", content: "Why do children love dinosaurs?" },
|
|
210
|
+
{ role: "user", content: "Generate a proof of Euler's identity" },
|
|
211
|
+
{ role: "user", content: "Describe in single words only the good things that come into your mind about your mother." },
|
|
212
|
+
];
|
|
213
|
+
|
|
214
|
+
let promptIndex = 0;
|
|
215
|
+
const response = await client.path("/chat/completions").post({
|
|
216
|
+
body: {
|
|
217
|
+
messages
|
|
218
|
+
}
|
|
219
|
+
});
|
|
220
|
+
|
|
221
|
+
if(response.status !== "200") {
|
|
222
|
+
throw response.body.error;
|
|
223
|
+
}
|
|
224
|
+
for (const choice of response.body.choices) {
|
|
225
|
+
const completion = choice.message.content;
|
|
226
|
+
console.log(`Input: ${messages[promptIndex++].content}`);
|
|
227
|
+
console.log(`Chatbot: ${completion}`);
|
|
228
|
+
}
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
main().catch((err) => {
|
|
232
|
+
console.error("The sample encountered an error:", err);
|
|
233
|
+
});
|
|
234
|
+
```
|
|
235
|
+
|
|
236
|
+
### Summarize Text with Completion
|
|
237
|
+
|
|
238
|
+
This example generates a summarization of the given input prompt.
|
|
239
|
+
|
|
240
|
+
```javascript
|
|
241
|
+
import ModelClient from "@azure-rest/ai-inference";
|
|
242
|
+
import { DefaultAzureCredential } from "@azure/identity";
|
|
243
|
+
|
|
244
|
+
async function main(){
|
|
245
|
+
const endpoint = "https://your-model-endpoint/";
|
|
246
|
+
const client = new ModelClient(endpoint, new DefaultAzureCredential());
|
|
247
|
+
|
|
248
|
+
const textToSummarize = `
|
|
249
|
+
Two independent experiments reported their results this morning at CERN, Europe's high-energy physics laboratory near Geneva in Switzerland. Both show convincing evidence of a new boson particle weighing around 125 gigaelectronvolts, which so far fits predictions of the Higgs previously made by theoretical physicists.
|
|
250
|
+
|
|
251
|
+
""As a layman I would say: 'I think we have it'. Would you agree?"" Rolf-Dieter Heuer, CERN's director-general, asked the packed auditorium. The physicists assembled there burst into applause.
|
|
252
|
+
:`;
|
|
253
|
+
|
|
254
|
+
const summarizationPrompt = `
|
|
255
|
+
Summarize the following text.
|
|
256
|
+
|
|
257
|
+
Text:
|
|
258
|
+
""""""
|
|
259
|
+
${textToSummarize}
|
|
260
|
+
""""""
|
|
261
|
+
|
|
262
|
+
Summary:
|
|
263
|
+
`;
|
|
264
|
+
|
|
265
|
+
console.log(`Input: ${summarizationPrompt}`);
|
|
266
|
+
|
|
267
|
+
const response = await client.path("/chat/completions").post({
|
|
268
|
+
body: {
|
|
269
|
+
messages: [
|
|
270
|
+
{ role: "user", content: summarizationPrompt }
|
|
271
|
+
],
|
|
272
|
+
max_tokens: 64
|
|
273
|
+
}
|
|
274
|
+
});
|
|
275
|
+
|
|
276
|
+
if(response.status !== "200") {
|
|
277
|
+
throw response.body.error;
|
|
278
|
+
}
|
|
279
|
+
const completion = response.body.choices[0].message.content;
|
|
280
|
+
console.log(`Summarization: ${completion}`);
|
|
281
|
+
}
|
|
282
|
+
|
|
283
|
+
main().catch((err) => {
|
|
284
|
+
console.error("The sample encountered an error:", err);
|
|
285
|
+
});
|
|
286
|
+
```
|
|
287
|
+
|
|
288
|
+
### Use chat tools
|
|
289
|
+
|
|
290
|
+
**Tools** extend chat completions by allowing an assistant to invoke defined functions and other capabilities in the
|
|
291
|
+
process of fulfilling a chat completions request. To use chat tools, start by defining a function tool:
|
|
292
|
+
|
|
293
|
+
```js
|
|
294
|
+
const getCurrentWeather = {
|
|
295
|
+
name: "get_current_weather",
|
|
296
|
+
description: "Get the current weather in a given location",
|
|
297
|
+
parameters: {
|
|
298
|
+
type: "object",
|
|
299
|
+
properties: {
|
|
300
|
+
location: {
|
|
301
|
+
type: "string",
|
|
302
|
+
description: "The city and state, e.g. San Francisco, CA",
|
|
303
|
+
},
|
|
304
|
+
unit: {
|
|
305
|
+
type: "string",
|
|
306
|
+
enum: ["celsius", "fahrenheit"],
|
|
307
|
+
},
|
|
308
|
+
},
|
|
309
|
+
required: ["location"],
|
|
310
|
+
},
|
|
311
|
+
};
|
|
312
|
+
```
|
|
313
|
+
|
|
314
|
+
With the tool defined, include that new definition in the options for a chat completions request:
|
|
315
|
+
|
|
316
|
+
```js
|
|
317
|
+
const messages = [{ role: "user", content: "What is the weather like in Boston?" }];
|
|
318
|
+
const tools = [
|
|
319
|
+
{
|
|
320
|
+
type: "function",
|
|
321
|
+
function: getCurrentWeather,
|
|
322
|
+
},
|
|
323
|
+
];
|
|
324
|
+
const result = await client.path("/chat/completions").post({
|
|
325
|
+
body: {
|
|
326
|
+
messages,
|
|
327
|
+
tools
|
|
328
|
+
}
|
|
329
|
+
});
|
|
330
|
+
```
|
|
331
|
+
|
|
332
|
+
When the assistant decides that one or more tools should be used, the response message includes one or more "tool
|
|
333
|
+
calls" that must all be resolved via "tool messages" on the subsequent request. This resolution of tool calls into
|
|
334
|
+
new request messages can be thought of as a sort of "callback" for chat completions.
|
|
335
|
+
|
|
336
|
+
```js
|
|
337
|
+
// Purely for convenience and clarity, this function handles tool call responses.
|
|
338
|
+
function applyToolCall({ function: call, id }) {
|
|
339
|
+
if (call.name === "get_current_weather") {
|
|
340
|
+
const { location, unit } = JSON.parse(call.arguments);
|
|
341
|
+
// In a real application, this would be a call to a weather API with location and unit parameters
|
|
342
|
+
return {
|
|
343
|
+
role: "tool",
|
|
344
|
+
content: `The weather in ${location} is 72 degrees ${unit} and sunny.`,
|
|
345
|
+
toolCallId: id,
|
|
346
|
+
}
|
|
347
|
+
}
|
|
348
|
+
throw new Error(`Unknown tool call: ${call.name}`);
|
|
349
|
+
}
|
|
350
|
+
```
|
|
351
|
+
|
|
352
|
+
To provide tool call resolutions to the assistant to allow the request to continue, provide all prior historical
|
|
353
|
+
context -- including the original system and user messages, the response from the assistant that included the tool
|
|
354
|
+
calls, and the tool messages that resolved each of those tools -- when making a subsequent request.
|
|
355
|
+
|
|
356
|
+
```js
|
|
357
|
+
const choice = result.body.choices[0];
|
|
358
|
+
const responseMessage = choice.message;
|
|
359
|
+
if (responseMessage?.role === "assistant") {
|
|
360
|
+
const requestedToolCalls = responseMessage?.toolCalls;
|
|
361
|
+
if (requestedToolCalls?.length) {
|
|
362
|
+
const toolCallResolutionMessages = [
|
|
363
|
+
...messages,
|
|
364
|
+
responseMessage,
|
|
365
|
+
...requestedToolCalls.map(applyToolCall),
|
|
366
|
+
];
|
|
367
|
+
const toolCallResolutionResult = await client.path("/chat/completions").post({
|
|
368
|
+
body: {
|
|
369
|
+
messages: toolCallResolutionMessages
|
|
370
|
+
}
|
|
371
|
+
});
|
|
372
|
+
// continue handling the response as normal
|
|
373
|
+
}
|
|
374
|
+
}
|
|
375
|
+
```
|
|
376
|
+
|
|
377
|
+
### Chat with images (using models supporting image chat, such as gpt-4o)
|
|
378
|
+
|
|
379
|
+
Some Azure models allow you to use images as input components into chat completions.
|
|
380
|
+
|
|
381
|
+
To do this, provide distinct content items on the user message(s) for the chat completions request:
|
|
382
|
+
|
|
383
|
+
```js
|
|
384
|
+
const url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg";
|
|
385
|
+
const messages = [{
|
|
386
|
+
role: "user", content: [{
|
|
387
|
+
type: "image_url",
|
|
388
|
+
image_url: {
|
|
389
|
+
url,
|
|
390
|
+
detail: "auto"
|
|
391
|
+
}
|
|
392
|
+
}]},
|
|
393
|
+
{role: "user", content: "describe the image"}];
|
|
394
|
+
```
|
|
395
|
+
|
|
396
|
+
Chat Completions will then proceed as usual, though the model may report the more informative `finish_details` in lieu
|
|
397
|
+
of `finish_reason`:
|
|
398
|
+
|
|
399
|
+
```js
|
|
400
|
+
const response = await client.path("/chat/completions").post({
|
|
401
|
+
body: {
|
|
402
|
+
messages
|
|
403
|
+
  }
});
|
|
404
|
+
console.log(`Chatbot: ${response.body.choices[0].message?.content}`);
|
|
405
|
+
```
|
|
406
|
+
|
|
407
|
+
### Text Embeddings example
|
|
408
|
+
|
|
409
|
+
This example demonstrates how to get text embeddings with Entra ID authentication.
|
|
410
|
+
|
|
411
|
+
```javascript
|
|
412
|
+
import ModelClient, { isUnexpected } from "@azure-rest/ai-inference";
|
|
413
|
+
import { DefaultAzureCredential } from "@azure/identity";
|
|
414
|
+
|
|
415
|
+
const endpoint = "<your_model_endpoint>";
|
|
416
|
+
const credential = new DefaultAzureCredential();
|
|
417
|
+
|
|
418
|
+
async function main(){
|
|
419
|
+
const client = ModelClient(endpoint, credential);
|
|
420
|
+
const response = await client.path("/embeddings").post({
|
|
421
|
+
body: {
|
|
422
|
+
input: ["first phrase", "second phrase", "third phrase"]
|
|
423
|
+
}
|
|
424
|
+
});
|
|
425
|
+
|
|
426
|
+
if (isUnexpected(response)) {
|
|
427
|
+
throw response.body.error;
|
|
428
|
+
}
|
|
429
|
+
for (const data of response.body.data) {
|
|
430
|
+
console.log(`data length: ${data.length}, [${data[0]}, ${data[1]}, ..., ${data[data.length - 2]}, ${data[data.length - 1]}]`);
|
|
431
|
+
}
|
|
432
|
+
}
|
|
433
|
+
|
|
434
|
+
main().catch((err) => {
|
|
435
|
+
console.error("The sample encountered an error:", err);
|
|
436
|
+
});
|
|
437
|
+
```
|
|
438
|
+
|
|
439
|
+
The length of the embedding vector depends on the model, but you should see something like this:
|
|
440
|
+
|
|
441
|
+
```text
|
|
442
|
+
data: length=1024, [0.0013399124, -0.01576233, ..., 0.007843018, 0.000238657]
|
|
443
|
+
data: length=1024, [0.036590576, -0.0059547424, ..., 0.011405945, 0.004863739]
|
|
444
|
+
data: length=1024, [0.04196167, 0.029083252, ..., -0.0027484894, 0.0073127747]
|
|
445
|
+
```
|
|
446
|
+
|
|
447
|
+
To generate embeddings for additional phrases, simply call `client.path("/embeddings").post` multiple times using the same `client`.
|
|
448
|
+
|
|
449
|
+
### Instrumentation
|
|
450
|
+
Currently instrumentation is only supported for `Chat Completion without streaming`.
|
|
451
|
+
To enable instrumentation, it is required to register exporter(s).
|
|
452
|
+
|
|
453
|
+
Here is an example that adds the console as an exporter:
|
|
454
|
+
```js
|
|
455
|
+
import { ConsoleSpanExporter, NodeTracerProvider, SimpleSpanProcessor } from "@opentelemetry/sdk-trace-node";
|
|
456
|
+
|
|
457
|
+
const provider = new NodeTracerProvider();
|
|
458
|
+
provider.addSpanProcessor(new SimpleSpanProcessor(new ConsoleSpanExporter()));
|
|
459
|
+
provider.register();
|
|
460
|
+
|
|
461
|
+
```
|
|
462
|
+
|
|
463
|
+
Here is an example that adds Application Insights as an exporter:
|
|
464
|
+
|
|
465
|
+
```js
|
|
466
|
+
import { NodeTracerProvider, SimpleSpanProcessor } from "@opentelemetry/sdk-trace-node";
|
|
467
|
+
import { AzureMonitorTraceExporter } from "@azure/monitor-opentelemetry-exporter";
|
|
468
|
+
|
|
469
|
+
// provide a connection string
|
|
470
|
+
const connectionString = "<connection string>";
|
|
471
|
+
|
|
472
|
+
const provider = new NodeTracerProvider();
|
|
473
|
+
if (connectionString) {
|
|
474
|
+
const exporter = new AzureMonitorTraceExporter({ connectionString });
|
|
475
|
+
provider.addSpanProcessor(new SimpleSpanProcessor(exporter));
|
|
476
|
+
}
|
|
477
|
+
provider.register();
|
|
478
|
+
```
|
|
479
|
+
|
|
480
|
+
To use instrumentation for Azure SDK, you need to register it before importing any dependencies from `@azure/core-tracing`, such as `@azure-rest/ai-inference`.
|
|
481
|
+
|
|
482
|
+
```js
|
|
483
|
+
import { registerInstrumentations } from "@opentelemetry/instrumentation";
|
|
484
|
+
import { createAzureSdkInstrumentation } from "@azure/opentelemetry-instrumentation-azure-sdk";
|
|
485
|
+
|
|
486
|
+
registerInstrumentations({
|
|
487
|
+
instrumentations: [createAzureSdkInstrumentation()],
|
|
488
|
+
});
|
|
489
|
+
|
|
490
|
+
import ModelClient from "@azure-rest/ai-inference";
|
|
491
|
+
```
|
|
492
|
+
|
|
493
|
+
Finally, when making a chat completion call, you need to include:
|
|
494
|
+
```js
|
|
495
|
+
tracingOptions: { tracingContext: context.active() }
|
|
496
|
+
```
|
|
497
|
+
Here is an example:
|
|
498
|
+
|
|
499
|
+
```js
|
|
500
|
+
import { context } from "@opentelemetry/api";
|
|
501
|
+
client.path("/chat/completions").post({
|
|
502
|
+
body: {...},
|
|
503
|
+
tracingOptions: { tracingContext: context.active() }
|
|
504
|
+
});
|
|
505
|
+
```
|
|
506
|
+
|
|
507
|
+
### Tracing Your Own Functions
|
|
508
|
+
OpenTelemetry provides `startActiveSpan` to instrument your own code. Here is an example:
|
|
509
|
+
|
|
510
|
+
```js
|
|
511
|
+
import { trace } from "@opentelemetry/api";
|
|
512
|
+
const tracer = trace.getTracer("sample", "0.1.0");
|
|
513
|
+
|
|
514
|
+
const getWeatherFunc = (location: string, unit: string): string => {
|
|
515
|
+
return tracer.startActiveSpan("getWeatherFunc", span => {
|
|
516
|
+
if (unit !== "celsius") {
|
|
517
|
+
unit = "fahrenheit";
|
|
518
|
+
}
|
|
519
|
+
const result = `The temperature in ${location} is 72 degrees ${unit}`;
|
|
520
|
+
span.setAttribute("result", result);
|
|
521
|
+
span.end();
|
|
522
|
+
return result;
|
|
523
|
+
});
|
|
524
|
+
}
|
|
525
|
+
```
|
|
526
|
+
|
|
527
|
+
|
|
528
|
+
## Troubleshooting
|
|
529
|
+
|
|
530
|
+
### Logging
|
|
531
|
+
|
|
532
|
+
Enabling logging may help uncover useful information about failures. In order to see a log of HTTP requests and responses, set the `AZURE_LOG_LEVEL` environment variable to `info`. Alternatively, logging can be enabled at runtime by calling `setLogLevel` in the `@azure/logger`:
|
|
533
|
+
|
|
534
|
+
```javascript
|
|
535
|
+
const { setLogLevel } = require("@azure/logger");
|
|
536
|
+
|
|
537
|
+
setLogLevel("info");
|
|
538
|
+
```
|
|
539
|
+
|
|
540
|
+
For more detailed instructions on how to enable logs, you can look at the [@azure/logger package docs](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/core/logger).
|
|
541
|
+
|
|
542
|
+
<!-- LINKS -->
|
|
543
|
+
[stream_chat_completion_sample]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/ai/ai-inference-rest/samples/v1-beta/typescript/streamChatCompletions.ts
|
|
544
|
+
[azure_openai_completions_docs]: https://learn.microsoft.com/azure/cognitive-services/openai/how-to/completions
|
|
545
|
+
[defaultazurecredential]: https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/identity/identity#defaultazurecredential
|
|
546
|
+
[azure_identity]: https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/identity/identity
|
|
547
|
+
[azure_core_auth]: https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/core/core-auth
|
|
548
|
+
[register_aad_app]: https://docs.microsoft.com/azure/cognitive-services/authentication#assign-a-role-to-a-service-principal
|
|
549
|
+
[azure_cli]: https://docs.microsoft.com/cli/azure
|
|
550
|
+
[azure_portal]: https://portal.azure.com
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import type { GetChatCompletionsParameters, GetModelInfoParameters, GetEmbeddingsParameters, GetImageEmbeddingsParameters } from "./parameters.js";
|
|
2
|
+
import type { GetChatCompletions200Response, GetChatCompletionsDefaultResponse, GetModelInfo200Response, GetModelInfoDefaultResponse, GetEmbeddings200Response, GetEmbeddingsDefaultResponse, GetImageEmbeddings200Response, GetImageEmbeddingsDefaultResponse } from "./responses.js";
|
|
3
|
+
import type { Client, StreamableMethod } from "@azure-rest/core-client";
|
|
4
|
+
/** Operations available on the `/chat/completions` route. */
export interface GetChatCompletions {
  /**
   * Gets chat completions for the provided chat messages.
   * Completions support a wide variety of tasks and generate text that continues from or "completes"
   * provided prompt data. The method makes a REST API call to the `/chat/completions` route
   * on the given endpoint.
   *
   * @param options - Request options, including the request body carrying the chat messages.
   * @returns A streamable method resolving to the 200 response or the default (error) response.
   */
  post(options?: GetChatCompletionsParameters): StreamableMethod<GetChatCompletions200Response | GetChatCompletionsDefaultResponse>;
}
|
|
13
|
+
/** Operations available on the `/info` route. */
export interface GetModelInfo {
  /**
   * Returns information about the AI model.
   * The method makes a REST API call to the `/info` route on the given endpoint.
   *
   * @param options - Optional request options.
   * @returns A streamable method resolving to the 200 response or the default (error) response.
   */
  get(options?: GetModelInfoParameters): StreamableMethod<GetModelInfo200Response | GetModelInfoDefaultResponse>;
}
|
|
20
|
+
/** Operations available on the `/embeddings` route. */
export interface GetEmbeddings {
  /**
   * Return the embedding vectors for given text prompts.
   * The method makes a REST API call to the `/embeddings` route on the given endpoint.
   *
   * @param options - Request options, including the request body carrying the text prompts.
   * @returns A streamable method resolving to the 200 response or the default (error) response.
   */
  post(options?: GetEmbeddingsParameters): StreamableMethod<GetEmbeddings200Response | GetEmbeddingsDefaultResponse>;
}
|
|
27
|
+
/** Operations available on the `/images/embeddings` route. */
export interface GetImageEmbeddings {
  /**
   * Return the embedding vectors for given images.
   * The method makes a REST API call to the `/images/embeddings` route on the given endpoint.
   *
   * @param options - Request options, including the request body carrying the image input.
   * @returns A streamable method resolving to the 200 response or the default (error) response.
   */
  post(options?: GetImageEmbeddingsParameters): StreamableMethod<GetImageEmbeddings200Response | GetImageEmbeddingsDefaultResponse>;
}
|
|
34
|
+
/**
 * Maps each supported REST route (as a string-literal path) to the interface
 * describing the HTTP verbs available on that route. Calling `client.path(...)`
 * with one of these literals selects the matching overload below.
 */
export interface Routes {
  /** Resource for '/chat/completions' has methods for the following verbs: post */
  (path: "/chat/completions"): GetChatCompletions;
  /** Resource for '/info' has methods for the following verbs: get */
  (path: "/info"): GetModelInfo;
  /** Resource for '/embeddings' has methods for the following verbs: post */
  (path: "/embeddings"): GetEmbeddings;
  /** Resource for '/images/embeddings' has methods for the following verbs: post */
  (path: "/images/embeddings"): GetImageEmbeddings;
}
|
|
44
|
+
/**
 * The AI inference REST client type: a core-client `Client` augmented with a
 * typed `path` function whose overloads are declared by `Routes`.
 */
export type ModelClient = Client & {
  // Route selector; the returned object exposes the verbs valid for that path.
  path: Routes;
};
|
|
47
|
+
//# sourceMappingURL=clientDefinitions.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"clientDefinitions.d.ts","sourceRoot":"","sources":["../../src/clientDefinitions.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EACV,4BAA4B,EAC5B,sBAAsB,EACtB,uBAAuB,EACvB,4BAA4B,EAC7B,MAAM,iBAAiB,CAAC;AACzB,OAAO,KAAK,EACV,6BAA6B,EAC7B,iCAAiC,EACjC,uBAAuB,EACvB,2BAA2B,EAC3B,wBAAwB,EACxB,4BAA4B,EAC5B,6BAA6B,EAC7B,iCAAiC,EAClC,MAAM,gBAAgB,CAAC;AACxB,OAAO,KAAK,EAAE,MAAM,EAAE,gBAAgB,EAAE,MAAM,yBAAyB,CAAC;AAExE,MAAM,WAAW,kBAAkB;IACjC;;;;;OAKG;IACH,IAAI,CACF,OAAO,CAAC,EAAE,4BAA4B,GACrC,gBAAgB,CAAC,6BAA6B,GAAG,iCAAiC,CAAC,CAAC;CACxF;AAED,MAAM,WAAW,YAAY;IAC3B;;;OAGG;IACH,GAAG,CACD,OAAO,CAAC,EAAE,sBAAsB,GAC/B,gBAAgB,CAAC,uBAAuB,GAAG,2BAA2B,CAAC,CAAC;CAC5E;AAED,MAAM,WAAW,aAAa;IAC5B;;;OAGG;IACH,IAAI,CACF,OAAO,CAAC,EAAE,uBAAuB,GAChC,gBAAgB,CAAC,wBAAwB,GAAG,4BAA4B,CAAC,CAAC;CAC9E;AAED,MAAM,WAAW,kBAAkB;IACjC;;;OAGG;IACH,IAAI,CACF,OAAO,CAAC,EAAE,4BAA4B,GACrC,gBAAgB,CAAC,6BAA6B,GAAG,iCAAiC,CAAC,CAAC;CACxF;AAED,MAAM,WAAW,MAAM;IACrB,iFAAiF;IACjF,CAAC,IAAI,EAAE,mBAAmB,GAAG,kBAAkB,CAAC;IAChD,oEAAoE;IACpE,CAAC,IAAI,EAAE,OAAO,GAAG,YAAY,CAAC;IAC9B,2EAA2E;IAC3E,CAAC,IAAI,EAAE,aAAa,GAAG,aAAa,CAAC;IACrC,kFAAkF;IAClF,CAAC,IAAI,EAAE,oBAAoB,GAAG,kBAAkB,CAAC;CAClD;AAED,MAAM,MAAM,WAAW,GAAG,MAAM,GAAG;IACjC,IAAI,EAAE,MAAM,CAAC;CACd,CAAC"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"clientDefinitions.js","sourceRoot":"","sources":["../../src/clientDefinitions.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT License.\n\nimport type {\n GetChatCompletionsParameters,\n GetModelInfoParameters,\n GetEmbeddingsParameters,\n GetImageEmbeddingsParameters,\n} from \"./parameters.js\";\nimport type {\n GetChatCompletions200Response,\n GetChatCompletionsDefaultResponse,\n GetModelInfo200Response,\n GetModelInfoDefaultResponse,\n GetEmbeddings200Response,\n GetEmbeddingsDefaultResponse,\n GetImageEmbeddings200Response,\n GetImageEmbeddingsDefaultResponse,\n} from \"./responses.js\";\nimport type { Client, StreamableMethod } from \"@azure-rest/core-client\";\n\nexport interface GetChatCompletions {\n /**\n * Gets chat completions for the provided chat messages.\n * Completions support a wide variety of tasks and generate text that continues from or \"completes\"\n * provided prompt data. 
The method makes a REST API call to the `/chat/completions` route\n * on the given endpoint.\n */\n post(\n options?: GetChatCompletionsParameters,\n ): StreamableMethod<GetChatCompletions200Response | GetChatCompletionsDefaultResponse>;\n}\n\nexport interface GetModelInfo {\n /**\n * Returns information about the AI model.\n * The method makes a REST API call to the `/info` route on the given endpoint.\n */\n get(\n options?: GetModelInfoParameters,\n ): StreamableMethod<GetModelInfo200Response | GetModelInfoDefaultResponse>;\n}\n\nexport interface GetEmbeddings {\n /**\n * Return the embedding vectors for given text prompts.\n * The method makes a REST API call to the `/embeddings` route on the given endpoint.\n */\n post(\n options?: GetEmbeddingsParameters,\n ): StreamableMethod<GetEmbeddings200Response | GetEmbeddingsDefaultResponse>;\n}\n\nexport interface GetImageEmbeddings {\n /**\n * Return the embedding vectors for given images.\n * The method makes a REST API call to the `/images/embeddings` route on the given endpoint.\n */\n post(\n options?: GetImageEmbeddingsParameters,\n ): StreamableMethod<GetImageEmbeddings200Response | GetImageEmbeddingsDefaultResponse>;\n}\n\nexport interface Routes {\n /** Resource for '/chat/completions' has methods for the following verbs: post */\n (path: \"/chat/completions\"): GetChatCompletions;\n /** Resource for '/info' has methods for the following verbs: get */\n (path: \"/info\"): GetModelInfo;\n /** Resource for '/embeddings' has methods for the following verbs: post */\n (path: \"/embeddings\"): GetEmbeddings;\n /** Resource for '/images/embeddings' has methods for the following verbs: post */\n (path: \"/images/embeddings\"): GetImageEmbeddings;\n}\n\nexport type ModelClient = Client & {\n path: Routes;\n};\n"]}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"constants.d.ts","sourceRoot":"","sources":["../../src/constants.ts"],"names":[],"mappings":"AAGA,eAAO,MAAM,WAAW,EAAE,MAAuB,CAAC"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"constants.js","sourceRoot":"","sources":["../../src/constants.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAElC,MAAM,CAAC,MAAM,WAAW,GAAW,cAAc,CAAC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT License.\n\nexport const SDK_VERSION: string = \"1.0.0-beta.4\";\n"]}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import ModelClient from "./modelClient.js";
|
|
2
|
+
export * from "./modelClient.js";
|
|
3
|
+
export * from "./parameters.js";
|
|
4
|
+
export * from "./responses.js";
|
|
5
|
+
export * from "./clientDefinitions.js";
|
|
6
|
+
export * from "./isUnexpected.js";
|
|
7
|
+
export * from "./models.js";
|
|
8
|
+
export * from "./outputModels.js";
|
|
9
|
+
export default ModelClient;
|
|
10
|
+
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAGA,OAAO,WAAW,MAAM,kBAAkB,CAAC;AAE3C,cAAc,kBAAkB,CAAC;AACjC,cAAc,iBAAiB,CAAC;AAChC,cAAc,gBAAgB,CAAC;AAC/B,cAAc,wBAAwB,CAAC;AACvC,cAAc,mBAAmB,CAAC;AAClC,cAAc,aAAa,CAAC;AAC5B,cAAc,mBAAmB,CAAC;AAElC,eAAe,WAAW,CAAC"}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
// Copyright (c) Microsoft Corporation.
|
|
2
|
+
// Licensed under the MIT License.
|
|
3
|
+
import ModelClient from "./modelClient.js";
|
|
4
|
+
export * from "./modelClient.js";
|
|
5
|
+
export * from "./parameters.js";
|
|
6
|
+
export * from "./responses.js";
|
|
7
|
+
export * from "./clientDefinitions.js";
|
|
8
|
+
export * from "./isUnexpected.js";
|
|
9
|
+
export * from "./models.js";
|
|
10
|
+
export * from "./outputModels.js";
|
|
11
|
+
export default ModelClient;
|
|
12
|
+
//# sourceMappingURL=index.js.map
|