notdiamond 2.0.0-rc2 → 2.0.0-rc20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +185 -0
- package/LICENSE +1 -1
- package/README.md +261 -168
- package/client.d.mts +26 -43
- package/client.d.mts.map +1 -1
- package/client.d.ts +26 -43
- package/client.d.ts.map +1 -1
- package/client.js +49 -68
- package/client.js.map +1 -1
- package/client.mjs +47 -66
- package/client.mjs.map +1 -1
- package/core/api-promise.d.mts +2 -2
- package/core/api-promise.d.ts +2 -2
- package/core/error.d.mts +2 -2
- package/core/error.d.ts +2 -2
- package/core/error.js +4 -4
- package/core/error.mjs +2 -2
- package/core/resource.d.mts +3 -3
- package/core/resource.d.ts +3 -3
- package/index.d.mts +3 -3
- package/index.d.ts +3 -3
- package/index.js +4 -4
- package/index.mjs +3 -3
- package/internal/parse.d.mts +2 -2
- package/internal/parse.d.ts +2 -2
- package/internal/shims.js +1 -1
- package/internal/shims.mjs +1 -1
- package/internal/uploads.d.mts +4 -4
- package/internal/uploads.d.ts +4 -4
- package/internal/utils/base64.js +2 -2
- package/internal/utils/base64.mjs +3 -3
- package/internal/utils/env.d.mts.map +1 -1
- package/internal/utils/env.d.ts.map +1 -1
- package/internal/utils/env.js +4 -2
- package/internal/utils/env.js.map +1 -1
- package/internal/utils/env.mjs +4 -2
- package/internal/utils/env.mjs.map +1 -1
- package/internal/utils/log.d.mts +3 -3
- package/internal/utils/log.d.ts +3 -3
- package/internal/utils/path.js +1 -1
- package/internal/utils/path.mjs +2 -2
- package/internal/utils/values.js +5 -5
- package/internal/utils/values.mjs +6 -6
- package/package.json +11 -12
- package/resources/custom-router.d.mts +145 -0
- package/resources/custom-router.d.mts.map +1 -0
- package/resources/custom-router.d.ts +145 -0
- package/resources/custom-router.d.ts.map +1 -0
- package/resources/custom-router.js +83 -0
- package/resources/custom-router.js.map +1 -0
- package/resources/custom-router.mjs +79 -0
- package/resources/custom-router.mjs.map +1 -0
- package/resources/index.d.mts +5 -6
- package/resources/index.d.mts.map +1 -1
- package/resources/index.d.ts +5 -6
- package/resources/index.d.ts.map +1 -1
- package/resources/index.js +5 -7
- package/resources/index.js.map +1 -1
- package/resources/index.mjs +2 -3
- package/resources/index.mjs.map +1 -1
- package/resources/model-router.d.mts +194 -0
- package/resources/model-router.d.mts.map +1 -0
- package/resources/model-router.d.ts +194 -0
- package/resources/model-router.d.ts.map +1 -0
- package/resources/model-router.js +68 -0
- package/resources/model-router.js.map +1 -0
- package/resources/model-router.mjs +64 -0
- package/resources/model-router.mjs.map +1 -0
- package/resources/models.d.mts +50 -25
- package/resources/models.d.mts.map +1 -1
- package/resources/models.d.ts +50 -25
- package/resources/models.d.ts.map +1 -1
- package/resources/models.js +5 -0
- package/resources/models.js.map +1 -1
- package/resources/models.mjs +5 -0
- package/resources/models.mjs.map +1 -1
- package/resources/preferences.d.mts +37 -54
- package/resources/preferences.d.mts.map +1 -1
- package/resources/preferences.d.ts +37 -54
- package/resources/preferences.d.ts.map +1 -1
- package/resources/preferences.js +17 -42
- package/resources/preferences.js.map +1 -1
- package/resources/preferences.mjs +17 -42
- package/resources/preferences.mjs.map +1 -1
- package/resources/prompt-adaptation.d.mts +340 -249
- package/resources/prompt-adaptation.d.mts.map +1 -1
- package/resources/prompt-adaptation.d.ts +340 -249
- package/resources/prompt-adaptation.d.ts.map +1 -1
- package/resources/prompt-adaptation.js +36 -79
- package/resources/prompt-adaptation.js.map +1 -1
- package/resources/prompt-adaptation.mjs +36 -79
- package/resources/prompt-adaptation.mjs.map +1 -1
- package/src/client.ts +72 -128
- package/src/core/api-promise.ts +4 -4
- package/src/core/error.ts +2 -2
- package/src/core/resource.ts +3 -3
- package/src/index.ts +3 -3
- package/src/internal/parse.ts +2 -2
- package/src/internal/shims.ts +1 -1
- package/src/internal/uploads.ts +5 -5
- package/src/internal/utils/base64.ts +3 -3
- package/src/internal/utils/env.ts +4 -2
- package/src/internal/utils/log.ts +3 -3
- package/src/internal/utils/path.ts +2 -2
- package/src/internal/utils/values.ts +6 -6
- package/src/resources/custom-router.ts +168 -0
- package/src/resources/index.ts +20 -32
- package/src/resources/model-router.ts +222 -0
- package/src/resources/models.ts +55 -32
- package/src/resources/preferences.ts +43 -83
- package/src/resources/prompt-adaptation.ts +358 -300
- package/src/version.ts +1 -1
- package/version.d.mts +1 -1
- package/version.d.mts.map +1 -1
- package/version.d.ts +1 -1
- package/version.d.ts.map +1 -1
- package/version.js +1 -1
- package/version.js.map +1 -1
- package/version.mjs +1 -1
- package/version.mjs.map +1 -1
- package/resources/admin.d.mts +0 -4
- package/resources/admin.d.mts.map +0 -1
- package/resources/admin.d.ts +0 -4
- package/resources/admin.d.ts.map +0 -1
- package/resources/admin.js +0 -9
- package/resources/admin.js.map +0 -1
- package/resources/admin.mjs +0 -5
- package/resources/admin.mjs.map +0 -1
- package/resources/report.d.mts +0 -245
- package/resources/report.d.mts.map +0 -1
- package/resources/report.d.ts +0 -245
- package/resources/report.d.ts.map +0 -1
- package/resources/report.js +0 -86
- package/resources/report.js.map +0 -1
- package/resources/report.mjs +0 -82
- package/resources/report.mjs.map +0 -1
- package/resources/routing.d.mts +0 -391
- package/resources/routing.d.mts.map +0 -1
- package/resources/routing.d.ts +0 -391
- package/resources/routing.d.ts.map +0 -1
- package/resources/routing.js +0 -163
- package/resources/routing.js.map +0 -1
- package/resources/routing.mjs +0 -159
- package/resources/routing.mjs.map +0 -1
- package/src/resources/admin.ts +0 -5
- package/src/resources/report.ts +0 -300
- package/src/resources/routing.ts +0 -476
package/src/resources/prompt-adaptation.ts

````diff
@@ -3,7 +3,6 @@
 import { APIResource } from '../core/resource';
 import * as PromptAdaptationAPI from './prompt-adaptation';
 import { APIPromise } from '../core/api-promise';
-import { buildHeaders } from '../internal/headers';
 import { RequestOptions } from '../internal/request-options';
 import { path } from '../internal/utils/path';
 
@@ -12,14 +11,14 @@ export class PromptAdaptation extends APIResource {
    * Adapt your prompt from one LLM to work optimally across different target LLMs.
    *
    * This endpoint automatically optimizes your prompt (system prompt + user message
-   * template) to
-   *
-   *
+   * template) to improve accuracy on your use case across various models. Each model
+   * has unique characteristics, and what works well for GPT-5 might not work as well
+   * for Claude or Gemini.
    *
    * **How Prompt Adaptation Works:**
    *
-   * 1. You provide your current prompt
-   * 2. You specify target models you want to adapt to
+   * 1. You provide your current prompt and optionally your current origin model
+   * 2. You specify the target models you want to adapt your prompt to
    * 3. You provide evaluation examples (golden records) with expected answers
    * 4. The system runs optimization to find the best prompt for each target model
    * 5. You receive adapted prompts that perform well on your target models
@@ -27,14 +26,19 @@ export class PromptAdaptation extends APIResource {
    * **Evaluation Metrics:** Choose either a standard metric or provide custom
    * evaluation:
    *
-   * - **Standard metrics**: LLMaaJ:
-   * JSON_Match
+   * - **Standard metrics**: LLMaaJ:Sem_Sim_1 (semantic similarity), JSON_Match
    * - **Custom evaluation**: Provide evaluation_config with your own LLM judge,
    *   prompt, and cutoff
    *
    * **Dataset Requirements:**
    *
-   * - Minimum
+   * - Minimum 25 examples in train_goldens (more examples = better adaptation)
+   * - **Prototype mode**: Set `prototype_mode: true` to use as few as 3 examples for
+   *   prototyping
+   *   - Recommended when you don't have enough data yet to build a proof-of-concept
+   *   - Note: Performance may be degraded compared to standard mode (25+ examples)
+   *   - Trade-off: Faster iteration with less data vs. potentially less
+   *     generalizability
    * - Each example must have fields matching your template placeholders
    * - Supervised evaluation requires 'answer' field in each golden record
    * - Unsupervised evaluation can work without answers
````
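The dataset requirements above map directly onto the `GoldenRecord` shape introduced later in this diff. Below is a minimal, illustrative sketch; the `question`/`context` field names are hypothetical and only need to match the placeholders in your own template.

```ts
// Local structural type mirroring the GoldenRecord interface added in this release.
type GoldenRecord = {
  fields: { [key: string]: string }; // keys must match the template placeholders
  answer?: string | null; // required for supervised metrics, optional otherwise
};

const template = 'Context: {context}\nQuestion: {question}\nAnswer:';

const trainGoldens: GoldenRecord[] = [
  {
    fields: {
      context: 'Paris has been the capital of France since 508 AD.',
      question: 'What is the capital of France?',
    },
    answer: 'Paris',
  },
  // ...25+ examples in standard mode, or as few as 3 when prototype_mode is true
];
```

As the doc comment notes, prototype mode trades some generalizability for faster iteration when little data is available.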
````diff
@@ -45,21 +49,6 @@ export class PromptAdaptation extends APIResource {
    * - Time depends on: number of target models, dataset size, model availability
    * - Use the returned adaptation_run_id to check status and retrieve results
    *
-   * **Subscription Tiers:**
-   *
-   * - Free: 1 target model
-   * - Starter: 3 target models
-   * - Startup: 5 target models
-   * - Enterprise: 10 target models
-   *
-   * **Best Practices:**
-   *
-   * 1. Use diverse, representative examples from your production workload
-   * 2. Include 10-20 examples for best results (5 minimum)
-   * 3. Ensure consistent evaluation across all examples
-   * 4. Test both train_goldens and test_goldens split for validation
-   * 5. Use the same model versions you'll use in production
-   *
    * **Example Workflow:**
    *
    * ```
@@ -80,7 +69,7 @@ export class PromptAdaptation extends APIResource {
    *   system_prompt: 'You are a helpful assistant that answers questions accurately.',
    *   target_models: [
    *     { provider: 'anthropic', model: 'claude-sonnet-4-5-20250929' },
-   *     { provider: 'google', model: 'gemini-
+   *     { provider: 'google', model: 'gemini-2.5-flash' },
    *   ],
    *   template: 'Question: {question}\nAnswer:',
    *   evaluation_metric: 'LLMaaJ:Sem_Sim_3',
@@ -155,15 +144,6 @@ export class PromptAdaptation extends APIResource {
    * 3. Apply the optimized prompts when calling the respective target models
    * 4. Compare pre/post optimization scores to see improvement
    *
-   * **Evaluation Scores:**
-   *
-   * - Scores range from 0-10 (higher is better)
-   * - Compare origin_model score with target_models pre_optimization_score for
-   *   baseline
-   * - Compare pre_optimization_score with post_optimization_score to see improvement
-   *   from adaptation
-   * - Typical improvements range from 5-30% on evaluation metrics
-   *
    * **Status Handling:**
    *
    * - If adaptation is still processing, target model results will have
@@ -187,61 +167,17 @@ export class PromptAdaptation extends APIResource {
    *
    * @example
    * ```ts
-   * const
+   * const response =
    *   await client.promptAdaptation.getAdaptResults(
    *     'adaptation_run_id',
    *   );
    * ```
    */
-  getAdaptResults(
-    return this._client.get(path`/v2/prompt/adaptResults/${adaptationRunID}`, options);
-  }
-
-  /**
-   * Get Adapt Run Results
-   *
-   * @example
-   * ```ts
-   * const adaptationRunResults =
-   *   await client.promptAdaptation.getAdaptRunResults(
-   *     'adaptation_run_id',
-   *     { user_id: 'user_id', 'x-token': 'x-token' },
-   *   );
-   * ```
-   */
-  getAdaptRunResults(
+  getAdaptResults(
     adaptationRunID: string,
-    params: PromptAdaptationGetAdaptRunResultsParams,
-    options?: RequestOptions,
-  ): APIPromise<AdaptationRunResults> {
-    const { user_id, 'x-token': xToken } = params;
-    return this._client.get(path`/v2/prompt/frontendAdaptRunResults/${user_id}/${adaptationRunID}`, {
-      ...options,
-      headers: buildHeaders([{ 'x-token': xToken }, options?.headers]),
-    });
-  }
-
-  /**
-   * Get Adapt Runs
-   *
-   * @example
-   * ```ts
-   * const adaptationRunResults =
-   *   await client.promptAdaptation.getAdaptRuns('user_id', {
-   *     'x-token': 'x-token',
-   *   });
-   * ```
-   */
-  getAdaptRuns(
-    userID: string,
-    params: PromptAdaptationGetAdaptRunsParams,
     options?: RequestOptions,
-  ): APIPromise<
-
-    return this._client.get(path`/v2/prompt/frontendAdaptRuns/${userID}`, {
-      ...options,
-      headers: buildHeaders([{ 'x-token': xToken }, options?.headers]),
-    });
+  ): APIPromise<PromptAdaptationGetAdaptResultsResponse> {
+    return this._client.get(path`/v2/prompt/adaptResults/${adaptationRunID}`, options);
   }
 
   /**
````
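The hunk above collapses the old `getAdaptResults` / `getAdaptRunResults` / `getAdaptRuns` trio into a single `getAdaptResults(adaptationRunID)` call that no longer needs `user_id` or `x-token` parameters. A hedged usage sketch follows; the default-export client class name and its construction are assumptions based on the package name, so check the package README for the actual setup.

```ts
import NotDiamond from 'notdiamond'; // assumed default export; construction details live in the README

const client = new NotDiamond(); // API key is typically read from the environment (assumption)

async function showResults(adaptationRunId: string) {
  const results = await client.promptAdaptation.getAdaptResults(adaptationRunId);
  console.log(results.job_status); // 'created' | 'queued' | 'processing' | 'completed' | 'failed'
  for (const target of results.target_models) {
    // Compare the scores before and after optimization, then pick up the optimized prompt.
    console.log(target.pre_optimization_score, target.post_optimization_score, target.system_prompt);
  }
}
```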
````diff
@@ -292,28 +228,143 @@ export class PromptAdaptation extends APIResource {
   }
 
   /**
-   * Get LLM costs for a specific adaptation run
+   * Get LLM usage costs for a specific prompt adaptation run.
+   *
+   * This endpoint returns the total cost and detailed usage records for all LLM
+   * requests made during a prompt adaptation run. Use this to track costs associated
+   * with optimizing prompts for different target models.
+   *
+   * **Cost Breakdown:**
+   *
+   * - Total cost across all models used in the adaptation
+   * - Individual usage records with provider, model, tokens, and costs
+   * - Timestamps for each LLM request
+   *
+   * **Access Control:**
+   *
+   * - Only accessible by the user who created the adaptation run
+   * - Requires prompt adaptation access
    *
    * @example
    * ```ts
-   * const response =
-   *
-   *
-   *   );
+   * const response = await client.promptAdaptation.getCost(
+   *   'adaptation_run_id',
+   * );
    * ```
    */
-
-    adaptationRunID
-    options?: RequestOptions,
-  ): APIPromise<PromptAdaptationRetrieveCostsResponse> {
-    return this._client.get(path`/v1/adaptation-runs/${adaptationRunID}/costs`, options);
+  getCost(adaptationRunID: string, options?: RequestOptions): APIPromise<PromptAdaptationGetCostResponse> {
+    return this._client.get(path`/v2/prompt/adapt/${adaptationRunID}/costs`, options);
   }
 }
 
 /**
- *
+ * A training or test example for prompt adaptation.
  */
-export interface
+export interface GoldenRecord {
+  /**
+   * Dictionary mapping field names to their values. Keys must match the fields
+   * specified in the template
+   */
+  fields: { [key: string]: string };
+
+  /**
+   * Expected answer for supervised evaluation. Required for supervised metrics,
+   * optional for unsupervised
+   */
+  answer?: string | null;
+}
+
+/**
+ * Status enum for asynchronous jobs (prompt adaptation, custom router training,
+ * etc.).
+ *
+ * Represents the current state of a long-running operation:
+ *
+ * - **created**: Job has been initialized but not yet queued
+ * - **queued**: Job is waiting in the queue to be processed
+ * - **processing**: Job is currently being executed
+ * - **completed**: Job finished successfully and results are available
+ * - **failed**: Job encountered an error and did not complete
+ */
+export type JobStatus = 'created' | 'queued' | 'processing' | 'completed' | 'failed';
+
+/**
+ * Model for specifying an LLM provider in API requests.
+ */
+export interface RequestProvider {
+  /**
+   * Model name (e.g., 'gpt-4o', 'claude-sonnet-4-5-20250929')
+   */
+  model: string;
+
+  /**
+   * Provider name (e.g., 'openai', 'anthropic', 'google')
+   */
+  provider: string;
+
+  /**
+   * Maximum context length for the model (required for custom models)
+   */
+  context_length?: number | null;
+
+  /**
+   * Input token price per million tokens in USD (required for custom models)
+   */
+  input_price?: number | null;
+
+  /**
+   * Whether this is a custom model not in Not Diamond's supported model list
+   */
+  is_custom?: boolean;
+
+  /**
+   * Average latency in seconds (required for custom models)
+   */
+  latency?: number | null;
+
+  /**
+   * Output token price per million tokens in USD (required for custom models)
+   */
+  output_price?: number | null;
+}
+
+/**
+ * Response model for POST /v2/prompt/adapt endpoint.
+ *
+ * Returned immediately after submitting a prompt adaptation request. The
+ * adaptation process runs asynchronously, so use the returned adaptation_run_id to
+ * track progress and retrieve results when complete.
+ *
+ * **Next steps:**
+ *
+ * 1. Store the adaptation_run_id
+ * 2. Poll GET /v2/prompt/adaptStatus/{adaptation_run_id} to check progress
+ * 3. When status is 'completed', retrieve optimized prompts from GET
+ *    /v2/prompt/adaptResults/{adaptation_run_id}
+ * 4. Use the optimized prompts with your target models
+ */
+export interface PromptAdaptationAdaptResponse {
+  /**
+   * Unique identifier for this adaptation run. Use this to poll status and retrieve
+   * optimized prompts when complete
+   */
+  adaptation_run_id: string;
+}
+
+/**
+ * Response model for GET /v2/prompt/adaptResults/{adaptation_run_id} endpoint.
+ *
+ * Contains the complete results of a prompt adaptation run, including optimized
+ * prompts and evaluation metrics for all target models. Use this to retrieve your
+ * adapted prompts after the adaptation status is 'completed'.
+ *
+ * The response includes:
+ *
+ * - Baseline performance of your original prompt on the origin model
+ * - Optimized prompts for each target model with pre/post optimization scores
+ * - Evaluation metrics and cost information for each model
+ */
+export interface PromptAdaptationGetAdaptResultsResponse {
   /**
    * Unique ID for this adaptation run
    */
````
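The `RequestProvider` interface added above is used for both `origin_model` and `target_models`. A short illustrative sketch of the two shapes it supports; the custom-model numbers below are made up.

```ts
// Local copy mirroring the RequestProvider interface added in this release, so the sketch is self-contained.
interface RequestProvider {
  model: string;
  provider: string;
  context_length?: number | null;
  input_price?: number | null;
  output_price?: number | null;
  latency?: number | null;
  is_custom?: boolean;
}

// A model from Not Diamond's supported list only needs provider + model:
const supported: RequestProvider = { provider: 'anthropic', model: 'claude-sonnet-4-5-20250929' };

// A custom model must also carry context-length, pricing, and latency metadata:
const custom: RequestProvider = {
  provider: 'openai',
  model: 'my-finetuned-model', // hypothetical model name
  is_custom: true,
  context_length: 128000,
  input_price: 2.5, // USD per million input tokens
  output_price: 10, // USD per million output tokens
  latency: 1.2, // average seconds per request
};
```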
````diff
@@ -325,14 +376,14 @@ export interface AdaptationRunResults {
   created_at: string;
 
   /**
-   * Overall status of the adaptation run
+   * Overall status of the adaptation run (queued, running, completed, failed)
    */
   job_status: JobStatus;
 
   /**
-   * Results for each target model with optimized prompts
+   * Results for each target model with optimized prompts and improvement scores
    */
-  target_models: Array<
+  target_models: Array<PromptAdaptationGetAdaptResultsResponse.TargetModel>;
 
   /**
    * Timestamp of last update to this adaptation run
@@ -344,19 +395,58 @@ export interface AdaptationRunResults {
   evaluation_metric?: string | null;
 
   /**
-   * Metrics for the LLM requests made during the adaptation run
+   * Metrics for the LLM requests made during the adaptation run (e.g.,
+   * total_requests, avg_latency)
    */
   llm_request_metrics?: { [key: string]: number };
 
   /**
-   *
+   * Baseline results for the origin model in prompt adaptation.
+   *
+   * Part of AdaptationRunResultsResponse. Contains the performance metrics and
+   * prompt configuration for your original prompt on the origin model. This serves
+   * as the baseline to compare against optimized prompts for target models.
+   *
+   * **Fields include:**
+   *
+   * - Original system prompt and user message template
+   * - Baseline performance score and evaluation metrics
+   * - Cost of running the baseline evaluation
+   * - Job status for the origin model evaluation
    */
-  origin_model?:
+  origin_model?: PromptAdaptationGetAdaptResultsResponse.OriginModel | null;
+
+  /**
+   * Whether this adaptation run was created with prototype mode (3-24 training
+   * examples allowed). Prototype mode may have degraded performance compared to
+   * standard mode (25+ examples)
+   */
+  prototype_mode?: boolean;
 }
 
-export namespace
+export namespace PromptAdaptationGetAdaptResultsResponse {
   /**
-   *
+   * Optimized prompt results for a single target model in prompt adaptation.
+   *
+   * Part of AdaptationRunResultsResponse. Contains the optimized system prompt and
+   * user message template for a specific target model, along with performance scores
+   * before and after optimization. Use these optimized prompts with the target model
+   * to achieve better performance than the original prompt.
+   *
+   * **Key metrics:**
+   *
+   * - **pre_optimization_score**: Performance with original prompt on this target
+   *   model
+   * - **post_optimization_score**: Performance with optimized prompt on this target
+   *   model
+   * - **Score improvement**: post - pre shows how much optimization helped
+   *
+   * **Usage:**
+   *
+   * 1. Extract the optimized system_prompt and user_message_template
+   * 2. Replace placeholders in user_message_template using fields from your data
+   * 3. Use these prompts when calling this target model
+   * 4. Compare pre/post scores to see improvement gained
    */
   export interface TargetModel {
     cost: number | null;
````
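A minimal sketch of the "replace placeholders" step described in the `TargetModel` doc comment above. The helper is not part of the SDK; it just applies the curly-brace convention this file documents.

```ts
// Substitute golden-record fields into the optimized user_message_template.
function renderTemplate(template: string, fields: { [key: string]: string }): string {
  return template.replace(/\{(\w+)\}/g, (match, name) => fields[name] ?? match);
}

const userMessage = renderTemplate('Question: {question}\nAnswer:', {
  question: 'What is the capital of France?',
});
// => 'Question: What is the capital of France?\nAnswer:'
```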
````diff
@@ -371,31 +461,54 @@ export namespace AdaptationRunResults {
 
     pre_optimization_score: number | null;
 
+    task_type: string | null;
+
     /**
-     * Status
+     * Status enum for asynchronous jobs (prompt adaptation, custom router training,
+     * etc.).
+     *
+     * Represents the current state of a long-running operation:
+     *
+     * - **created**: Job has been initialized but not yet queued
+     * - **queued**: Job is waiting in the queue to be processed
+     * - **processing**: Job is currently being executed
+     * - **completed**: Job finished successfully and results are available
+     * - **failed**: Job encountered an error and did not complete
     */
-    result_status
+    result_status?: PromptAdaptationAPI.JobStatus | null;
 
     /**
-     * Optimized system prompt for this target model
+     * Optimized system prompt for this target model. Use this as the system message in
+     * your LLM calls
     */
-    system_prompt
-
-    task_type: string | null;
+    system_prompt?: string | null;
 
     /**
-     * Optimized user message template
+     * Optimized user message template with placeholders. Substitute fields using your
+     * data before calling the LLM
     */
-    user_message_template
+    user_message_template?: string | null;
 
     /**
-     *
+     * List of field names to substitute in the template (e.g., ['question',
+     * 'context']). These match the curly-brace placeholders in user_message_template
     */
-    user_message_template_fields
+    user_message_template_fields?: Array<string> | null;
   }
 
   /**
-   *
+   * Baseline results for the origin model in prompt adaptation.
+   *
+   * Part of AdaptationRunResultsResponse. Contains the performance metrics and
+   * prompt configuration for your original prompt on the origin model. This serves
+   * as the baseline to compare against optimized prompts for target models.
+   *
+   * **Fields include:**
+   *
+   * - Original system prompt and user message template
+   * - Baseline performance score and evaluation metrics
+   * - Cost of running the baseline evaluation
+   * - Job status for the origin model evaluation
   */
   export interface OriginModel {
     cost: number | null;
@@ -404,85 +517,170 @@ export namespace AdaptationRunResults {
 
     model_name: string | null;
 
-    result_status: PromptAdaptationAPI.JobStatus | null;
-
     score: number | null;
 
-
-
-
-
-
+    /**
+     * Status enum for asynchronous jobs (prompt adaptation, custom router training,
+     * etc.).
+     *
+     * Represents the current state of a long-running operation:
+     *
+     * - **created**: Job has been initialized but not yet queued
+     * - **queued**: Job is waiting in the queue to be processed
+     * - **processing**: Job is currently being executed
+     * - **completed**: Job finished successfully and results are available
+     * - **failed**: Job encountered an error and did not complete
+     */
+    result_status?: PromptAdaptationAPI.JobStatus | null;
 
-
+    /**
+     * Original system prompt used for the origin model
+     */
+    system_prompt?: string | null;
 
-    /**
-
-
-
-
-     * Unique ID for this adaptation run. Use this to check status and retrieve results
-     */
-    adaptation_run_id: string;
+    /**
+     * Original user message template used for the origin model
+     */
+    user_message_template?: string | null;
+  }
 }
 
-export type PromptAdaptationGetAdaptRunsResponse = Array<AdaptationRunResults>;
-
 /**
- *
+ * Response model for GET /v2/prompt/adaptStatus/{adaptation_run_id} endpoint.
+ *
+ * Returns the current status of an asynchronous prompt adaptation job. Poll this
+ * endpoint periodically to track progress. When status is 'completed', you can
+ * retrieve the optimized prompts using the /adaptResults endpoint.
+ *
+ * **Status values:**
+ *
+ * - **created**: Job has been initialized
+ * - **queued**: Waiting in queue (check queue_position for your place in line)
+ * - **processing**: Currently running optimization
+ * - **completed**: Finished successfully, results available via /adaptResults
+ * - **failed**: Encountered an error during processing
+ *
+ * **Polling recommendations:**
+ *
+ * - Poll every 30-60 seconds while status is incomplete
+ * - Stop polling once status is 'completed' or 'failed'
+ * - Adaptation typically takes 10-30 minutes total
 */
 export interface PromptAdaptationGetAdaptStatusResponse {
   /**
-   * Unique
+   * Unique identifier for this adaptation run. Use this to poll status and retrieve
+   * optimized prompts when complete
   */
   adaptation_run_id: string;
 
   /**
-   * Current status of the adaptation run
-   * failed)
+   * Current status of the adaptation run. Poll until this is 'completed' or 'failed'
   */
   status: JobStatus;
 
   /**
-   * Position in queue
+   * Position in queue when status is 'queued'. Lower numbers process sooner. Null
+   * when not queued
   */
   queue_position?: number | null;
 }
 
-
+/**
+ * Response model for GET /v2/prompt/adapt/{adaptation_run_id}/costs endpoint.
+ *
+ * Contains the total LLM costs and detailed usage records for a prompt adaptation
+ * run. Use this to track costs associated with optimizing prompts for different
+ * target models.
+ */
+export interface PromptAdaptationGetCostResponse {
+  /**
+   * Unique identifier for the adaptation run
+   */
   adaptation_run_id: string;
 
+  /**
+   * Total cost in USD across all LLM requests in this adaptation run
+   */
   total_cost: number;
 
-
+  /**
+   * Detailed usage records for each LLM request made during the adaptation
+   */
+  usage_records: Array<PromptAdaptationGetCostResponse.UsageRecord>;
 }
 
-export namespace
+export namespace PromptAdaptationGetCostResponse {
+  /**
+   * Individual LLM usage record with token counts and cost breakdown.
+   *
+   * Returned by GET /llm-usage endpoint and included in AdaptationRunCostResponse.
+   * Each record represents a single LLM API call with detailed usage metrics.
+   */
   export interface UsageRecord {
+    /**
+     * Unique identifier for this usage record
+     */
     id: string;
 
+    /**
+     * Adaptation run ID this usage is associated with
+     */
     adaptation_run_id: string;
 
+    /**
+     * Cost of input tokens in USD
+     */
     input_cost: number;
 
+    /**
+     * Number of input tokens consumed
+     */
     input_tokens: number;
 
+    /**
+     * Model name (e.g., 'gpt-4', 'claude-3-opus-20240229')
+     */
     model: string;
 
+    /**
+     * Organization ID associated with the request
+     */
     organization_id: string;
 
+    /**
+     * Cost of output tokens in USD
+     */
    output_cost: number;
 
+    /**
+     * Number of output tokens generated
+     */
    output_tokens: number;
 
+    /**
+     * LLM provider (e.g., 'openai', 'anthropic', 'google')
+     */
    provider: string;
 
+    /**
+     * Type of task: 'pre-optimization evaluation', 'optimization', or
+     * 'post-optimization evaluation'
+     */
    task_type: string;
 
+    /**
+     * Unix timestamp when the request was made
+     */
    timestamp: number;
 
+    /**
+     * Total cost (input + output) in USD
+     */
    total_cost: number;
 
+    /**
+     * User ID who made the request
+     */
    user_id: string;
   }
 }
````
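Following the polling recommendations documented above, here is a sketch of waiting for a run to finish. The `getAdaptStatus` method name is inferred from the `PromptAdaptationGetAdaptStatusResponse` type and the `/v2/prompt/adaptStatus/{adaptation_run_id}` endpoint, so treat it as an assumption and verify against the generated client.

```ts
type StatusResponse = {
  status: 'created' | 'queued' | 'processing' | 'completed' | 'failed';
  queue_position?: number | null;
};

async function waitForAdaptation(
  // Only the method used here is typed; pass the notdiamond client from the earlier sketch.
  client: { promptAdaptation: { getAdaptStatus(id: string): Promise<StatusResponse> } },
  adaptationRunId: string,
): Promise<'completed' | 'failed'> {
  for (;;) {
    const { status, queue_position } = await client.promptAdaptation.getAdaptStatus(adaptationRunId);
    if (status === 'completed' || status === 'failed') return status;
    if (status === 'queued' && queue_position != null) console.log(`queued at position ${queue_position}`);
    await new Promise((resolve) => setTimeout(resolve, 45_000)); // poll every 30-60 seconds
  }
}
```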
````diff
@@ -502,9 +700,9 @@ export interface PromptAdaptationAdaptParams {
 
   /**
    * List of models to adapt the prompt for. Maximum count depends on your
-   * subscription tier
+   * subscription tier (Free: 1, Starter: 3, Startup: 5, Enterprise: 10)
    */
-  target_models: Array<
+  target_models: Array<RequestProvider>;
 
   /**
    * User message template with placeholders for fields. Use curly braces for field
@@ -518,190 +716,50 @@ export interface PromptAdaptationAdaptParams {
 
   /**
    * Training examples (legacy parameter). Use train_goldens and test_goldens for
-   * better control
+   * better control. Minimum 25 examples (or 3 with prototype_mode=true)
    */
-  goldens?: Array<
+  goldens?: Array<GoldenRecord> | null;
 
   /**
    * Model for specifying an LLM provider in API requests.
    */
-  origin_model?:
+  origin_model?: RequestProvider | null;
 
   /**
-   * Optional baseline score for the origin model
+   * Optional baseline score for the origin model. If provided, can skip origin model
+   * evaluation
   */
   origin_model_evaluation_score?: number | null;
 
   /**
-   *
+   * Enable prototype mode to use as few as 3 training examples (instead of 25).
+   * Note: Performance may be degraded with fewer examples. Recommended for
+   * prototyping AI applications when you don't have enough data yet
   */
-
+  prototype_mode?: boolean;
 
   /**
-   *
+   * Test examples for evaluation. Required if train_goldens is provided. Used to
+   * measure final performance on held-out data
   */
-
-}
+  test_goldens?: Array<GoldenRecord> | null;
 
-export namespace PromptAdaptationAdaptParams {
   /**
-   *
+   * Training examples for prompt optimization. Minimum 25 examples required (or 3
+   * with prototype_mode=true). Cannot be used with 'goldens' parameter
   */
-
-  /**
-   * Model name (e.g., 'gpt-4o', 'claude-sonnet-4-5-20250929')
-   */
-  model: string;
-
-  /**
-   * Provider name (e.g., 'openai', 'anthropic', 'google')
-   */
-  provider: string;
-
-  /**
-   * Maximum context length for the model (required for custom models)
-   */
-  context_length?: number | null;
-
-  /**
-   * Input token price per million tokens in USD (required for custom models)
-   */
-  input_price?: number | null;
-
-  /**
-   * Whether this is a custom model not in Not Diamond's supported model list
-   */
-  is_custom?: boolean;
-
-  /**
-   * Average latency in seconds (required for custom models)
-   */
-  latency?: number | null;
-
-  /**
-   * Output token price per million tokens in USD (required for custom models)
-   */
-  output_price?: number | null;
-  }
-
-  /**
-   * A training or test example for prompt adaptation.
-   */
-  export interface Golden {
-    /**
-     * Dictionary mapping field names to their values. Keys must match the fields
-     * specified in the template
-     */
-    fields: { [key: string]: string };
-
-    /**
-     * Expected answer for supervised evaluation. Required for supervised metrics,
-     * optional for unsupervised
-     */
-    answer?: string | null;
-  }
-
-  /**
-   * Model for specifying an LLM provider in API requests.
-   */
-  export interface OriginModel {
-    /**
-     * Model name (e.g., 'gpt-4o', 'claude-sonnet-4-5-20250929')
-     */
-    model: string;
-
-    /**
-     * Provider name (e.g., 'openai', 'anthropic', 'google')
-     */
-    provider: string;
-
-    /**
-     * Maximum context length for the model (required for custom models)
-     */
-    context_length?: number | null;
-
-    /**
-     * Input token price per million tokens in USD (required for custom models)
-     */
-    input_price?: number | null;
-
-    /**
-     * Whether this is a custom model not in Not Diamond's supported model list
-     */
-    is_custom?: boolean;
-
-    /**
-     * Average latency in seconds (required for custom models)
-     */
-    latency?: number | null;
-
-    /**
-     * Output token price per million tokens in USD (required for custom models)
-     */
-    output_price?: number | null;
-  }
-
-  /**
-   * A training or test example for prompt adaptation.
-   */
-  export interface TestGolden {
-    /**
-     * Dictionary mapping field names to their values. Keys must match the fields
-     * specified in the template
-     */
-    fields: { [key: string]: string };
-
-    /**
-     * Expected answer for supervised evaluation. Required for supervised metrics,
-     * optional for unsupervised
-     */
-    answer?: string | null;
-  }
-
-  /**
-   * A training or test example for prompt adaptation.
-   */
-  export interface TrainGolden {
-    /**
-     * Dictionary mapping field names to their values. Keys must match the fields
-     * specified in the template
-     */
-    fields: { [key: string]: string };
-
-    /**
-     * Expected answer for supervised evaluation. Required for supervised metrics,
-     * optional for unsupervised
-     */
-    answer?: string | null;
-  }
-}
-
-export interface PromptAdaptationGetAdaptRunResultsParams {
-  /**
-   * Path param:
-   */
-  user_id: string;
-
-  /**
-   * Header param:
-   */
-  'x-token': string;
-}
-
-export interface PromptAdaptationGetAdaptRunsParams {
-  'x-token': string;
+  train_goldens?: Array<GoldenRecord> | null;
 }
 
 export declare namespace PromptAdaptation {
   export {
-    type
+    type GoldenRecord as GoldenRecord,
     type JobStatus as JobStatus,
+    type RequestProvider as RequestProvider,
     type PromptAdaptationAdaptResponse as PromptAdaptationAdaptResponse,
-    type
+    type PromptAdaptationGetAdaptResultsResponse as PromptAdaptationGetAdaptResultsResponse,
     type PromptAdaptationGetAdaptStatusResponse as PromptAdaptationGetAdaptStatusResponse,
-    type
+    type PromptAdaptationGetCostResponse as PromptAdaptationGetCostResponse,
     type PromptAdaptationAdaptParams as PromptAdaptationAdaptParams,
-    type PromptAdaptationGetAdaptRunResultsParams as PromptAdaptationGetAdaptRunResultsParams,
-    type PromptAdaptationGetAdaptRunsParams as PromptAdaptationGetAdaptRunsParams,
   };
 }
````
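Putting the reworked parameter surface together, a closing sketch that submits an adaptation with the consolidated `train_goldens` / `test_goldens` / `prototype_mode` fields, reusing the `client`, `trainGoldens`, and `waitForAdaptation` values from the sketches above. The `adapt` method name is inferred from `PromptAdaptationAdaptParams` and the POST `/v2/prompt/adapt` endpoint, and the golden values are illustrative; verify both against the generated client before relying on them.

```ts
async function submitAdaptation() {
  const run = await client.promptAdaptation.adapt({
    system_prompt: 'You are a helpful assistant that answers questions accurately.',
    template: 'Context: {context}\nQuestion: {question}\nAnswer:',
    target_models: [
      { provider: 'anthropic', model: 'claude-sonnet-4-5-20250929' },
      { provider: 'google', model: 'gemini-2.5-flash' },
    ],
    evaluation_metric: 'LLMaaJ:Sem_Sim_3',
    prototype_mode: true, // allows 3-24 training examples instead of the usual 25+
    train_goldens: trainGoldens,
    test_goldens: trainGoldens.slice(0, 3), // use genuinely held-out examples in practice
  });

  console.log('adaptation_run_id:', run.adaptation_run_id);
  await waitForAdaptation(client, run.adaptation_run_id); // from the polling sketch above
}
```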