notdiamond 2.0.0-rc5 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144)
  1. package/CHANGELOG.md +170 -0
  2. package/LICENSE +1 -1
  3. package/README.md +276 -148
  4. package/client.d.mts +16 -19
  5. package/client.d.mts.map +1 -1
  6. package/client.d.ts +16 -19
  7. package/client.d.ts.map +1 -1
  8. package/client.js +11 -14
  9. package/client.js.map +1 -1
  10. package/client.mjs +11 -14
  11. package/client.mjs.map +1 -1
  12. package/internal/tslib.js +17 -17
  13. package/package.json +3 -3
  14. package/resources/{pzn.d.mts → custom-router.d.mts} +16 -96
  15. package/resources/custom-router.d.mts.map +1 -0
  16. package/resources/{pzn.d.ts → custom-router.d.ts} +16 -96
  17. package/resources/custom-router.d.ts.map +1 -0
  18. package/resources/{pzn.js → custom-router.js} +13 -50
  19. package/resources/custom-router.js.map +1 -0
  20. package/resources/{pzn.mjs → custom-router.mjs} +11 -48
  21. package/resources/custom-router.mjs.map +1 -0
  22. package/resources/index.d.mts +4 -5
  23. package/resources/index.d.mts.map +1 -1
  24. package/resources/index.d.ts +4 -5
  25. package/resources/index.d.ts.map +1 -1
  26. package/resources/index.js +5 -7
  27. package/resources/index.js.map +1 -1
  28. package/resources/index.mjs +2 -3
  29. package/resources/index.mjs.map +1 -1
  30. package/resources/model-router.d.mts +6 -123
  31. package/resources/model-router.d.mts.map +1 -1
  32. package/resources/model-router.d.ts +6 -123
  33. package/resources/model-router.d.ts.map +1 -1
  34. package/resources/model-router.js +3 -32
  35. package/resources/model-router.js.map +1 -1
  36. package/resources/model-router.mjs +3 -32
  37. package/resources/model-router.mjs.map +1 -1
  38. package/resources/preferences.d.mts +4 -23
  39. package/resources/preferences.d.mts.map +1 -1
  40. package/resources/preferences.d.ts +4 -23
  41. package/resources/preferences.d.ts.map +1 -1
  42. package/resources/preferences.js +3 -25
  43. package/resources/preferences.js.map +1 -1
  44. package/resources/preferences.mjs +3 -25
  45. package/resources/preferences.mjs.map +1 -1
  46. package/resources/prompt-adaptation.d.mts +687 -0
  47. package/resources/prompt-adaptation.d.mts.map +1 -0
  48. package/resources/prompt-adaptation.d.ts +687 -0
  49. package/resources/prompt-adaptation.d.ts.map +1 -0
  50. package/resources/prompt-adaptation.js +258 -0
  51. package/resources/prompt-adaptation.js.map +1 -0
  52. package/resources/prompt-adaptation.mjs +254 -0
  53. package/resources/prompt-adaptation.mjs.map +1 -0
  54. package/src/client.ts +37 -44
  55. package/src/resources/{pzn.ts → custom-router.ts} +17 -122
  56. package/src/resources/index.ts +14 -16
  57. package/src/resources/model-router.ts +5 -139
  58. package/src/resources/preferences.ts +3 -34
  59. package/src/resources/prompt-adaptation.ts +780 -0
  60. package/src/version.ts +1 -1
  61. package/version.d.mts +1 -1
  62. package/version.d.mts.map +1 -1
  63. package/version.d.ts +1 -1
  64. package/version.d.ts.map +1 -1
  65. package/version.js +1 -1
  66. package/version.js.map +1 -1
  67. package/version.mjs +1 -1
  68. package/version.mjs.map +1 -1
  69. package/resources/prompt/adapt.d.mts +0 -352
  70. package/resources/prompt/adapt.d.mts.map +0 -1
  71. package/resources/prompt/adapt.d.ts +0 -352
  72. package/resources/prompt/adapt.d.ts.map +0 -1
  73. package/resources/prompt/adapt.js +0 -154
  74. package/resources/prompt/adapt.js.map +0 -1
  75. package/resources/prompt/adapt.mjs +0 -150
  76. package/resources/prompt/adapt.mjs.map +0 -1
  77. package/resources/prompt/index.d.mts +0 -3
  78. package/resources/prompt/index.d.mts.map +0 -1
  79. package/resources/prompt/index.d.ts +0 -3
  80. package/resources/prompt/index.d.ts.map +0 -1
  81. package/resources/prompt/index.js +0 -9
  82. package/resources/prompt/index.js.map +0 -1
  83. package/resources/prompt/index.mjs +0 -4
  84. package/resources/prompt/index.mjs.map +0 -1
  85. package/resources/prompt/prompt.d.mts +0 -338
  86. package/resources/prompt/prompt.d.mts.map +0 -1
  87. package/resources/prompt/prompt.d.ts +0 -338
  88. package/resources/prompt/prompt.d.ts.map +0 -1
  89. package/resources/prompt/prompt.js +0 -128
  90. package/resources/prompt/prompt.js.map +0 -1
  91. package/resources/prompt/prompt.mjs +0 -123
  92. package/resources/prompt/prompt.mjs.map +0 -1
  93. package/resources/prompt.d.mts +0 -2
  94. package/resources/prompt.d.mts.map +0 -1
  95. package/resources/prompt.d.ts +0 -2
  96. package/resources/prompt.d.ts.map +0 -1
  97. package/resources/prompt.js +0 -6
  98. package/resources/prompt.js.map +0 -1
  99. package/resources/prompt.mjs +0 -3
  100. package/resources/prompt.mjs.map +0 -1
  101. package/resources/pzn.d.mts.map +0 -1
  102. package/resources/pzn.d.ts.map +0 -1
  103. package/resources/pzn.js.map +0 -1
  104. package/resources/pzn.mjs.map +0 -1
  105. package/resources/report/index.d.mts +0 -3
  106. package/resources/report/index.d.mts.map +0 -1
  107. package/resources/report/index.d.ts +0 -3
  108. package/resources/report/index.d.ts.map +0 -1
  109. package/resources/report/index.js +0 -9
  110. package/resources/report/index.js.map +0 -1
  111. package/resources/report/index.mjs +0 -4
  112. package/resources/report/index.mjs.map +0 -1
  113. package/resources/report/metrics.d.mts +0 -87
  114. package/resources/report/metrics.d.mts.map +0 -1
  115. package/resources/report/metrics.d.ts +0 -87
  116. package/resources/report/metrics.d.ts.map +0 -1
  117. package/resources/report/metrics.js +0 -57
  118. package/resources/report/metrics.js.map +0 -1
  119. package/resources/report/metrics.mjs +0 -53
  120. package/resources/report/metrics.mjs.map +0 -1
  121. package/resources/report/report.d.mts +0 -10
  122. package/resources/report/report.d.mts.map +0 -1
  123. package/resources/report/report.d.ts +0 -10
  124. package/resources/report/report.d.ts.map +0 -1
  125. package/resources/report/report.js +0 -17
  126. package/resources/report/report.js.map +0 -1
  127. package/resources/report/report.mjs +0 -12
  128. package/resources/report/report.mjs.map +0 -1
  129. package/resources/report.d.mts +0 -2
  130. package/resources/report.d.mts.map +0 -1
  131. package/resources/report.d.ts +0 -2
  132. package/resources/report.d.ts.map +0 -1
  133. package/resources/report.js +0 -6
  134. package/resources/report.js.map +0 -1
  135. package/resources/report.mjs +0 -3
  136. package/resources/report.mjs.map +0 -1
  137. package/src/resources/prompt/adapt.ts +0 -402
  138. package/src/resources/prompt/index.ts +0 -16
  139. package/src/resources/prompt/prompt.ts +0 -398
  140. package/src/resources/prompt.ts +0 -3
  141. package/src/resources/report/index.ts +0 -4
  142. package/src/resources/report/metrics.ts +0 -99
  143. package/src/resources/report/report.ts +0 -19
  144. package/src/resources/report.ts +0 -3
package/resources/prompt-adaptation.js
@@ -0,0 +1,258 @@
+ "use strict";
+ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.PromptAdaptation = void 0;
+ const resource_1 = require("../core/resource.js");
+ const path_1 = require("../internal/utils/path.js");
+ class PromptAdaptation extends resource_1.APIResource {
+ /**
+ * Adapt your prompt from one LLM to work optimally across different target LLMs.
+ *
+ * This endpoint automatically optimizes your prompt (system prompt + user message
+ * template) to improve accuracy on your use case across various models. Each model
+ * has unique characteristics, and what works well for GPT-5 might not work as well
+ * for Claude or Gemini.
+ *
+ * **How Prompt Adaptation Works:**
+ *
+ * 1. You provide your current prompt and optionally your current origin model
+ * 2. You specify the target models you want to adapt your prompt to
+ * 3. You provide evaluation examples (golden records) with expected answers
+ * 4. The system runs optimization to find the best prompt for each target model
+ * 5. You receive adapted prompts that perform well on your target models
+ *
+ * **Evaluation Metrics:** Choose either a standard metric or provide custom
+ * evaluation:
+ *
+ * - **Standard metrics**: LLMaaJ:Sem_Sim_1 (semantic similarity), JSON_Match
+ * - **Custom evaluation**: Provide evaluation_config with your own LLM judge,
+ * prompt, and cutoff
+ *
+ * **Dataset Requirements:**
+ *
+ * - Minimum 25 examples in train_goldens (more examples = better adaptation)
+ * - **Prototype mode**: Set `prototype_mode: true` to use as few as 3 examples for
+ * prototyping
+ * - Recommended when you don't have enough data yet to build a proof-of-concept
+ * - Note: Performance may be degraded compared to standard mode (25+ examples)
+ * - Trade-off: Faster iteration with less data vs. potentially less
+ * generalizability
+ * - Each example must have fields matching your template placeholders
+ * - Supervised evaluation requires 'answer' field in each golden record
+ * - Unsupervised evaluation can work without answers
+ *
+ * **Training Time:**
+ *
+ * - Processing is asynchronous and typically takes 10-30 minutes
+ * - Time depends on: number of target models, dataset size, model availability
+ * - Use the returned adaptation_run_id to check status and retrieve results
+ *
+ * **Example Workflow:**
+ *
+ * ```
+ * 1. POST /v2/prompt/adapt - Submit adaptation request
+ * 2. GET /v2/prompt/adaptStatus/{id} - Poll status until completed
+ * 3. GET /v2/prompt/adaptResults/{id} - Retrieve optimized prompts
+ * 4. Use optimized prompts in production with target models
+ * ```
+ *
+ * **Related Documentation:** See
+ * https://docs.notdiamond.ai/docs/adapting-prompts-to-new-models for detailed
+ * guide.
+ *
+ * @example
+ * ```ts
+ * const response = await client.promptAdaptation.adapt({
+ * fields: ['question'],
+ * system_prompt: 'You are a mathematical assistant that counts digits accurately.',
+ * target_models: [
+ * { model: 'claude-sonnet-4-5-20250929', provider: 'anthropic' },
+ * { model: 'gemini-2.5-flash', provider: 'google' },
+ * ],
+ * template: 'Question: {question}\nAnswer:',
+ * evaluation_metric: 'LLMaaJ:Sem_Sim_1',
+ * prototype_mode: true,
+ * test_goldens: [
+ * {
+ * fields: { ... },
+ * answer: '15',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '8',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '1',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '10',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '11',
+ * },
+ * ],
+ * train_goldens: [
+ * {
+ * fields: { ... },
+ * answer: '20',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '10',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '0',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '16',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '2',
+ * },
+ * ],
+ * });
+ * ```
+ */
+ adapt(body, options) {
+ return this._client.post('/v2/prompt/adapt', { body, ...options });
+ }
+ /**
+ * Retrieve the complete results of a prompt adaptation run, including optimized
+ * prompts for all target models.
+ *
+ * This endpoint returns the adapted prompts and evaluation metrics for each target
+ * model in your adaptation request. Call this endpoint after the adaptation status
+ * is 'completed' to get your optimized prompts.
+ *
+ * **Response Structure:**
+ *
+ * - **origin_model**: Baseline performance of your original prompt on the origin
+ * model
+ * - Includes: system_prompt, user_message_template, score, evaluation metrics,
+ * cost
+ * - **target_models**: Array of results for each target model
+ * - Includes: optimized system_prompt, user_message_template, template_fields
+ * - pre_optimization_score: Performance before adaptation
+ * - post_optimization_score: Performance after adaptation
+ * - Evaluation metrics and cost information
+ *
+ * **Using Adapted Prompts:**
+ *
+ * 1. Extract the `system_prompt` and `user_message_template` from each target
+ * model result
+ * 2. Use `user_message_template_fields` to know which fields to substitute
+ * 3. Apply the optimized prompts when calling the respective target models
+ * 4. Compare pre/post optimization scores to see improvement
+ *
+ * **Status Handling:**
+ *
+ * - If adaptation is still processing, target model results will have
+ * `result_status: "processing"`
+ * - Only completed target models will have system_prompt and template values
+ * - Failed target models will have `result_status: "failed"` with null values
+ *
+ * **Cost Information:**
+ *
+ * - Each model result includes cost in USD for the adaptation process
+ * - Costs vary based on model pricing and number of evaluation examples
+ * - Typical range: $0.10 - $2.00 per target model
+ *
+ * **Best Practices:**
+ *
+ * 1. Wait for status 'completed' before calling this endpoint
+ * 2. Check result_status for each target model
+ * 3. Validate that post_optimization_score > pre_optimization_score
+ * 4. Save optimized prompts for production use
+ * 5. A/B test adapted prompts against originals in production
+ *
+ * @example
+ * ```ts
+ * const response =
+ * await client.promptAdaptation.getAdaptResults(
+ * 'adaptation_run_id',
+ * );
+ * ```
+ */
+ getAdaptResults(adaptationRunID, options) {
+ return this._client.get((0, path_1.path) `/v2/prompt/adaptResults/${adaptationRunID}`, options);
+ }
+ /**
+ * Check the status of a prompt adaptation run.
+ *
+ * Use this endpoint to poll the status of your adaptation request. Processing is
+ * asynchronous, so you'll need to check periodically until the status indicates
+ * completion.
+ *
+ * **Status Values:**
+ *
+ * - `created`: Initial state, not yet processing
+ * - `queued`: Waiting for processing capacity (check queue_position)
+ * - `processing`: Currently optimizing prompts
+ * - `completed`: All target models have been processed successfully
+ * - `failed`: One or more target models failed to process
+ *
+ * **Polling Recommendations:**
+ *
+ * - Poll every 30-60 seconds during processing
+ * - Check queue_position if status is 'queued' to estimate wait time
+ * - Stop polling once status is 'completed' or 'failed'
+ * - Use GET /v2/prompt/adaptResults to retrieve results after completion
+ *
+ * **Queue Position:**
+ *
+ * - Only present when status is 'queued'
+ * - Lower numbers mean earlier processing (position 1 is next)
+ * - Typical wait time: 1-5 minutes per position
+ *
+ * **Note:** This endpoint only returns status information. To get the actual
+ * adapted prompts and evaluation results, use GET /v2/prompt/adaptResults once
+ * status is 'completed'.
+ *
+ * @example
+ * ```ts
+ * const response =
+ * await client.promptAdaptation.getAdaptStatus(
+ * 'adaptation_run_id',
+ * );
+ * ```
+ */
+ getAdaptStatus(adaptationRunID, options) {
+ return this._client.get((0, path_1.path) `/v2/prompt/adaptStatus/${adaptationRunID}`, options);
+ }
+ /**
+ * Get LLM usage costs for a specific prompt adaptation run.
+ *
+ * This endpoint returns the total cost and detailed usage records for all LLM
+ * requests made during a prompt adaptation run. Use this to track costs associated
+ * with optimizing prompts for different target models.
+ *
+ * **Cost Breakdown:**
+ *
+ * - Total cost across all models used in the adaptation
+ * - Individual usage records with provider, model, tokens, and costs
+ * - Timestamps for each LLM request
+ *
+ * **Access Control:**
+ *
+ * - Only accessible by the user who created the adaptation run
+ * - Requires prompt adaptation access
+ *
+ * @example
+ * ```ts
+ * const response = await client.promptAdaptation.getCost(
+ * 'adaptation_run_id',
+ * );
+ * ```
+ */
+ getCost(adaptationRunID, options) {
+ return this._client.get((0, path_1.path) `/v2/prompt/adapt/${adaptationRunID}/costs`, options);
+ }
+ }
+ exports.PromptAdaptation = PromptAdaptation;
+ //# sourceMappingURL=prompt-adaptation.js.map
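
The numbered workflow in the `adapt` and `getAdaptStatus` documentation above (submit, poll every 30-60 seconds, retrieve results once completed) can be wired together in a few lines of TypeScript. The sketch below is an editorial illustration and not part of the package diff: it assumes the package's default export is the client class, assumes the response fields are named `adaptation_run_id` and `status` as in the JSDoc, and uses made-up golden records purely as placeholders.

```ts
// Sketch only: submit an adaptation run, poll its status, then fetch results.
// Response field names (adaptation_run_id, status) are assumptions taken from
// the JSDoc above, so responses are handled loosely typed here.
import NotDiamond from 'notdiamond';

async function adaptAndWait(client: NotDiamond) {
  const run: any = await client.promptAdaptation.adapt({
    fields: ['question'],
    system_prompt: 'You are a mathematical assistant that counts digits accurately.',
    template: 'Question: {question}\nAnswer:',
    target_models: [{ model: 'claude-sonnet-4-5-20250929', provider: 'anthropic' }],
    evaluation_metric: 'LLMaaJ:Sem_Sim_1',
    prototype_mode: true, // allows as few as 3 train_goldens, per the docs above
    // Hypothetical golden records; a real run should use your own data.
    train_goldens: [
      { fields: { question: 'How many digits are in 404?' }, answer: '3' },
      { fields: { question: 'How many digits are in 12345?' }, answer: '5' },
      { fields: { question: 'How many digits are in 7?' }, answer: '1' },
    ],
    test_goldens: [{ fields: { question: 'How many digits are in 99?' }, answer: '2' }],
  });

  const runId: string = run.adaptation_run_id;

  // Poll until the run reaches a terminal state, as recommended above.
  for (;;) {
    const status: any = await client.promptAdaptation.getAdaptStatus(runId);
    if (status.status === 'completed') break;
    if (status.status === 'failed') throw new Error(`adaptation ${runId} failed`);
    await new Promise((resolve) => setTimeout(resolve, 30_000));
  }

  return client.promptAdaptation.getAdaptResults(runId);
}
```

Since the docs above quote 10-30 minutes of processing even for small datasets, a loop like this belongs in a background job rather than a request handler.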
package/resources/prompt-adaptation.js.map
@@ -0,0 +1 @@
+ {"version":3,"file":"prompt-adaptation.js","sourceRoot":"","sources":["../src/resources/prompt-adaptation.ts"],"names":[],"mappings":";AAAA,sFAAsF;;;AAEtF,kDAA+C;AAI/C,oDAA8C;AAE9C,MAAa,gBAAiB,SAAQ,sBAAW;IAC/C;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAiHG;IACH,KAAK,CACH,IAAiC,EACjC,OAAwB;QAExB,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,kBAAkB,EAAE,EAAE,IAAI,EAAE,GAAG,OAAO,EAAE,CAAC,CAAC;IACrE,CAAC;IAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAwDG;IACH,eAAe,CACb,eAAuB,EACvB,OAAwB;QAExB,OAAO,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,IAAA,WAAI,EAAA,2BAA2B,eAAe,EAAE,EAAE,OAAO,CAAC,CAAC;IACrF,CAAC;IAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAuCG;IACH,cAAc,CACZ,eAAuB,EACvB,OAAwB;QAExB,OAAO,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,IAAA,WAAI,EAAA,0BAA0B,eAAe,EAAE,EAAE,OAAO,CAAC,CAAC;IACpF,CAAC;IAED;;;;;;;;;;;;;;;;;;;;;;;;OAwBG;IACH,OAAO,CAAC,eAAuB,EAAE,OAAwB;QACvD,OAAO,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,IAAA,WAAI,EAAA,oBAAoB,eAAe,QAAQ,EAAE,OAAO,CAAC,CAAC;IACpF,CAAC;CACF;AArQD,4CAqQC"}
package/resources/prompt-adaptation.mjs
@@ -0,0 +1,254 @@
+ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ import { APIResource } from "../core/resource.mjs";
+ import { path } from "../internal/utils/path.mjs";
+ export class PromptAdaptation extends APIResource {
+ /**
+ * Adapt your prompt from one LLM to work optimally across different target LLMs.
+ *
+ * This endpoint automatically optimizes your prompt (system prompt + user message
+ * template) to improve accuracy on your use case across various models. Each model
+ * has unique characteristics, and what works well for GPT-5 might not work as well
+ * for Claude or Gemini.
+ *
+ * **How Prompt Adaptation Works:**
+ *
+ * 1. You provide your current prompt and optionally your current origin model
+ * 2. You specify the target models you want to adapt your prompt to
+ * 3. You provide evaluation examples (golden records) with expected answers
+ * 4. The system runs optimization to find the best prompt for each target model
+ * 5. You receive adapted prompts that perform well on your target models
+ *
+ * **Evaluation Metrics:** Choose either a standard metric or provide custom
+ * evaluation:
+ *
+ * - **Standard metrics**: LLMaaJ:Sem_Sim_1 (semantic similarity), JSON_Match
+ * - **Custom evaluation**: Provide evaluation_config with your own LLM judge,
+ * prompt, and cutoff
+ *
+ * **Dataset Requirements:**
+ *
+ * - Minimum 25 examples in train_goldens (more examples = better adaptation)
+ * - **Prototype mode**: Set `prototype_mode: true` to use as few as 3 examples for
+ * prototyping
+ * - Recommended when you don't have enough data yet to build a proof-of-concept
+ * - Note: Performance may be degraded compared to standard mode (25+ examples)
+ * - Trade-off: Faster iteration with less data vs. potentially less
+ * generalizability
+ * - Each example must have fields matching your template placeholders
+ * - Supervised evaluation requires 'answer' field in each golden record
+ * - Unsupervised evaluation can work without answers
+ *
+ * **Training Time:**
+ *
+ * - Processing is asynchronous and typically takes 10-30 minutes
+ * - Time depends on: number of target models, dataset size, model availability
+ * - Use the returned adaptation_run_id to check status and retrieve results
+ *
+ * **Example Workflow:**
+ *
+ * ```
+ * 1. POST /v2/prompt/adapt - Submit adaptation request
+ * 2. GET /v2/prompt/adaptStatus/{id} - Poll status until completed
+ * 3. GET /v2/prompt/adaptResults/{id} - Retrieve optimized prompts
+ * 4. Use optimized prompts in production with target models
+ * ```
+ *
+ * **Related Documentation:** See
+ * https://docs.notdiamond.ai/docs/adapting-prompts-to-new-models for detailed
+ * guide.
+ *
+ * @example
+ * ```ts
+ * const response = await client.promptAdaptation.adapt({
+ * fields: ['question'],
+ * system_prompt: 'You are a mathematical assistant that counts digits accurately.',
+ * target_models: [
+ * { model: 'claude-sonnet-4-5-20250929', provider: 'anthropic' },
+ * { model: 'gemini-2.5-flash', provider: 'google' },
+ * ],
+ * template: 'Question: {question}\nAnswer:',
+ * evaluation_metric: 'LLMaaJ:Sem_Sim_1',
+ * prototype_mode: true,
+ * test_goldens: [
+ * {
+ * fields: { ... },
+ * answer: '15',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '8',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '1',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '10',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '11',
+ * },
+ * ],
+ * train_goldens: [
+ * {
+ * fields: { ... },
+ * answer: '20',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '10',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '0',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '16',
+ * },
+ * {
+ * fields: { ... },
+ * answer: '2',
+ * },
+ * ],
+ * });
+ * ```
+ */
+ adapt(body, options) {
+ return this._client.post('/v2/prompt/adapt', { body, ...options });
+ }
+ /**
+ * Retrieve the complete results of a prompt adaptation run, including optimized
+ * prompts for all target models.
+ *
+ * This endpoint returns the adapted prompts and evaluation metrics for each target
+ * model in your adaptation request. Call this endpoint after the adaptation status
+ * is 'completed' to get your optimized prompts.
+ *
+ * **Response Structure:**
+ *
+ * - **origin_model**: Baseline performance of your original prompt on the origin
+ * model
+ * - Includes: system_prompt, user_message_template, score, evaluation metrics,
+ * cost
+ * - **target_models**: Array of results for each target model
+ * - Includes: optimized system_prompt, user_message_template, template_fields
+ * - pre_optimization_score: Performance before adaptation
+ * - post_optimization_score: Performance after adaptation
+ * - Evaluation metrics and cost information
+ *
+ * **Using Adapted Prompts:**
+ *
+ * 1. Extract the `system_prompt` and `user_message_template` from each target
+ * model result
+ * 2. Use `user_message_template_fields` to know which fields to substitute
+ * 3. Apply the optimized prompts when calling the respective target models
+ * 4. Compare pre/post optimization scores to see improvement
+ *
+ * **Status Handling:**
+ *
+ * - If adaptation is still processing, target model results will have
+ * `result_status: "processing"`
+ * - Only completed target models will have system_prompt and template values
+ * - Failed target models will have `result_status: "failed"` with null values
+ *
+ * **Cost Information:**
+ *
+ * - Each model result includes cost in USD for the adaptation process
+ * - Costs vary based on model pricing and number of evaluation examples
+ * - Typical range: $0.10 - $2.00 per target model
+ *
+ * **Best Practices:**
+ *
+ * 1. Wait for status 'completed' before calling this endpoint
+ * 2. Check result_status for each target model
+ * 3. Validate that post_optimization_score > pre_optimization_score
+ * 4. Save optimized prompts for production use
+ * 5. A/B test adapted prompts against originals in production
+ *
+ * @example
+ * ```ts
+ * const response =
+ * await client.promptAdaptation.getAdaptResults(
+ * 'adaptation_run_id',
+ * );
+ * ```
+ */
+ getAdaptResults(adaptationRunID, options) {
+ return this._client.get(path `/v2/prompt/adaptResults/${adaptationRunID}`, options);
+ }
+ /**
+ * Check the status of a prompt adaptation run.
+ *
+ * Use this endpoint to poll the status of your adaptation request. Processing is
+ * asynchronous, so you'll need to check periodically until the status indicates
+ * completion.
+ *
+ * **Status Values:**
+ *
+ * - `created`: Initial state, not yet processing
+ * - `queued`: Waiting for processing capacity (check queue_position)
+ * - `processing`: Currently optimizing prompts
+ * - `completed`: All target models have been processed successfully
+ * - `failed`: One or more target models failed to process
+ *
+ * **Polling Recommendations:**
+ *
+ * - Poll every 30-60 seconds during processing
+ * - Check queue_position if status is 'queued' to estimate wait time
+ * - Stop polling once status is 'completed' or 'failed'
+ * - Use GET /v2/prompt/adaptResults to retrieve results after completion
+ *
+ * **Queue Position:**
+ *
+ * - Only present when status is 'queued'
+ * - Lower numbers mean earlier processing (position 1 is next)
+ * - Typical wait time: 1-5 minutes per position
+ *
+ * **Note:** This endpoint only returns status information. To get the actual
+ * adapted prompts and evaluation results, use GET /v2/prompt/adaptResults once
+ * status is 'completed'.
+ *
+ * @example
+ * ```ts
+ * const response =
+ * await client.promptAdaptation.getAdaptStatus(
+ * 'adaptation_run_id',
+ * );
+ * ```
+ */
+ getAdaptStatus(adaptationRunID, options) {
+ return this._client.get(path `/v2/prompt/adaptStatus/${adaptationRunID}`, options);
+ }
+ /**
+ * Get LLM usage costs for a specific prompt adaptation run.
+ *
+ * This endpoint returns the total cost and detailed usage records for all LLM
+ * requests made during a prompt adaptation run. Use this to track costs associated
+ * with optimizing prompts for different target models.
+ *
+ * **Cost Breakdown:**
+ *
+ * - Total cost across all models used in the adaptation
+ * - Individual usage records with provider, model, tokens, and costs
+ * - Timestamps for each LLM request
+ *
+ * **Access Control:**
+ *
+ * - Only accessible by the user who created the adaptation run
+ * - Requires prompt adaptation access
+ *
+ * @example
+ * ```ts
+ * const response = await client.promptAdaptation.getCost(
+ * 'adaptation_run_id',
+ * );
+ * ```
+ */
+ getCost(adaptationRunID, options) {
+ return this._client.get(path `/v2/prompt/adapt/${adaptationRunID}/costs`, options);
+ }
+ }
+ //# sourceMappingURL=prompt-adaptation.mjs.map
package/resources/prompt-adaptation.mjs.map
@@ -0,0 +1 @@
+ {"version":3,"file":"prompt-adaptation.mjs","sourceRoot":"","sources":["../src/resources/prompt-adaptation.ts"],"names":[],"mappings":"AAAA,sFAAsF;OAE/E,EAAE,WAAW,EAAE;OAIf,EAAE,IAAI,EAAE;AAEf,MAAM,OAAO,gBAAiB,SAAQ,WAAW;IAC/C;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAiHG;IACH,KAAK,CACH,IAAiC,EACjC,OAAwB;QAExB,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,kBAAkB,EAAE,EAAE,IAAI,EAAE,GAAG,OAAO,EAAE,CAAC,CAAC;IACrE,CAAC;IAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAwDG;IACH,eAAe,CACb,eAAuB,EACvB,OAAwB;QAExB,OAAO,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAA,2BAA2B,eAAe,EAAE,EAAE,OAAO,CAAC,CAAC;IACrF,CAAC;IAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAuCG;IACH,cAAc,CACZ,eAAuB,EACvB,OAAwB;QAExB,OAAO,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAA,0BAA0B,eAAe,EAAE,EAAE,OAAO,CAAC,CAAC;IACpF,CAAC;IAED;;;;;;;;;;;;;;;;;;;;;;;;OAwBG;IACH,OAAO,CAAC,eAAuB,EAAE,OAAwB;QACvD,OAAO,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAA,oBAAoB,eAAe,QAAQ,EAAE,OAAO,CAAC,CAAC;IACpF,CAAC;CACF"}