vellum-ai 0.0.32 → 0.0.34

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as published in their respective public registries.
@@ -31,6 +31,7 @@ export declare class Documents {
  *
  */
  partialUpdate(id: string, request?: Vellum.PatchedDocumentUpdateRequest): Promise<Vellum.DocumentRead>;
+ destroy(id: string): Promise<void>;
  /**
  * <strong style="background-color:#4caf50; color:white; padding:4px; border-radius:4px">Stable</strong>
  *
@@ -147,6 +147,38 @@ class Documents {
  });
  }
  }
+ async destroy(id) {
+ const _response = await core.fetcher({
+ url: (0, url_join_1.default)((this.options.environment ?? environments.VellumEnvironment.Production).default, `v1/documents/${id}`),
+ method: "DELETE",
+ headers: {
+ X_API_KEY: await core.Supplier.get(this.options.apiKey),
+ },
+ contentType: "application/json",
+ });
+ if (_response.ok) {
+ return;
+ }
+ if (_response.error.reason === "status-code") {
+ throw new errors.VellumError({
+ statusCode: _response.error.statusCode,
+ body: _response.error.body,
+ });
+ }
+ switch (_response.error.reason) {
+ case "non-json":
+ throw new errors.VellumError({
+ statusCode: _response.error.statusCode,
+ body: _response.error.rawBody,
+ });
+ case "timeout":
+ throw new errors.VellumTimeoutError();
+ case "unknown":
+ throw new errors.VellumError({
+ message: _response.error.errorMessage,
+ });
+ }
+ }
  /**
  * <strong style="background-color:#4caf50; color:white; padding:4px; border-radius:4px">Stable</strong>
  *
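
The two hunks above add a destroy(id) method to the Documents client: it issues a DELETE request to `v1/documents/${id}` with the X_API_KEY header, resolves to void on success, and otherwise throws VellumError or VellumTimeoutError. A minimal usage sketch follows; the VellumClient export name, the documents accessor on the client, and the document ID are illustrative assumptions, not taken from this diff.

import { VellumClient } from "vellum-ai"; // assumed top-level client export

// The client is constructed with an API key, since the Documents client reads this.options.apiKey.
const vellum = new VellumClient({ apiKey: process.env.VELLUM_API_KEY ?? "" });

async function deleteUploadedDocument(): Promise<void> {
    // Placeholder document ID; the promise resolves to void when the DELETE succeeds.
    await vellum.documents.destroy("doc_123");
}
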
@@ -3,12 +3,14 @@
  */
  /**
  * * `WORKFLOW_INITIALIZATION` - WORKFLOW_INITIALIZATION
- * * `NODE_EXECUTION` - NODE_EXECUTION
  * * `NODE_EXECUTION_COUNT_LIMIT_REACHED` - NODE_EXECUTION_COUNT_LIMIT_REACHED
+ * * `NODE_EXECUTION` - NODE_EXECUTION
+ * * `LLM_PROVIDER` - LLM_PROVIDER
  */
- export declare type WorkflowExecutionEventErrorCode = "WORKFLOW_INITIALIZATION" | "NODE_EXECUTION" | "NODE_EXECUTION_COUNT_LIMIT_REACHED";
+ export declare type WorkflowExecutionEventErrorCode = "WORKFLOW_INITIALIZATION" | "NODE_EXECUTION_COUNT_LIMIT_REACHED" | "NODE_EXECUTION" | "LLM_PROVIDER";
  export declare const WorkflowExecutionEventErrorCode: {
  readonly WorkflowInitialization: "WORKFLOW_INITIALIZATION";
- readonly NodeExecution: "NODE_EXECUTION";
  readonly NodeExecutionCountLimitReached: "NODE_EXECUTION_COUNT_LIMIT_REACHED";
+ readonly NodeExecution: "NODE_EXECUTION";
+ readonly LlmProvider: "LLM_PROVIDER";
  };
@@ -6,6 +6,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.WorkflowExecutionEventErrorCode = void 0;
  exports.WorkflowExecutionEventErrorCode = {
  WorkflowInitialization: "WORKFLOW_INITIALIZATION",
- NodeExecution: "NODE_EXECUTION",
  NodeExecutionCountLimitReached: "NODE_EXECUTION_COUNT_LIMIT_REACHED",
+ NodeExecution: "NODE_EXECUTION",
+ LlmProvider: "LLM_PROVIDER",
  };
@@ -6,5 +6,5 @@ import * as Vellum from "../../api";
  import * as core from "../../core";
  export declare const WorkflowExecutionEventErrorCode: core.serialization.Schema<serializers.WorkflowExecutionEventErrorCode.Raw, Vellum.WorkflowExecutionEventErrorCode>;
  export declare namespace WorkflowExecutionEventErrorCode {
- type Raw = "WORKFLOW_INITIALIZATION" | "NODE_EXECUTION" | "NODE_EXECUTION_COUNT_LIMIT_REACHED";
+ type Raw = "WORKFLOW_INITIALIZATION" | "NODE_EXECUTION_COUNT_LIMIT_REACHED" | "NODE_EXECUTION" | "LLM_PROVIDER";
  }
@@ -28,4 +28,9 @@ var __importStar = (this && this.__importStar) || function (mod) {
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.WorkflowExecutionEventErrorCode = void 0;
  const core = __importStar(require("../../core"));
- exports.WorkflowExecutionEventErrorCode = core.serialization.enum_(["WORKFLOW_INITIALIZATION", "NODE_EXECUTION", "NODE_EXECUTION_COUNT_LIMIT_REACHED"]);
+ exports.WorkflowExecutionEventErrorCode = core.serialization.enum_([
+ "WORKFLOW_INITIALIZATION",
+ "NODE_EXECUTION_COUNT_LIMIT_REACHED",
+ "NODE_EXECUTION",
+ "LLM_PROVIDER",
+ ]);
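
The WorkflowExecutionEventErrorCode hunks add an LLM_PROVIDER member (and move NODE_EXECUTION after NODE_EXECUTION_COUNT_LIMIT_REACHED) in both the declared type/const and the serialization schema. Below is a hedged sketch of how calling code might branch on the new value; the import path and the handler function are illustrative assumptions, though the const's members match the declarations above.

import { WorkflowExecutionEventErrorCode } from "vellum-ai/api"; // import path is an assumption

// Map a workflow execution error code to a human-readable message.
function describeWorkflowError(code: WorkflowExecutionEventErrorCode): string {
    switch (code) {
        case WorkflowExecutionEventErrorCode.LlmProvider:
            return "The upstream LLM provider reported an error.";
        case WorkflowExecutionEventErrorCode.NodeExecution:
            return "A workflow node failed while executing.";
        case WorkflowExecutionEventErrorCode.NodeExecutionCountLimitReached:
            return "The workflow reached its node execution limit.";
        case WorkflowExecutionEventErrorCode.WorkflowInitialization:
            return "The workflow failed to initialize.";
        default:
            return "Unknown workflow execution error.";
    }
}
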
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "vellum-ai",
- "version": "v0.0.32",
+ "version": "v0.0.34",
  "private": false,
  "repository": "https://github.com/vellum-ai/vellum-client-node",
  "main": "./index.js",