@liqhtworks/sophon-sdk 0.1.0 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. package/.github/workflows/open-generated-pr.yml +63 -0
  2. package/.github/workflows/publish.yml +42 -6
  3. package/dist/apis/DownloadsApi.d.ts +1 -1
  4. package/dist/apis/DownloadsApi.js +1 -1
  5. package/dist/apis/HealthApi.d.ts +1 -1
  6. package/dist/apis/HealthApi.js +1 -1
  7. package/dist/apis/JobsApi.d.ts +5 -5
  8. package/dist/apis/JobsApi.js +3 -3
  9. package/dist/apis/UploadsApi.d.ts +1 -1
  10. package/dist/apis/UploadsApi.js +1 -1
  11. package/dist/apis/WebhooksApi.d.ts +1 -1
  12. package/dist/apis/WebhooksApi.js +1 -1
  13. package/dist/esm/apis/DownloadsApi.d.ts +1 -1
  14. package/dist/esm/apis/DownloadsApi.js +1 -1
  15. package/dist/esm/apis/HealthApi.d.ts +1 -1
  16. package/dist/esm/apis/HealthApi.js +1 -1
  17. package/dist/esm/apis/JobsApi.d.ts +5 -5
  18. package/dist/esm/apis/JobsApi.js +3 -3
  19. package/dist/esm/apis/UploadsApi.d.ts +1 -1
  20. package/dist/esm/apis/UploadsApi.js +1 -1
  21. package/dist/esm/apis/WebhooksApi.d.ts +1 -1
  22. package/dist/esm/apis/WebhooksApi.js +1 -1
  23. package/dist/esm/helpers/uploads.d.ts +0 -3
  24. package/dist/esm/helpers/uploads.js +9 -2
  25. package/dist/esm/models/CompleteUploadResponse.d.ts +1 -1
  26. package/dist/esm/models/CompleteUploadResponse.js +1 -1
  27. package/dist/esm/models/CreateJobOutputOptions.d.ts +1 -1
  28. package/dist/esm/models/CreateJobOutputOptions.js +1 -1
  29. package/dist/esm/models/CreateJobRequest.d.ts +1 -1
  30. package/dist/esm/models/CreateJobRequest.js +1 -1
  31. package/dist/esm/models/CreateUploadRequest.d.ts +1 -1
  32. package/dist/esm/models/CreateUploadRequest.js +1 -1
  33. package/dist/esm/models/CreateUploadResponse.d.ts +1 -1
  34. package/dist/esm/models/CreateUploadResponse.js +1 -1
  35. package/dist/esm/models/CreateWebhookRequest.d.ts +1 -1
  36. package/dist/esm/models/CreateWebhookRequest.js +1 -1
  37. package/dist/esm/models/ErrorBody.d.ts +1 -1
  38. package/dist/esm/models/ErrorBody.js +1 -1
  39. package/dist/esm/models/ErrorEnvelope.d.ts +1 -1
  40. package/dist/esm/models/ErrorEnvelope.js +1 -1
  41. package/dist/esm/models/JobOutputInfo.d.ts +1 -1
  42. package/dist/esm/models/JobOutputInfo.js +1 -1
  43. package/dist/esm/models/JobProfile.d.ts +53 -9
  44. package/dist/esm/models/JobProfile.js +53 -9
  45. package/dist/esm/models/JobProgress.d.ts +1 -1
  46. package/dist/esm/models/JobProgress.js +1 -1
  47. package/dist/esm/models/JobResponse.d.ts +6 -3
  48. package/dist/esm/models/JobResponse.js +1 -1
  49. package/dist/esm/models/JobSourceInfo.d.ts +1 -1
  50. package/dist/esm/models/JobSourceInfo.js +1 -1
  51. package/dist/esm/models/JobSourceType.d.ts +1 -1
  52. package/dist/esm/models/JobSourceType.js +1 -1
  53. package/dist/esm/models/JobStatus.d.ts +1 -1
  54. package/dist/esm/models/JobStatus.js +1 -1
  55. package/dist/esm/models/ListJobsResponse.d.ts +1 -1
  56. package/dist/esm/models/ListJobsResponse.js +1 -1
  57. package/dist/esm/models/OutputContainer.d.ts +1 -1
  58. package/dist/esm/models/OutputContainer.js +1 -1
  59. package/dist/esm/models/ReadyResponse.d.ts +1 -1
  60. package/dist/esm/models/ReadyResponse.js +1 -1
  61. package/dist/esm/models/UploadJobSource.d.ts +1 -1
  62. package/dist/esm/models/UploadJobSource.js +1 -1
  63. package/dist/esm/models/UploadPartResponse.d.ts +1 -1
  64. package/dist/esm/models/UploadPartResponse.js +1 -1
  65. package/dist/esm/models/UploadStatusResponse.d.ts +1 -1
  66. package/dist/esm/models/UploadStatusResponse.js +1 -1
  67. package/dist/esm/models/WebhookDeliveryPayload.d.ts +1 -1
  68. package/dist/esm/models/WebhookDeliveryPayload.js +1 -1
  69. package/dist/esm/models/WebhookListItem.d.ts +1 -1
  70. package/dist/esm/models/WebhookListItem.js +1 -1
  71. package/dist/esm/models/WebhookListResponse.d.ts +1 -1
  72. package/dist/esm/models/WebhookListResponse.js +1 -1
  73. package/dist/esm/models/WebhookResponse.d.ts +1 -1
  74. package/dist/esm/models/WebhookResponse.js +1 -1
  75. package/dist/esm/runtime.d.ts +1 -1
  76. package/dist/esm/runtime.js +1 -1
  77. package/dist/helpers/uploads.d.ts +0 -3
  78. package/dist/helpers/uploads.js +9 -2
  79. package/dist/models/CompleteUploadResponse.d.ts +1 -1
  80. package/dist/models/CompleteUploadResponse.js +1 -1
  81. package/dist/models/CreateJobOutputOptions.d.ts +1 -1
  82. package/dist/models/CreateJobOutputOptions.js +1 -1
  83. package/dist/models/CreateJobRequest.d.ts +1 -1
  84. package/dist/models/CreateJobRequest.js +1 -1
  85. package/dist/models/CreateUploadRequest.d.ts +1 -1
  86. package/dist/models/CreateUploadRequest.js +1 -1
  87. package/dist/models/CreateUploadResponse.d.ts +1 -1
  88. package/dist/models/CreateUploadResponse.js +1 -1
  89. package/dist/models/CreateWebhookRequest.d.ts +1 -1
  90. package/dist/models/CreateWebhookRequest.js +1 -1
  91. package/dist/models/ErrorBody.d.ts +1 -1
  92. package/dist/models/ErrorBody.js +1 -1
  93. package/dist/models/ErrorEnvelope.d.ts +1 -1
  94. package/dist/models/ErrorEnvelope.js +1 -1
  95. package/dist/models/JobOutputInfo.d.ts +1 -1
  96. package/dist/models/JobOutputInfo.js +1 -1
  97. package/dist/models/JobProfile.d.ts +53 -9
  98. package/dist/models/JobProfile.js +53 -9
  99. package/dist/models/JobProgress.d.ts +1 -1
  100. package/dist/models/JobProgress.js +1 -1
  101. package/dist/models/JobResponse.d.ts +6 -3
  102. package/dist/models/JobResponse.js +1 -1
  103. package/dist/models/JobSourceInfo.d.ts +1 -1
  104. package/dist/models/JobSourceInfo.js +1 -1
  105. package/dist/models/JobSourceType.d.ts +1 -1
  106. package/dist/models/JobSourceType.js +1 -1
  107. package/dist/models/JobStatus.d.ts +1 -1
  108. package/dist/models/JobStatus.js +1 -1
  109. package/dist/models/ListJobsResponse.d.ts +1 -1
  110. package/dist/models/ListJobsResponse.js +1 -1
  111. package/dist/models/OutputContainer.d.ts +1 -1
  112. package/dist/models/OutputContainer.js +1 -1
  113. package/dist/models/ReadyResponse.d.ts +1 -1
  114. package/dist/models/ReadyResponse.js +1 -1
  115. package/dist/models/UploadJobSource.d.ts +1 -1
  116. package/dist/models/UploadJobSource.js +1 -1
  117. package/dist/models/UploadPartResponse.d.ts +1 -1
  118. package/dist/models/UploadPartResponse.js +1 -1
  119. package/dist/models/UploadStatusResponse.d.ts +1 -1
  120. package/dist/models/UploadStatusResponse.js +1 -1
  121. package/dist/models/WebhookDeliveryPayload.d.ts +1 -1
  122. package/dist/models/WebhookDeliveryPayload.js +1 -1
  123. package/dist/models/WebhookListItem.d.ts +1 -1
  124. package/dist/models/WebhookListItem.js +1 -1
  125. package/dist/models/WebhookListResponse.d.ts +1 -1
  126. package/dist/models/WebhookListResponse.js +1 -1
  127. package/dist/models/WebhookResponse.d.ts +1 -1
  128. package/dist/models/WebhookResponse.js +1 -1
  129. package/dist/runtime.d.ts +1 -1
  130. package/dist/runtime.js +1 -1
  131. package/docs/JobProfile.md +1 -1
  132. package/docs/JobsApi.md +1 -1
  133. package/package.json +1 -1
  134. package/src/apis/DownloadsApi.ts +1 -1
  135. package/src/apis/HealthApi.ts +1 -1
  136. package/src/apis/JobsApi.ts +5 -5
  137. package/src/apis/UploadsApi.ts +1 -1
  138. package/src/apis/WebhooksApi.ts +1 -1
  139. package/src/helpers/uploads.ts +15 -5
  140. package/src/models/CompleteUploadResponse.ts +1 -1
  141. package/src/models/CreateJobOutputOptions.ts +1 -1
  142. package/src/models/CreateJobRequest.ts +1 -1
  143. package/src/models/CreateUploadRequest.ts +1 -1
  144. package/src/models/CreateUploadResponse.ts +1 -1
  145. package/src/models/CreateWebhookRequest.ts +1 -1
  146. package/src/models/ErrorBody.ts +1 -1
  147. package/src/models/ErrorEnvelope.ts +1 -1
  148. package/src/models/JobOutputInfo.ts +1 -1
  149. package/src/models/JobProfile.ts +53 -9
  150. package/src/models/JobProgress.ts +1 -1
  151. package/src/models/JobResponse.ts +6 -3
  152. package/src/models/JobSourceInfo.ts +1 -1
  153. package/src/models/JobSourceType.ts +1 -1
  154. package/src/models/JobStatus.ts +1 -1
  155. package/src/models/ListJobsResponse.ts +1 -1
  156. package/src/models/OutputContainer.ts +1 -1
  157. package/src/models/ReadyResponse.ts +1 -1
  158. package/src/models/UploadJobSource.ts +1 -1
  159. package/src/models/UploadPartResponse.ts +1 -1
  160. package/src/models/UploadStatusResponse.ts +1 -1
  161. package/src/models/WebhookDeliveryPayload.ts +1 -1
  162. package/src/models/WebhookListItem.ts +1 -1
  163. package/src/models/WebhookListResponse.ts +1 -1
  164. package/src/models/WebhookResponse.ts +1 -1
  165. package/src/runtime.ts +1 -1
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don't burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -98,7 +98,7 @@ export interface JobsApiInterface {
98
98
  createJobRequestOpts(requestParameters: CreateJobOperationRequest): Promise<runtime.RequestOpts>;
99
99
 
100
100
  /**
101
- * Creates a queued encoding job from a completed upload source. The `profile` field accepts explicit coffee profiles or `sophon-auto`, and `output.target_height` can request aspect-preserving downscale.
101
+ * Creates a queued encoding job from a completed upload source. **Picking `profile`:** - Use `sophon-auto` unless you have a specific reason not to. It picks per-source settings tuned for consistent output and re-encodes at stricter settings if the first pass doesn\'t hold up. - Use an explicit coffee profile (`sophon-espresso` / `-cortado` / `-americano`) when you want deterministic encoder behavior — same settings regardless of source. - Use an `-hq` variant when the source is a heavy format (ProRes, DNxHD, high-bitrate camera originals). Larger output files, maximum detail preservation. - Use an `-hq-10bit` variant when the source is 10-bit and you want to preserve that depth end-to-end (ProRes 422/4444, DNxHD, BRAW, camera masters). See `JobProfile` for the full enum. `output.target_height` requests an aspect-preserving downscale (width derived from source, both dims rounded to even). If absent or larger than source, output uses source dimensions.
102
102
  * @summary Submit an encoding job
103
103
  * @param {string} idempotencyKey Client-generated UUID or string for exactly-once semantics. Required on all POST endpoints. Replaying the same key with the same request body returns the original response without side effects.
104
104
  * @param {CreateJobRequest} createJobRequest
@@ -109,7 +109,7 @@ export interface JobsApiInterface {
109
109
  createJobRaw(requestParameters: CreateJobOperationRequest, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<runtime.ApiResponse<JobResponse>>;
110
110
 
111
111
  /**
112
- * Creates a queued encoding job from a completed upload source. The `profile` field accepts explicit coffee profiles or `sophon-auto`, and `output.target_height` can request aspect-preserving downscale.
112
+ * Creates a queued encoding job from a completed upload source. **Picking `profile`:** - Use `sophon-auto` unless you have a specific reason not to. It picks per-source settings tuned for consistent output and re-encodes at stricter settings if the first pass doesn\'t hold up. - Use an explicit coffee profile (`sophon-espresso` / `-cortado` / `-americano`) when you want deterministic encoder behavior — same settings regardless of source. - Use an `-hq` variant when the source is a heavy format (ProRes, DNxHD, high-bitrate camera originals). Larger output files, maximum detail preservation. - Use an `-hq-10bit` variant when the source is 10-bit and you want to preserve that depth end-to-end (ProRes 422/4444, DNxHD, BRAW, camera masters). See `JobProfile` for the full enum. `output.target_height` requests an aspect-preserving downscale (width derived from source, both dims rounded to even). If absent or larger than source, output uses source dimensions.
113
113
  * Submit an encoding job
114
114
  */
115
115
  createJob(requestParameters: CreateJobOperationRequest, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<JobResponse>;
@@ -301,7 +301,7 @@ export class JobsApi extends runtime.BaseAPI implements JobsApiInterface {
301
301
  }
302
302
 
303
303
  /**
304
- * Creates a queued encoding job from a completed upload source. The `profile` field accepts explicit coffee profiles or `sophon-auto`, and `output.target_height` can request aspect-preserving downscale.
304
+ * Creates a queued encoding job from a completed upload source. **Picking `profile`:** - Use `sophon-auto` unless you have a specific reason not to. It picks per-source settings tuned for consistent output and re-encodes at stricter settings if the first pass doesn\'t hold up. - Use an explicit coffee profile (`sophon-espresso` / `-cortado` / `-americano`) when you want deterministic encoder behavior — same settings regardless of source. - Use an `-hq` variant when the source is a heavy format (ProRes, DNxHD, high-bitrate camera originals). Larger output files, maximum detail preservation. - Use an `-hq-10bit` variant when the source is 10-bit and you want to preserve that depth end-to-end (ProRes 422/4444, DNxHD, BRAW, camera masters). See `JobProfile` for the full enum. `output.target_height` requests an aspect-preserving downscale (width derived from source, both dims rounded to even). If absent or larger than source, output uses source dimensions.
305
305
  * Submit an encoding job
306
306
  */
307
307
  async createJobRaw(requestParameters: CreateJobOperationRequest, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<runtime.ApiResponse<JobResponse>> {
@@ -312,7 +312,7 @@ export class JobsApi extends runtime.BaseAPI implements JobsApiInterface {
312
312
  }
313
313
 
314
314
  /**
315
- * Creates a queued encoding job from a completed upload source. The `profile` field accepts explicit coffee profiles or `sophon-auto`, and `output.target_height` can request aspect-preserving downscale.
315
+ * Creates a queued encoding job from a completed upload source. **Picking `profile`:** - Use `sophon-auto` unless you have a specific reason not to. It picks per-source settings tuned for consistent output and re-encodes at stricter settings if the first pass doesn\'t hold up. - Use an explicit coffee profile (`sophon-espresso` / `-cortado` / `-americano`) when you want deterministic encoder behavior — same settings regardless of source. - Use an `-hq` variant when the source is a heavy format (ProRes, DNxHD, high-bitrate camera originals). Larger output files, maximum detail preservation. - Use an `-hq-10bit` variant when the source is 10-bit and you want to preserve that depth end-to-end (ProRes 422/4444, DNxHD, BRAW, camera masters). See `JobProfile` for the full enum. `output.target_height` requests an aspect-preserving downscale (width derived from source, both dims rounded to even). If absent or larger than source, output uses source dimensions.
316
316
  * Submit an encoding job
317
317
  */
318
318
  async createJob(requestParameters: CreateJobOperationRequest, initOverrides?: RequestInit | runtime.InitOverrideFunction): Promise<JobResponse> {
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don't burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don't burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -4,11 +4,15 @@
4
4
  // separate calls; this wrapper handles chunk slicing, bounded concurrency,
5
5
  // per-part retry, resume against existing sessions, and progress reporting.
6
6
 
7
+ // Narrow structural type — only the fields the helper actually reads.
8
+ // Keeping it tight lets the generated UploadsApi (which carries Date /
9
+ // nullable / extra metadata fields) satisfy this interface without
10
+ // explicit casts at the call site.
7
11
  export interface UploadsApiLike {
8
12
  createUpload(params: {
9
13
  createUploadRequest: { file_name: string; file_size: number; mime_type: string };
10
14
  idempotencyKey: string;
11
- }): Promise<{ id: string; chunk_size: number; total_chunks: number; expires_at: string }>;
15
+ }): Promise<{ id: string; chunk_size: number; total_chunks: number }>;
12
16
 
13
17
  uploadPart(params: {
14
18
  id: string;
@@ -19,11 +23,10 @@ export interface UploadsApiLike {
19
23
  completeUpload(params: {
20
24
  id: string;
21
25
  idempotencyKey: string;
22
- }): Promise<{ id: string; status: string; sha256: string; bytes: number }>;
26
+ }): Promise<{ id: string; sha256: string; bytes: number }>;
23
27
 
24
28
  getUpload(params: { id: string }): Promise<{
25
29
  id: string;
26
- status: string;
27
30
  total_chunks: number;
28
31
  received_chunks: number[];
29
32
  }>;
@@ -94,13 +97,17 @@ export async function uploadFile(params: UploadFileParams): Promise<UploadFileRe
94
97
  // we only need the head size to slice, so use ceil.
95
98
  chunkSize = Math.ceil(source.size / totalChunks);
96
99
  } else {
100
+ // SOPHON scopes idempotency keys per-route. createUpload and
101
+ // completeUpload are different routes, so a single key on both
102
+ // returns 409. Derive distinct per-route keys from the caller's
103
+ // (or auto-generated) seed so retries still work.
97
104
  const session = await api.createUpload({
98
105
  createUploadRequest: {
99
106
  file_name: fileName,
100
107
  file_size: source.size,
101
108
  mime_type: mimeType,
102
109
  },
103
- idempotencyKey,
110
+ idempotencyKey: `${idempotencyKey}/create`,
104
111
  });
105
112
  uploadId = session.id;
106
113
  chunkSize = session.chunk_size;
@@ -159,7 +166,10 @@ export async function uploadFile(params: UploadFileParams): Promise<UploadFileRe
159
166
 
160
167
  await Promise.all(workers);
161
168
 
162
- const done = await api.completeUpload({ id: uploadId, idempotencyKey });
169
+ const done = await api.completeUpload({
170
+ id: uploadId,
171
+ idempotencyKey: `${idempotencyKey}/complete`,
172
+ });
163
173
 
164
174
  return { uploadId: done.id, sha256: done.sha256, bytes: done.bytes };
165
175
  }
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -13,25 +13,63 @@
13
13
  */
14
14
 
15
15
  /**
16
- * Encoding profile ID. Coffee-themed naming: prep time maps to encode speed,
17
- * bare name is the 8-bit default (universal decoder compatibility),
18
- * `-10bit` suffix opts into HEVC Main10 for higher-quality playback on
19
- * newer decoders.
16
+ * Encoding profile ID. Coffee-themed naming: prep time maps to encode
17
+ * speed (espresso = fast, cortado = medium, americano = slow). The
18
+ * naming suffixes compose:
20
19
  *
21
- * **8-bit (default):**
20
+ * - bare name → 8-bit HEVC Main (universal decoder compatibility,
21
+ * default)
22
+ * - `-10bit` suffix → HEVC Main10 output. Requires a decoder that
23
+ * supports Main10 (modern phones, modern TVs, Safari, Chrome
24
+ * with hardware decode). Older / low-end devices may not play
25
+ * Main10 output. Pick only when you know the downstream
26
+ * pipeline supports it.
27
+ * - `-hq` suffix → quality-biased 8-bit variant for heavy source
28
+ * formats (ProRes, DNxHD, high-bitrate camera originals,
29
+ * mastering-grade intermediates). Files will be larger than
30
+ * the standard tier; pick when preserving detail matters more
31
+ * than bitrate. Broad device compatibility (8-bit Main).
32
+ * - `-hq-10bit` suffix → combines HQ with Main10 to preserve
33
+ * 10-bit depth end-to-end. Same Main10 compatibility caveat
34
+ * as above; pick for ProRes 422/4444, DNxHD, BRAW, or camera
35
+ * masters where detail AND bit depth matter and you control
36
+ * the downstream pipeline.
37
+ *
38
+ * **For broad audience playback, pick `sophon-auto` or an explicit
39
+ * 8-bit coffee profile.** `sophon-auto` produces 8-bit Main output
40
+ * regardless of source bit depth.
41
+ *
42
+ * If you're not sure which to pick, use `sophon-auto` — the API
43
+ * picks per-source settings tuned for consistent output regardless
44
+ * of what you submit, and automatically re-encodes at stricter
45
+ * settings if the first pass doesn't hold up.
46
+ *
47
+ * **8-bit (standard, default):**
22
48
  * - `sophon-espresso` — fastest, lowest compression
23
49
  * - `sophon-cortado` — balanced speed and quality
24
50
  * - `sophon-americano` — slowest, highest compression
25
51
  *
52
+ * **8-bit HQ** (max quality preservation for heavy formats):
53
+ * - `sophon-espresso-hq`
54
+ * - `sophon-cortado-hq`
55
+ * - `sophon-americano-hq`
56
+ *
26
57
  * **10-bit (HEVC Main10):**
27
58
  * - `sophon-espresso-10bit`
28
59
  * - `sophon-cortado-10bit`
29
60
  * - `sophon-americano-10bit`
30
61
  *
62
+ * **10-bit HQ** (max quality preservation AND preserves 10-bit depth):
63
+ * - `sophon-espresso-hq-10bit`
64
+ * - `sophon-cortado-hq-10bit`
65
+ * - `sophon-americano-hq-10bit`
66
+ *
31
67
  * **Adaptive dispatcher:**
32
- * - `sophon-auto` — public opt-in profile. The worker classifies
33
- * the source and records the concrete `effective_profile_id` on
34
- * the job once dispatch resolves.
68
+ * - `sophon-auto` — content-adaptive. The API probes each source,
69
+ * picks tuned settings, and re-encodes at stricter settings if
70
+ * the first pass doesn't hold up. `profile` on the job response
71
+ * stays `sophon-auto`; `effective_profile_id` records the
72
+ * concrete variant the API actually ran.
35
73
  *
36
74
  * @export
37
75
  * @enum {string}
@@ -40,9 +78,15 @@ export enum JobProfile {
40
78
  SOPHON_ESPRESSO = 'sophon-espresso',
41
79
  SOPHON_CORTADO = 'sophon-cortado',
42
80
  SOPHON_AMERICANO = 'sophon-americano',
81
+ SOPHON_ESPRESSO_HQ = 'sophon-espresso-hq',
82
+ SOPHON_CORTADO_HQ = 'sophon-cortado-hq',
83
+ SOPHON_AMERICANO_HQ = 'sophon-americano-hq',
43
84
  SOPHON_ESPRESSO_10BIT = 'sophon-espresso-10bit',
44
85
  SOPHON_CORTADO_10BIT = 'sophon-cortado-10bit',
45
86
  SOPHON_AMERICANO_10BIT = 'sophon-americano-10bit',
87
+ SOPHON_ESPRESSO_HQ_10BIT = 'sophon-espresso-hq-10bit',
88
+ SOPHON_CORTADO_HQ_10BIT = 'sophon-cortado-hq-10bit',
89
+ SOPHON_AMERICANO_HQ_10BIT = 'sophon-americano-hq-10bit',
46
90
  SOPHON_AUTO = 'sophon-auto'
47
91
  }
48
92
 
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -2,7 +2,7 @@
2
2
  /* eslint-disable */
3
3
  /**
4
4
  * SOPHON Encoding API
5
- * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination.
5
+ * REST API for submitting, monitoring, and retrieving SOPHON encoding jobs. Authentication is via Bearer API key or session cookie. All POST endpoints require an Idempotency-Key header. List endpoints use opaque cursor-based pagination. --- ## Integration example A real-world walkthrough of how [Daisy](https://daisy.so) wires SOPHON into two production flows — user-uploaded video compression and automatic post-generation encoding after video rendering. Both converge on the same adapter and state machine; only the source differs. The patterns below are the ones that transfer cleanly to any integration. ### 1. One thin adapter, one method per endpoint Keep the HTTP surface boring. Axios (or your stack\'s equivalent), a per-endpoint idempotency key, and no enum for profile names: ```ts @Injectable() export class SophonService { private client() { return axios.create({ baseURL: process.env.SOPHON_BASE_URL, headers: { Authorization: `Bearer ${process.env.SOPHON_API_KEY}` }, timeout: 60_000, }); } async createUploadSession(req, idempotencyKey) { /_* POST /v1/uploads *_/ } async uploadChunk(uploadId, partNumber, bytes) { /_* PUT /v1/uploads/{id}/parts/{n} *_/ } async completeUpload(uploadId, idempotencyKey) { /_* POST /v1/uploads/{id}/complete *_/ } async createJob(req, idempotencyKey) { /_* POST /v1/jobs *_/ } async getJob(id) { /_* GET /v1/jobs/{id} *_/ } async downloadOutputStream(jobId) { /_* GET /v1/jobs/{id}/output *_/ } } ``` **Suffix idempotency keys per endpoint.** SOPHON scopes dedupe per route but a shared key collides across retries that hit different endpoints. Do this: ```ts const base = `video:${video.id}:v1`; await sophon.createUploadSession(req, `${base}:create-upload`); await sophon.completeUpload(uploadId, `${base}:complete-upload`); await sophon.createJob(req, `${base}:create-job`); ``` **Profile names are strings, not an enum.** We add and rename profiles (`sophon-espresso` → `sophon-auto` → future variants). 
A TypeScript union will drift; let the server validate. ### 2. Model your pipeline as a state machine Persist a single `sophonState` JSON column per row. `jobId === null` routes to dispatch; anything else polls that job: ```ts interface SophonState { jobId: string | null; // null = not dispatched; string = poll it uploadId?: string; // persist between upload + createJob profile?: string; // sophon-auto | sophon-espresso | ... dispatchRetries: number; // 3 strikes → fallback downloadRetries: number; lastError?: { stage, code, message, at }; } // In your cron (5-second tick is plenty): if (state.jobId === null) { await dispatch(video, state); // upload + createJob } else { await poll(video, state); // getJob + (if completed) downloadAndComplete } ``` Persisting `uploadId` between the upload completion and the `createJob` call matters — a crash in that window otherwise re-uploads the file. ### 3. Stream for large sources; buffer for small User-uploaded sources can be 1 GB+. Stream S3 → SOPHON in chunks equal to `session.chunk_size` from the createUploadSession response: ```ts async uploadStream(stream, fileName, mimeType, fileSize) { const session = await this.createUploadSession({ file_name: fileName, file_size: fileSize, mime_type: mimeType, }); let partIndex = 0, buffer = Buffer.alloc(0); for await (const chunk of stream) { buffer = Buffer.concat([buffer, chunk]); while (buffer.length >= session.chunk_size) { await this.uploadChunk(session.id, partIndex++, buffer.subarray(0, session.chunk_size)); buffer = buffer.subarray(session.chunk_size); } } if (buffer.length > 0) { await this.uploadChunk(session.id, partIndex, buffer); } return this.completeUpload(session.id); } ``` Generated outputs from a model run are typically <30 MB — for those, a buffered upload path is simpler and avoids managing a stream lifetime. ### 4. Always keep a fallback URL Before a row enters your encoding state, make sure the source is already playable from your CDN. 
Every SOPHON failure then degrades to \"use the original\" — the user\'s video never disappears because SOPHON is slow or down. This is the single most important invariant: ```ts await videoRepository.update({ id: video.id }, { videoUrl: sourceCloudfrontUrl, // fallback URL, stays intact status: VideoStatus.EncodingPending, sophonState: { jobId: null, profile, dispatchRetries: 0, downloadRetries: 0 }, sourceFileSize: sourceBytes, }); ``` On any terminal failure (structured `retryable: false`, retry budget exhausted, 404 on getJob, 23h stuck-row guard), flip status back to `Done` with `videoUrl` unchanged. SOPHON is enhancement, not a delivery dependency. ### 5. Handle the \"no-gain\" success path `sophon-auto` runs a pre-probe and, when it decides the output wouldn\'t be smaller than the source, returns `final_artifact: \"original\"` and `saved_percent: 0`. Skip the output download — the source already lives in your bucket: ```ts if (job.status === \'completed\') { if (job.final_artifact === \'original\') { // Persist outputFileSize = sourceFileSize so your UI shows // \"no reduction\" instead of a missing value. await completeWithFallbackOutput(video, job.output?.bytes ?? null); return; } await downloadAndComplete(video, state, job.output?.bytes ?? null); } ``` ### 6. Finalize by streaming into your own storage `GET /v1/jobs/{id}/output` returns a 302 to a presigned URL with a 24h TTL. Stream that directly into your bucket — no temp file, no buffering: ```ts const { stream } = await sophon.downloadOutputStream(state.jobId); const outputKey = `encoded/${video.userId}/${video.id}.mp4`; await fileService.uploadStream(outputKey, stream, \'video/mp4\'); await videoRepository.update({ id: video.id }, { videoUrl: fileService.cloudfrontUrl(outputKey), outputFileSize: sophonOutputBytes, status: VideoStatus.Done, }); ``` ### 7. Failure taxonomy | Error | Handling | |---|---| | Structured `retryable: false` from SOPHON | Terminal. Fall back to `Done` with source URL. 
| | Retryable upload / createJob failure | Increment `dispatchRetries`; after 3, fall back. | | Retryable download failure | Increment `downloadRetries`; after 3, fall back. | | `getJob` → HTTP 404 | Terminal. Job expired or never created. Fall back. | | Transient poll network error | Do nothing; next tick retries. Don\'t burn retry budget. | | Row stuck in encode state > 23h | Fall back (safety net against orphans). | ### Minimal config ```bash SOPHON_API_KEY=sk_live_... SOPHON_BASE_URL=https://api.liqhtworks.xyz ```
6
6
  *
7
7
  * The version of the OpenAPI document: 1.0.0
8
8
  *
@@ -96,8 +96,11 @@ export interface JobResponse {
96
96
  profile: JobProfile;
97
97
  /**
98
98
  * Concrete profile resolved by the worker. Omitted until dispatch
99
- * resolves. On explicit-profile jobs this equals `profile`; on
100
- * `sophon-auto` jobs it is an internal adaptive profile ID.
99
+ * resolves. On explicit-profile jobs this equals `profile`. On
100
+ * `sophon-auto` jobs this is a variant identifier recording
101
+ * which path the API routed the source through; exact encoder
102
+ * settings for a given variant may be updated between releases
103
+ * as the adaptive logic is tuned.
101
104
  *
102
105
  * @type {string}
103
106
  * @memberof JobResponse