@adobe/spacecat-shared-scrape-client 2.2.2 → 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,10 @@
+# [@adobe/spacecat-shared-scrape-client-v2.3.0](https://github.com/adobe/spacecat-shared/compare/@adobe/spacecat-shared-scrape-client-v2.2.2...@adobe/spacecat-shared-scrape-client-v2.3.0) (2025-11-05)
+
+
+### Features
+
+* **scrapeClient:** change baseUrl composing to match sites ([#1089](https://github.com/adobe/spacecat-shared/issues/1089)) ([0ffe0af](https://github.com/adobe/spacecat-shared/commit/0ffe0af0c0ca7fc2fe537d0fb735743c0079a9c4))
+
 # [@adobe/spacecat-shared-scrape-client-v2.2.2](https://github.com/adobe/spacecat-shared/compare/@adobe/spacecat-shared-scrape-client-v2.2.1...@adobe/spacecat-shared-scrape-client-v2.2.2) (2025-11-04)
 
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@adobe/spacecat-shared-scrape-client",
-  "version": "2.2.2",
+  "version": "2.3.0",
   "description": "Shared modules of the Spacecat Services - Scrape Client",
   "type": "module",
   "engines": {
@@ -11,7 +11,7 @@
  */
 
 import { ScrapeJob as ScrapeJobModel } from '@adobe/spacecat-shared-data-access';
-import { isValidUrl, isValidUUID } from '@adobe/spacecat-shared-utils';
+import { isValidUrl, isValidUUID, composeBaseURL } from '@adobe/spacecat-shared-utils';
 
 /**
  * Scrape Supervisor provides functionality to start and manage scrape jobs.
@@ -37,12 +37,6 @@ function ScrapeJobSupervisor(services, config) {
     maxUrlsPerMessage,
   } = config;
 
-  function determineBaseURL(urls) {
-    // Initially, we will just use the domain of the first URL
-    const url = new URL(urls[0]);
-    return `${url.protocol}//${url.hostname}`;
-  }
-
   /**
    * Create a new scrape job by claiming one of the free scrape queues, persisting the scrape job
    * metadata, and setting the job status to 'RUNNING'.
@@ -59,7 +53,7 @@ function ScrapeJobSupervisor(services, config) {
     customHeaders = null,
   ) {
     const jobData = {
-      baseURL: determineBaseURL(urls),
+      baseURL: composeBaseURL(urls[0]),
       processingType,
       options,
       urlCount: urls.length,
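For context: the removed local helper kept the first URL's protocol and hostname verbatim, while the new code delegates to `composeBaseURL` from `@adobe/spacecat-shared-utils`, so the scrape job's `baseURL` is composed the same way base URLs are composed for Sites. The sketch below contrasts the two call sites; the example URL is hypothetical, and the exact normalization performed by `composeBaseURL` is defined in the shared utils package, not shown here.

```js
import { composeBaseURL } from '@adobe/spacecat-shared-utils';

// Old behavior (local helper, now removed): protocol + hostname of the first URL, kept as-is.
const determineBaseURL = (urls) => {
  const url = new URL(urls[0]);
  return `${url.protocol}//${url.hostname}`;
};

// Hypothetical input for illustration only.
const urls = ['https://www.example.com/products/page-1?ref=nav'];

console.log(determineBaseURL(urls)); // 'https://www.example.com'

// New behavior: delegate to the shared util so the job's baseURL matches how
// Site base URLs are composed elsewhere in spacecat-shared. The exact output
// depends on composeBaseURL's normalization rules (see spacecat-shared-utils).
console.log(composeBaseURL(urls[0]));
```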