@promptbook/components 0.112.0-56 → 0.112.0-57

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/esm/index.es.js +1177 -1192
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/ParsedAgentSourceWithCommitments.d.ts +7 -0
  4. package/esm/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/applyCommitmentsToAgentModelRequirements.d.ts +14 -0
  5. package/esm/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/augmentAgentModelRequirementsFromSource.d.ts +14 -0
  6. package/esm/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/filterCommitmentsForAgentModelRequirements.d.ts +10 -0
  7. package/esm/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/materializeInlineKnowledgeSources.d.ts +12 -0
  8. package/esm/src/book-2.0/agent-source/parseAgentSource/ParseAgentSourceState.d.ts +10 -0
  9. package/esm/src/book-2.0/agent-source/parseAgentSource/ParsedAgentProfile.d.ts +7 -0
  10. package/esm/src/book-2.0/agent-source/parseAgentSource/applyMetaCommitment.d.ts +8 -0
  11. package/esm/src/book-2.0/agent-source/parseAgentSource/consumeConversationSampleCommitment.d.ts +8 -0
  12. package/esm/src/book-2.0/agent-source/parseAgentSource/createCapabilitiesFromCommitment.d.ts +9 -0
  13. package/esm/src/book-2.0/agent-source/parseAgentSource/ensureMetaFullname.d.ts +7 -0
  14. package/esm/src/book-2.0/agent-source/parseAgentSource/extractAgentProfileText.d.ts +8 -0
  15. package/esm/src/book-2.0/agent-source/parseAgentSource/extractInitialMessage.d.ts +7 -0
  16. package/esm/src/book-2.0/agent-source/parseAgentSource/extractParsedAgentProfile.d.ts +8 -0
  17. package/esm/src/types/LlmToolDefinition.d.ts +17 -7
  18. package/esm/src/version.d.ts +1 -1
  19. package/package.json +1 -1
  20. package/umd/index.umd.js +1177 -1192
  21. package/umd/index.umd.js.map +1 -1
  22. package/umd/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/ParsedAgentSourceWithCommitments.d.ts +7 -0
  23. package/umd/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/applyCommitmentsToAgentModelRequirements.d.ts +14 -0
  24. package/umd/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/augmentAgentModelRequirementsFromSource.d.ts +14 -0
  25. package/umd/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/filterCommitmentsForAgentModelRequirements.d.ts +10 -0
  26. package/umd/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments/materializeInlineKnowledgeSources.d.ts +12 -0
  27. package/umd/src/book-2.0/agent-source/parseAgentSource/ParseAgentSourceState.d.ts +10 -0
  28. package/umd/src/book-2.0/agent-source/parseAgentSource/ParsedAgentProfile.d.ts +7 -0
  29. package/umd/src/book-2.0/agent-source/parseAgentSource/applyMetaCommitment.d.ts +8 -0
  30. package/umd/src/book-2.0/agent-source/parseAgentSource/consumeConversationSampleCommitment.d.ts +8 -0
  31. package/umd/src/book-2.0/agent-source/parseAgentSource/createCapabilitiesFromCommitment.d.ts +9 -0
  32. package/umd/src/book-2.0/agent-source/parseAgentSource/ensureMetaFullname.d.ts +7 -0
  33. package/umd/src/book-2.0/agent-source/parseAgentSource/extractAgentProfileText.d.ts +8 -0
  34. package/umd/src/book-2.0/agent-source/parseAgentSource/extractInitialMessage.d.ts +7 -0
  35. package/umd/src/book-2.0/agent-source/parseAgentSource/extractParsedAgentProfile.d.ts +8 -0
  36. package/umd/src/types/LlmToolDefinition.d.ts +17 -7
  37. package/umd/src/version.d.ts +1 -1
package/umd/index.umd.js CHANGED
@@ -30,7 +30,7 @@
30
30
  * @generated
31
31
  * @see https://github.com/webgptorg/promptbook
32
32
  */
33
- const PROMPTBOOK_ENGINE_VERSION = '0.112.0-56';
33
+ const PROMPTBOOK_ENGINE_VERSION = '0.112.0-57';
34
34
  /**
35
35
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
36
36
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4889,403 +4889,6 @@
4889
4889
  return (jsxRuntime.jsxs("div", { className: classNames(styles$f.AvatarChip, className, isSelected ? styles$f.Selected : undefined), onClick: () => onSelect === null || onSelect === void 0 ? void 0 : onSelect(avatarBasicInformation), style: { cursor: onSelect ? 'pointer' : undefined }, children: [avatarUrl && jsxRuntime.jsx("img", { src: avatarUrl, alt: agentName || '', className: styles$f.Avatar }), meta.fullname || agentName, isTemplate && jsxRuntime.jsx("span", { className: styles$f.TemplateLabel, children: "Template" })] }));
4890
4890
  }
4891
4891
 
4892
- /**
4893
- * Hostnames accepted for GitHub repository references.
4894
- *
4895
- * @private internal USE PROJECT constant
4896
- */
4897
- const GITHUB_HOSTNAMES = new Set(['github.com', 'www.github.com']);
4898
- /**
4899
- * Pattern for validating owner/repository slugs.
4900
- *
4901
- * @private internal USE PROJECT constant
4902
- */
4903
- const GITHUB_REPOSITORY_SLUG_PATTERN = /^[A-Za-z0-9_.-]+\/[A-Za-z0-9_.-]+$/;
4904
- /**
4905
- * Parses a repository reference into canonical owner/repository details.
4906
- *
4907
- * Supported input forms:
4908
- * - `https://github.com/owner/repository`
4909
- * - `github.com/owner/repository`
4910
- * - `owner/repository`
4911
- * - optional `.git` suffix and trailing slash are supported
4912
- *
4913
- * @private internal utility of USE PROJECT commitment
4914
- */
4915
- function parseGitHubRepositoryReference(rawReference) {
4916
- const trimmedReference = rawReference.trim();
4917
- if (!trimmedReference) {
4918
- return null;
4919
- }
4920
- const normalizedReference = trimmedReference.replace(/\/+$/g, '');
4921
- if (normalizedReference.startsWith('http://') || normalizedReference.startsWith('https://')) {
4922
- return parseGitHubRepositoryReferenceFromUrl(normalizedReference);
4923
- }
4924
- if (normalizedReference.startsWith('github.com/')) {
4925
- return parseGitHubRepositoryReferenceFromUrl(`https://${normalizedReference}`);
4926
- }
4927
- if (!GITHUB_REPOSITORY_SLUG_PATTERN.test(normalizedReference)) {
4928
- return null;
4929
- }
4930
- const [owner, repositoryRaw] = normalizedReference.split('/');
4931
- if (!owner || !repositoryRaw) {
4932
- return null;
4933
- }
4934
- const repository = repositoryRaw.replace(/\.git$/i, '');
4935
- if (!isValidGitHubRepositoryPart(owner) || !isValidGitHubRepositoryPart(repository)) {
4936
- return null;
4937
- }
4938
- return createGitHubRepositoryReference(owner, repository);
4939
- }
4940
- /**
4941
- * Parses `USE PROJECT` commitment content into repository reference + optional instructions.
4942
- *
4943
- * @private internal utility of USE PROJECT commitment
4944
- */
4945
- function parseUseProjectCommitmentContent(content) {
4946
- const trimmedContent = spacetrim.spaceTrim(content);
4947
- if (!trimmedContent) {
4948
- return {
4949
- repository: null,
4950
- repositoryReferenceRaw: null,
4951
- instructions: '',
4952
- };
4953
- }
4954
- const lines = trimmedContent
4955
- .split(/\r?\n/)
4956
- .map((line) => line.trim())
4957
- .filter(Boolean);
4958
- if (lines.length === 0) {
4959
- return {
4960
- repository: null,
4961
- repositoryReferenceRaw: null,
4962
- instructions: '',
4963
- };
4964
- }
4965
- const firstLine = lines[0] || '';
4966
- const firstLineTokens = firstLine.split(/\s+/).filter(Boolean);
4967
- let repositoryReferenceRaw = null;
4968
- let repositoryReference = null;
4969
- let repositoryTokenIndex = -1;
4970
- for (let index = 0; index < firstLineTokens.length; index++) {
4971
- const token = firstLineTokens[index] || '';
4972
- const cleanedToken = token.replace(/[),.;:!?]+$/g, '');
4973
- const parsedReference = parseGitHubRepositoryReference(cleanedToken);
4974
- if (!parsedReference) {
4975
- continue;
4976
- }
4977
- repositoryReferenceRaw = cleanedToken;
4978
- repositoryReference = parsedReference;
4979
- repositoryTokenIndex = index;
4980
- break;
4981
- }
4982
- const instructionParts = [];
4983
- if (repositoryTokenIndex >= 0) {
4984
- const firstLineInstruction = firstLineTokens
4985
- .filter((_token, index) => index !== repositoryTokenIndex)
4986
- .join(' ')
4987
- .trim();
4988
- if (firstLineInstruction) {
4989
- instructionParts.push(firstLineInstruction);
4990
- }
4991
- }
4992
- else if (firstLine) {
4993
- instructionParts.push(firstLine);
4994
- }
4995
- if (lines.length > 1) {
4996
- const extraLines = lines.slice(1).join('\n').trim();
4997
- if (extraLines) {
4998
- instructionParts.push(extraLines);
4999
- }
5000
- }
5001
- return {
5002
- repository: repositoryReference,
5003
- repositoryReferenceRaw,
5004
- instructions: instructionParts.join('\n').trim(),
5005
- };
5006
- }
5007
- /**
5008
- * Parses URL-like repository references.
5009
- *
5010
- * @private utility of USE PROJECT commitment
5011
- */
5012
- function parseGitHubRepositoryReferenceFromUrl(rawUrl) {
5013
- let parsedUrl;
5014
- try {
5015
- parsedUrl = new URL(rawUrl);
5016
- }
5017
- catch (_a) {
5018
- return null;
5019
- }
5020
- if (!GITHUB_HOSTNAMES.has(parsedUrl.hostname.toLowerCase())) {
5021
- return null;
5022
- }
5023
- const segments = parsedUrl.pathname.split('/').filter(Boolean);
5024
- if (segments.length < 2) {
5025
- return null;
5026
- }
5027
- const owner = segments[0];
5028
- const repositoryRaw = segments[1];
5029
- if (!owner || !repositoryRaw) {
5030
- return null;
5031
- }
5032
- const repository = repositoryRaw.replace(/\.git$/i, '');
5033
- if (!isValidGitHubRepositoryPart(owner) || !isValidGitHubRepositoryPart(repository)) {
5034
- return null;
5035
- }
5036
- let defaultBranch;
5037
- if (segments[2] === 'tree' && segments[3]) {
5038
- defaultBranch = decodeURIComponent(segments[3]);
5039
- }
5040
- return createGitHubRepositoryReference(owner, repository, defaultBranch);
5041
- }
5042
- /**
5043
- * Validates one owner/repository slug part.
5044
- *
5045
- * @private utility of USE PROJECT commitment
5046
- */
5047
- function isValidGitHubRepositoryPart(value) {
5048
- return /^[A-Za-z0-9_.-]+$/.test(value);
5049
- }
5050
- /**
5051
- * Builds canonical repository reference object.
5052
- *
5053
- * @private utility of USE PROJECT commitment
5054
- */
5055
- function createGitHubRepositoryReference(owner, repository, defaultBranch) {
5056
- const slug = `${owner}/${repository}`;
5057
- return {
5058
- owner,
5059
- repository,
5060
- slug,
5061
- url: `https://github.com/${slug}`,
5062
- defaultBranch,
5063
- };
5064
- }
5065
- // Note: [💞] Ignore a discrepancy between file name and entity name
5066
-
5067
- /**
5068
- * Normalizes a given text to camelCase format.
5069
- *
5070
- * Note: [🔂] This function is idempotent.
5071
- *
5072
- * @param text The text to be normalized.
5073
- * @param _isFirstLetterCapital Whether the first letter should be capitalized.
5074
- * @returns The camelCase formatted string.
5075
- * @example 'helloWorld'
5076
- * @example 'iLovePromptbook'
5077
- *
5078
- * @public exported from `@promptbook/utils`
5079
- */
5080
- function normalizeTo_camelCase(text, _isFirstLetterCapital = false) {
5081
- let charType;
5082
- let lastCharType = null;
5083
- let normalizedName = '';
5084
- for (const char of text) {
5085
- let normalizedChar;
5086
- if (/^[a-z]$/.test(char)) {
5087
- charType = 'LOWERCASE';
5088
- normalizedChar = char;
5089
- }
5090
- else if (/^[A-Z]$/.test(char)) {
5091
- charType = 'UPPERCASE';
5092
- normalizedChar = char.toLowerCase();
5093
- }
5094
- else if (/^[0-9]$/.test(char)) {
5095
- charType = 'NUMBER';
5096
- normalizedChar = char;
5097
- }
5098
- else {
5099
- charType = 'OTHER';
5100
- normalizedChar = '';
5101
- }
5102
- if (!lastCharType) {
5103
- if (_isFirstLetterCapital) {
5104
- normalizedChar = normalizedChar.toUpperCase(); //TODO: DRY
5105
- }
5106
- }
5107
- else if (charType !== lastCharType &&
5108
- !(charType === 'LOWERCASE' && lastCharType === 'UPPERCASE') &&
5109
- !(lastCharType === 'NUMBER') &&
5110
- !(charType === 'NUMBER')) {
5111
- normalizedChar = normalizedChar.toUpperCase(); //TODO: [🌺] DRY
5112
- }
5113
- normalizedName += normalizedChar;
5114
- lastCharType = charType;
5115
- }
5116
- return normalizedName;
5117
- }
5118
- // TODO: [🌺] Use some intermediate util splitWords
5119
-
5120
- /**
5121
- * Tests if given string is valid URL.
5122
- *
5123
- * Note: [🔂] This function is idempotent.
5124
- * Note: Dataurl are considered perfectly valid.
5125
- * Note: There are few similar functions:
5126
- * - `isValidUrl` *(this one)* which tests any URL
5127
- * - `isValidAgentUrl` which tests just agent URL
5128
- * - `isValidPipelineUrl` which tests just pipeline URL
5129
- *
5130
- * @public exported from `@promptbook/utils`
5131
- */
5132
- function isValidUrl(url) {
5133
- if (typeof url !== 'string') {
5134
- return false;
5135
- }
5136
- try {
5137
- if (url.startsWith('blob:')) {
5138
- url = url.replace(/^blob:/, '');
5139
- }
5140
- const urlObject = new URL(url /* because fail is handled */);
5141
- if (!['http:', 'https:', 'data:'].includes(urlObject.protocol)) {
5142
- return false;
5143
- }
5144
- return true;
5145
- }
5146
- catch (error) {
5147
- return false;
5148
- }
5149
- }
5150
-
5151
- /**
5152
- * Matches URL-like candidates inside arbitrary text.
5153
- *
5154
- * @private
5155
- */
5156
- const URL_CANDIDATE_PATTERN = /https?:\/\/[^\s<>"'`]+/g;
5157
- /**
5158
- * Trims punctuation that commonly trails URLs in prose.
5159
- *
5160
- * @private
5161
- */
5162
- const TRAILING_PUNCTUATION_PATTERN = /[.,!?;:'"`]+$/;
5163
- /**
5164
- * Extracts all valid URLs from arbitrary text while removing common trailing punctuation.
5165
- *
5166
- * @param text - Input text that may contain one or more URLs.
5167
- * @returns Unique URLs in their first-seen order.
5168
- *
5169
- * @private utility of KNOWLEDGE parsing
5170
- */
5171
- function extractUrlsFromText(text) {
5172
- if (!text) {
5173
- return [];
5174
- }
5175
- const candidates = text.match(URL_CANDIDATE_PATTERN);
5176
- if (!candidates) {
5177
- return [];
5178
- }
5179
- const urls = [];
5180
- const seen = new Set();
5181
- for (const candidate of candidates) {
5182
- const normalizedCandidate = normalizeUrlCandidate(candidate);
5183
- if (!normalizedCandidate) {
5184
- continue;
5185
- }
5186
- if (!isValidUrl(normalizedCandidate)) {
5187
- continue;
5188
- }
5189
- if (seen.has(normalizedCandidate)) {
5190
- continue;
5191
- }
5192
- seen.add(normalizedCandidate);
5193
- urls.push(normalizedCandidate);
5194
- }
5195
- return urls;
5196
- }
5197
- /**
5198
- * Normalizes one extracted URL candidate by stripping trailing punctuation and unmatched closing wrappers.
5199
- *
5200
- * @private
5201
- */
5202
- function normalizeUrlCandidate(candidate) {
5203
- let normalized = candidate.trim();
5204
- if (!normalized) {
5205
- return '';
5206
- }
5207
- let shouldContinue = true;
5208
- while (shouldContinue) {
5209
- const before = normalized;
5210
- normalized = normalized.replace(TRAILING_PUNCTUATION_PATTERN, '');
5211
- normalized = stripTrailingUnmatchedClosing(normalized, '(', ')');
5212
- normalized = stripTrailingUnmatchedClosing(normalized, '[', ']');
5213
- normalized = stripTrailingUnmatchedClosing(normalized, '{', '}');
5214
- normalized = normalized.replace(TRAILING_PUNCTUATION_PATTERN, '');
5215
- shouldContinue = normalized !== before;
5216
- }
5217
- return normalized;
5218
- }
5219
- /**
5220
- * Removes trailing closing wrappers when they are unmatched in the candidate.
5221
- *
5222
- * @private
5223
- */
5224
- function stripTrailingUnmatchedClosing(candidate, openingChar, closingChar) {
5225
- let normalized = candidate;
5226
- while (normalized.endsWith(closingChar)) {
5227
- const openingCount = countOccurrences(normalized, openingChar);
5228
- const closingCount = countOccurrences(normalized, closingChar);
5229
- if (closingCount <= openingCount) {
5230
- break;
5231
- }
5232
- normalized = normalized.slice(0, -1);
5233
- }
5234
- return normalized;
5235
- }
5236
- /**
5237
- * Counts character occurrences in a string.
5238
- *
5239
- * @private
5240
- */
5241
- function countOccurrences(value, searchedChar) {
5242
- let count = 0;
5243
- for (const currentChar of value) {
5244
- if (currentChar === searchedChar) {
5245
- count++;
5246
- }
5247
- }
5248
- return count;
5249
- }
5250
-
5251
- /**
5252
- * Normalizes a domain-like string into a comparable hostname form.
5253
- *
5254
- * The returned value is lowercased and stripped to hostname only
5255
- * (protocol, path, query, hash, and port are removed).
5256
- *
5257
- * @param rawDomain - Raw domain value (for example `my-agent.com` or `https://my-agent.com/path`).
5258
- * @returns Normalized hostname or `null` when the value cannot be normalized.
5259
- *
5260
- * @private utility for host/domain matching
5261
- */
5262
- function normalizeDomainForMatching(rawDomain) {
5263
- const trimmedDomain = rawDomain.trim();
5264
- if (!trimmedDomain) {
5265
- return null;
5266
- }
5267
- const candidateUrl = hasHttpProtocol(trimmedDomain) ? trimmedDomain : `https://${trimmedDomain}`;
5268
- try {
5269
- const parsedUrl = new URL(candidateUrl);
5270
- const normalizedHostname = parsedUrl.hostname.trim().toLowerCase();
5271
- return normalizedHostname || null;
5272
- }
5273
- catch (_a) {
5274
- return null;
5275
- }
5276
- }
5277
- /**
5278
- * Checks whether the value already includes an HTTP(S) protocol prefix.
5279
- *
5280
- * @param value - Raw value to inspect.
5281
- * @returns True when the value starts with `http://` or `https://`.
5282
- *
5283
- * @private utility for host/domain matching
5284
- */
5285
- function hasHttpProtocol(value) {
5286
- return value.startsWith('http://') || value.startsWith('https://');
5287
- }
5288
-
5289
4892
  /**
5290
4893
  * Make error report URL for the given error
5291
4894
  *
@@ -5872,6 +5475,59 @@
5872
5475
  };
5873
5476
  // Note: [💞] Ignore a discrepancy between file name and entity name
5874
5477
 
5478
+ /**
5479
+ * Normalizes a given text to camelCase format.
5480
+ *
5481
+ * Note: [🔂] This function is idempotent.
5482
+ *
5483
+ * @param text The text to be normalized.
5484
+ * @param _isFirstLetterCapital Whether the first letter should be capitalized.
5485
+ * @returns The camelCase formatted string.
5486
+ * @example 'helloWorld'
5487
+ * @example 'iLovePromptbook'
5488
+ *
5489
+ * @public exported from `@promptbook/utils`
5490
+ */
5491
+ function normalizeTo_camelCase(text, _isFirstLetterCapital = false) {
5492
+ let charType;
5493
+ let lastCharType = null;
5494
+ let normalizedName = '';
5495
+ for (const char of text) {
5496
+ let normalizedChar;
5497
+ if (/^[a-z]$/.test(char)) {
5498
+ charType = 'LOWERCASE';
5499
+ normalizedChar = char;
5500
+ }
5501
+ else if (/^[A-Z]$/.test(char)) {
5502
+ charType = 'UPPERCASE';
5503
+ normalizedChar = char.toLowerCase();
5504
+ }
5505
+ else if (/^[0-9]$/.test(char)) {
5506
+ charType = 'NUMBER';
5507
+ normalizedChar = char;
5508
+ }
5509
+ else {
5510
+ charType = 'OTHER';
5511
+ normalizedChar = '';
5512
+ }
5513
+ if (!lastCharType) {
5514
+ if (_isFirstLetterCapital) {
5515
+ normalizedChar = normalizedChar.toUpperCase(); //TODO: DRY
5516
+ }
5517
+ }
5518
+ else if (charType !== lastCharType &&
5519
+ !(charType === 'LOWERCASE' && lastCharType === 'UPPERCASE') &&
5520
+ !(lastCharType === 'NUMBER') &&
5521
+ !(charType === 'NUMBER')) {
5522
+ normalizedChar = normalizedChar.toUpperCase(); //TODO: [🌺] DRY
5523
+ }
5524
+ normalizedName += normalizedChar;
5525
+ lastCharType = charType;
5526
+ }
5527
+ return normalizedName;
5528
+ }
5529
+ // TODO: [🌺] Use some intermediate util splitWords
5530
+
5875
5531
  /**
5876
5532
  * Tests if given string is valid file path.
5877
5533
  *
@@ -5927,6 +5583,37 @@
5927
5583
  }
5928
5584
  // TODO: [🍏] Implement for MacOs
5929
5585
 
5586
+ /**
5587
+ * Tests if given string is valid URL.
5588
+ *
5589
+ * Note: [🔂] This function is idempotent.
5590
+ * Note: Dataurl are considered perfectly valid.
5591
+ * Note: There are few similar functions:
5592
+ * - `isValidUrl` *(this one)* which tests any URL
5593
+ * - `isValidAgentUrl` which tests just agent URL
5594
+ * - `isValidPipelineUrl` which tests just pipeline URL
5595
+ *
5596
+ * @public exported from `@promptbook/utils`
5597
+ */
5598
+ function isValidUrl(url) {
5599
+ if (typeof url !== 'string') {
5600
+ return false;
5601
+ }
5602
+ try {
5603
+ if (url.startsWith('blob:')) {
5604
+ url = url.replace(/^blob:/, '');
5605
+ }
5606
+ const urlObject = new URL(url /* because fail is handled */);
5607
+ if (!['http:', 'https:', 'data:'].includes(urlObject.protocol)) {
5608
+ return false;
5609
+ }
5610
+ return true;
5611
+ }
5612
+ catch (error) {
5613
+ return false;
5614
+ }
5615
+ }
5616
+
5930
5617
  /**
5931
5618
  * Collection of default diacritics removal map.
5932
5619
  */
@@ -10926,6 +10613,106 @@
10926
10613
  }
10927
10614
  // Note: [💞] Ignore a discrepancy between file name and entity name
10928
10615
 
10616
+ /**
10617
+ * Matches URL-like candidates inside arbitrary text.
10618
+ *
10619
+ * @private
10620
+ */
10621
+ const URL_CANDIDATE_PATTERN = /https?:\/\/[^\s<>"'`]+/g;
10622
+ /**
10623
+ * Trims punctuation that commonly trails URLs in prose.
10624
+ *
10625
+ * @private
10626
+ */
10627
+ const TRAILING_PUNCTUATION_PATTERN = /[.,!?;:'"`]+$/;
10628
+ /**
10629
+ * Extracts all valid URLs from arbitrary text while removing common trailing punctuation.
10630
+ *
10631
+ * @param text - Input text that may contain one or more URLs.
10632
+ * @returns Unique URLs in their first-seen order.
10633
+ *
10634
+ * @private utility of KNOWLEDGE parsing
10635
+ */
10636
+ function extractUrlsFromText(text) {
10637
+ if (!text) {
10638
+ return [];
10639
+ }
10640
+ const candidates = text.match(URL_CANDIDATE_PATTERN);
10641
+ if (!candidates) {
10642
+ return [];
10643
+ }
10644
+ const urls = [];
10645
+ const seen = new Set();
10646
+ for (const candidate of candidates) {
10647
+ const normalizedCandidate = normalizeUrlCandidate(candidate);
10648
+ if (!normalizedCandidate) {
10649
+ continue;
10650
+ }
10651
+ if (!isValidUrl(normalizedCandidate)) {
10652
+ continue;
10653
+ }
10654
+ if (seen.has(normalizedCandidate)) {
10655
+ continue;
10656
+ }
10657
+ seen.add(normalizedCandidate);
10658
+ urls.push(normalizedCandidate);
10659
+ }
10660
+ return urls;
10661
+ }
10662
+ /**
10663
+ * Normalizes one extracted URL candidate by stripping trailing punctuation and unmatched closing wrappers.
10664
+ *
10665
+ * @private
10666
+ */
10667
+ function normalizeUrlCandidate(candidate) {
10668
+ let normalized = candidate.trim();
10669
+ if (!normalized) {
10670
+ return '';
10671
+ }
10672
+ let shouldContinue = true;
10673
+ while (shouldContinue) {
10674
+ const before = normalized;
10675
+ normalized = normalized.replace(TRAILING_PUNCTUATION_PATTERN, '');
10676
+ normalized = stripTrailingUnmatchedClosing(normalized, '(', ')');
10677
+ normalized = stripTrailingUnmatchedClosing(normalized, '[', ']');
10678
+ normalized = stripTrailingUnmatchedClosing(normalized, '{', '}');
10679
+ normalized = normalized.replace(TRAILING_PUNCTUATION_PATTERN, '');
10680
+ shouldContinue = normalized !== before;
10681
+ }
10682
+ return normalized;
10683
+ }
10684
+ /**
10685
+ * Removes trailing closing wrappers when they are unmatched in the candidate.
10686
+ *
10687
+ * @private
10688
+ */
10689
+ function stripTrailingUnmatchedClosing(candidate, openingChar, closingChar) {
10690
+ let normalized = candidate;
10691
+ while (normalized.endsWith(closingChar)) {
10692
+ const openingCount = countOccurrences(normalized, openingChar);
10693
+ const closingCount = countOccurrences(normalized, closingChar);
10694
+ if (closingCount <= openingCount) {
10695
+ break;
10696
+ }
10697
+ normalized = normalized.slice(0, -1);
10698
+ }
10699
+ return normalized;
10700
+ }
10701
+ /**
10702
+ * Counts character occurrences in a string.
10703
+ *
10704
+ * @private
10705
+ */
10706
+ function countOccurrences(value, searchedChar) {
10707
+ let count = 0;
10708
+ for (const currentChar of value) {
10709
+ if (currentChar === searchedChar) {
10710
+ count++;
10711
+ }
10712
+ }
10713
+ return count;
10714
+ }
10715
+
10929
10716
  /**
10930
10717
  * The default base name for inline knowledge files when the content lacks identifying text.
10931
10718
  *
@@ -16264,6 +16051,44 @@
16264
16051
  * @private constant of createUseCalendarTools
16265
16052
  */
16266
16053
  const CALENDAR_URL_PARAMETER_DESCRIPTION = 'Google Calendar URL configured by USE CALENDAR (for example "https://calendar.google.com/...").';
16054
+ /**
16055
+ * Shared schema for string arrays used by USE CALENDAR tools.
16056
+ *
16057
+ * @private constant of createUseCalendarTools
16058
+ */
16059
+ const STRING_ARRAY_ITEMS_SCHEMA = {
16060
+ type: 'string',
16061
+ };
16062
+ /**
16063
+ * Shared schema for integer arrays used by USE CALENDAR tools.
16064
+ *
16065
+ * @private constant of createUseCalendarTools
16066
+ */
16067
+ const INTEGER_ARRAY_ITEMS_SCHEMA = {
16068
+ type: 'integer',
16069
+ };
16070
+ /**
16071
+ * Shared `sendUpdates` schema used by USE CALENDAR tools.
16072
+ *
16073
+ * @private constant of createUseCalendarTools
16074
+ */
16075
+ const SEND_UPDATES_PARAMETER_SCHEMA = {
16076
+ type: 'string',
16077
+ description: 'Guest update policy ("all", "externalOnly", "none").',
16078
+ enum: ['all', 'externalOnly', 'none'],
16079
+ };
16080
+ /**
16081
+ * Creates an array parameter schema with explicit item definition so OpenAI accepts it.
16082
+ *
16083
+ * @private function of createUseCalendarTools
16084
+ */
16085
+ function createArrayParameterSchema(description, items) {
16086
+ return {
16087
+ type: 'array',
16088
+ description,
16089
+ items,
16090
+ };
16091
+ }
16267
16092
  /**
16268
16093
  * Adds USE CALENDAR tool definitions while keeping already registered tools untouched.
16269
16094
  *
@@ -16370,18 +16195,9 @@
16370
16195
  type: 'string',
16371
16196
  description: 'Optional timezone for datetime values.',
16372
16197
  },
16373
- attendees: {
16374
- type: 'array',
16375
- description: 'Optional guest email list.',
16376
- },
16377
- reminderMinutes: {
16378
- type: 'array',
16379
- description: 'Optional popup reminder minute offsets.',
16380
- },
16381
- sendUpdates: {
16382
- type: 'string',
16383
- description: 'Guest update policy ("all", "externalOnly", "none").',
16384
- },
16198
+ attendees: createArrayParameterSchema('Optional guest email list.', STRING_ARRAY_ITEMS_SCHEMA),
16199
+ reminderMinutes: createArrayParameterSchema('Optional popup reminder minute offsets.', INTEGER_ARRAY_ITEMS_SCHEMA),
16200
+ sendUpdates: SEND_UPDATES_PARAMETER_SCHEMA,
16385
16201
  },
16386
16202
  required: ['summary', 'start', 'end'],
16387
16203
  },
@@ -16424,18 +16240,9 @@
16424
16240
  type: 'string',
16425
16241
  description: 'Optional timezone for datetime values.',
16426
16242
  },
16427
- attendees: {
16428
- type: 'array',
16429
- description: 'Optional replacement guest email list.',
16430
- },
16431
- reminderMinutes: {
16432
- type: 'array',
16433
- description: 'Optional replacement popup reminder minute offsets.',
16434
- },
16435
- sendUpdates: {
16436
- type: 'string',
16437
- description: 'Guest update policy ("all", "externalOnly", "none").',
16438
- },
16243
+ attendees: createArrayParameterSchema('Optional replacement guest email list.', STRING_ARRAY_ITEMS_SCHEMA),
16244
+ reminderMinutes: createArrayParameterSchema('Optional replacement popup reminder minute offsets.', INTEGER_ARRAY_ITEMS_SCHEMA),
16245
+ sendUpdates: SEND_UPDATES_PARAMETER_SCHEMA,
16439
16246
  },
16440
16247
  required: ['eventId'],
16441
16248
  },
@@ -16454,10 +16261,7 @@
16454
16261
  type: 'string',
16455
16262
  description: 'Google Calendar event id.',
16456
16263
  },
16457
- sendUpdates: {
16458
- type: 'string',
16459
- description: 'Guest update policy ("all", "externalOnly", "none").',
16460
- },
16264
+ sendUpdates: SEND_UPDATES_PARAMETER_SCHEMA,
16461
16265
  },
16462
16266
  required: ['eventId'],
16463
16267
  },
@@ -16476,14 +16280,8 @@
16476
16280
  type: 'string',
16477
16281
  description: 'Google Calendar event id.',
16478
16282
  },
16479
- guests: {
16480
- type: 'array',
16481
- description: 'Guest email list to add to the event.',
16482
- },
16483
- sendUpdates: {
16484
- type: 'string',
16485
- description: 'Guest update policy ("all", "externalOnly", "none").',
16486
- },
16283
+ guests: createArrayParameterSchema('Guest email list to add to the event.', STRING_ARRAY_ITEMS_SCHEMA),
16284
+ sendUpdates: SEND_UPDATES_PARAMETER_SCHEMA,
16487
16285
  },
16488
16286
  required: ['eventId', 'guests'],
16489
16287
  },
@@ -18276,6 +18074,181 @@
18276
18074
  return normalized;
18277
18075
  }
18278
18076
 
18077
/**
 * Hostnames accepted for GitHub repository references.
 *
 * @private internal USE PROJECT constant
 */
const GITHUB_HOSTNAMES = new Set(['github.com', 'www.github.com']);
/**
 * Pattern for validating `owner/repository` slugs — exactly one slash, each part
 * built from letters, digits, `_`, `.` or `-`.
 *
 * @private internal USE PROJECT constant
 */
const GITHUB_REPOSITORY_SLUG_PATTERN = /^[A-Za-z0-9_.-]+\/[A-Za-z0-9_.-]+$/;
18089
/**
 * Parses a repository reference into canonical owner/repository details.
 *
 * Supported input forms:
 * - `https://github.com/owner/repository`
 * - `github.com/owner/repository`
 * - `owner/repository`
 * - an optional `.git` suffix and trailing slash are tolerated
 *
 * @param rawReference - Raw user-supplied repository reference.
 * @returns Canonical repository reference object, or `null` when the input is not recognizable.
 *
 * @private internal utility of USE PROJECT commitment
 */
function parseGitHubRepositoryReference(rawReference) {
    // Trailing slashes never carry meaning for any supported form.
    const reference = rawReference.trim().replace(/\/+$/g, '');
    if (!reference) {
        return null;
    }
    // Absolute URLs are delegated to the URL-specific parser.
    if (/^https?:\/\//.test(reference)) {
        return parseGitHubRepositoryReferenceFromUrl(reference);
    }
    // Bare `github.com/...` references are promoted to full URLs first.
    if (reference.startsWith('github.com/')) {
        return parseGitHubRepositoryReferenceFromUrl(`https://${reference}`);
    }
    // Remaining form must be a plain `owner/repository` slug.
    if (!GITHUB_REPOSITORY_SLUG_PATTERN.test(reference)) {
        return null;
    }
    const [owner, rawRepository] = reference.split('/');
    if (!owner || !rawRepository) {
        return null;
    }
    const repository = rawRepository.replace(/\.git$/i, '');
    if (!isValidGitHubRepositoryPart(owner) || !isValidGitHubRepositoryPart(repository)) {
        return null;
    }
    return createGitHubRepositoryReference(owner, repository);
}
18125
/**
 * Parses `USE PROJECT` commitment content into a repository reference plus optional instructions.
 *
 * The first line is scanned token by token for a recognizable GitHub reference
 * (trailing punctuation is stripped from each token before matching). All other
 * first-line tokens and every following line become free-form instruction text.
 *
 * @param content - Raw commitment content.
 * @returns Object with `repository` (parsed reference or `null`),
 *          `repositoryReferenceRaw` (matched token or `null`) and `instructions` (string).
 *
 * @private internal utility of USE PROJECT commitment
 */
function parseUseProjectCommitmentContent(content) {
    const makeEmptyResult = () => ({
        repository: null,
        repositoryReferenceRaw: null,
        instructions: '',
    });
    const trimmedContent = spacetrim.spaceTrim(content);
    if (!trimmedContent) {
        return makeEmptyResult();
    }
    const lines = trimmedContent
        .split(/\r?\n/)
        .map((line) => line.trim())
        .filter(Boolean);
    if (lines.length === 0) {
        return makeEmptyResult();
    }
    const firstLine = lines[0] || '';
    const tokens = firstLine.split(/\s+/).filter(Boolean);
    let matchedRaw = null;
    let matchedReference = null;
    let matchedIndex = -1;
    for (const [index, token] of tokens.entries()) {
        // Strip trailing punctuation so references inside sentences still match.
        const cleanedToken = token.replace(/[),.;:!?]+$/g, '');
        const parsedReference = parseGitHubRepositoryReference(cleanedToken);
        if (parsedReference) {
            matchedRaw = cleanedToken;
            matchedReference = parsedReference;
            matchedIndex = index;
            break;
        }
    }
    const instructionParts = [];
    if (matchedIndex >= 0) {
        // Everything on the first line except the matched token is instruction text.
        const firstLineRemainder = tokens
            .filter((_token, index) => index !== matchedIndex)
            .join(' ')
            .trim();
        if (firstLineRemainder) {
            instructionParts.push(firstLineRemainder);
        }
    } else if (firstLine) {
        instructionParts.push(firstLine);
    }
    if (lines.length > 1) {
        const followingLines = lines.slice(1).join('\n').trim();
        if (followingLines) {
            instructionParts.push(followingLines);
        }
    }
    return {
        repository: matchedReference,
        repositoryReferenceRaw: matchedRaw,
        instructions: instructionParts.join('\n').trim(),
    };
}
18192
/**
 * Parses URL-like repository references (`https://github.com/owner/repo[/tree/branch]`).
 *
 * @param rawUrl - Absolute URL string.
 * @returns Canonical repository reference (with `defaultBranch` filled when a
 *          `/tree/<branch>` segment is present), or `null` for unparsable,
 *          non-GitHub, or incomplete URLs.
 *
 * @private utility of USE PROJECT commitment
 */
function parseGitHubRepositoryReferenceFromUrl(rawUrl) {
    let parsedUrl;
    try {
        parsedUrl = new URL(rawUrl);
    } catch (_error) {
        return null;
    }
    if (!GITHUB_HOSTNAMES.has(parsedUrl.hostname.toLowerCase())) {
        return null;
    }
    // Path shape: /owner/repository[/tree/branch/...]
    const [owner, rawRepository, treeKeyword, branchSegment] = parsedUrl.pathname
        .split('/')
        .filter(Boolean);
    if (!owner || !rawRepository) {
        return null;
    }
    const repository = rawRepository.replace(/\.git$/i, '');
    if (!isValidGitHubRepositoryPart(owner) || !isValidGitHubRepositoryPart(repository)) {
        return null;
    }
    const defaultBranch =
        treeKeyword === 'tree' && branchSegment ? decodeURIComponent(branchSegment) : undefined;
    return createGitHubRepositoryReference(owner, repository, defaultBranch);
}
18227
/**
 * Validates one `owner` or `repository` slug part.
 *
 * @param value - Candidate slug part.
 * @returns True when the part is non-empty and contains only letters, digits, `_`, `.` or `-`.
 *
 * @private utility of USE PROJECT commitment
 */
function isValidGitHubRepositoryPart(value) {
    const VALID_PART_PATTERN = /^[A-Za-z0-9_.-]+$/;
    return VALID_PART_PATTERN.test(value);
}
18235
/**
 * Builds the canonical repository reference object for one GitHub repository.
 *
 * @param owner - Repository owner (user or organization).
 * @param repository - Repository name (already stripped of any `.git` suffix).
 * @param defaultBranch - Optional branch extracted from a `/tree/<branch>` URL segment.
 * @returns Object exposing `owner`, `repository`, `slug`, the canonical `url`,
 *          and `defaultBranch` (possibly `undefined`).
 *
 * @private utility of USE PROJECT commitment
 */
function createGitHubRepositoryReference(owner, repository, defaultBranch) {
    const slug = [owner, repository].join('/');
    return {
        owner,
        repository,
        slug,
        url: `https://github.com/${slug}`,
        defaultBranch,
    };
}
18250
+ // Note: [💞] Ignore a discrepancy between file name and entity name
18251
+
18279
18252
  /**
18280
18253
  * Wallet metadata used by USE PROJECT when resolving GitHub credentials.
18281
18254
  *
@@ -21186,126 +21159,16 @@
21186
21159
  }
21187
21160
 
21188
21161
  /**
21189
- * Parses basic information from agent source
21190
- *
21191
- * There are 2 similar functions:
21192
- * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
21193
- * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
21194
- *
21195
- * @public exported from `@promptbook/core`
21196
- */
21197
- function parseAgentSource(agentSource) {
21198
- const parseResult = parseAgentSourceWithCommitments(agentSource);
21199
- const resolvedAgentName = parseResult.agentName || createDefaultAgentName(agentSource);
21200
- const personaDescription = extractAgentProfileText(parseResult.commitments);
21201
- const initialMessage = extractInitialMessage(parseResult.commitments);
21202
- const parsedProfile = extractParsedAgentProfile(parseResult.commitments);
21203
- ensureMetaFullname(parsedProfile.meta, resolvedAgentName);
21204
- return {
21205
- agentName: normalizeAgentName(resolvedAgentName),
21206
- agentHash: computeAgentHash(agentSource),
21207
- permanentId: parsedProfile.meta.id,
21208
- personaDescription,
21209
- initialMessage,
21210
- meta: parsedProfile.meta,
21211
- links: parsedProfile.links,
21212
- parameters: parseParameters(agentSource),
21213
- capabilities: parsedProfile.capabilities,
21214
- samples: parsedProfile.samples,
21215
- knowledgeSources: parsedProfile.knowledgeSources,
21216
- };
21217
- }
21218
- /**
21219
- * Static capability descriptors for commitments that map one-to-one to a visible capability.
21220
- *
21221
- * @private internal utility of `parseAgentSource`
21222
- */
21223
- const SIMPLE_CAPABILITY_BY_COMMITMENT_TYPE = {
21224
- 'USE BROWSER': {
21225
- type: 'browser',
21226
- label: 'Browser',
21227
- iconName: 'Globe',
21228
- },
21229
- 'USE SEARCH ENGINE': {
21230
- type: 'search-engine',
21231
- label: 'Internet',
21232
- iconName: 'Search',
21233
- },
21234
- 'USE SEARCH': {
21235
- type: 'search-engine',
21236
- label: 'Internet',
21237
- iconName: 'Search',
21238
- },
21239
- 'USE DEEPSEARCH': {
21240
- type: 'search-engine',
21241
- label: 'DeepSearch',
21242
- iconName: 'Search',
21243
- },
21244
- 'USE TIME': {
21245
- type: 'time',
21246
- label: 'Time',
21247
- iconName: 'Clock',
21248
- },
21249
- 'USE TIMEOUT': {
21250
- type: 'timeout',
21251
- label: 'Timers',
21252
- iconName: 'Clock',
21253
- },
21254
- 'USE USER LOCATION': {
21255
- type: 'user-location',
21256
- label: 'User location',
21257
- iconName: 'MapPin',
21258
- },
21259
- 'USE EMAIL': {
21260
- type: 'email',
21261
- label: 'Email',
21262
- iconName: 'Mail',
21263
- },
21264
- 'USE POPUP': {
21265
- type: 'popup',
21266
- label: 'Popup',
21267
- iconName: 'SquareArrowOutUpRight',
21268
- },
21269
- 'USE IMAGE GENERATOR': {
21270
- type: 'image-generator',
21271
- label: 'Image Generator',
21272
- iconName: 'Image',
21273
- },
21274
- 'USE PRIVACY': {
21275
- type: 'privacy',
21276
- label: 'Privacy',
21277
- iconName: 'Shield',
21278
- },
21279
- 'USE CALENDAR': {
21280
- type: 'calendar',
21281
- label: 'Calendar',
21282
- iconName: 'Calendar',
21283
- },
21284
- };
21285
- /**
21286
- * Dedicated handlers for META-style commitments that directly map onto parsed meta fields.
21287
- *
21288
- * @private internal utility of `parseAgentSource`
21289
- */
21290
- const META_COMMITMENT_APPLIERS = {
21291
- 'META AVATAR': applyMetaAvatarContent,
21292
- 'META LINK': applyMetaLinkContent,
21293
- 'META DOMAIN': applyMetaDomainContent,
21294
- 'META IMAGE': applyMetaImageContent,
21295
- 'META DESCRIPTION': applyMetaDescriptionContent,
21296
- 'META DISCLAIMER': applyMetaDisclaimerContent,
21297
- 'META INPUT PLACEHOLDER': applyMetaInputPlaceholderContent,
21298
- 'MESSAGE SUFFIX': applyMessageSuffixContent,
21299
- 'META COLOR': applyMetaColorContent,
21300
- 'META FONT': applyMetaFontContent,
21301
- 'META VOICE': applyMetaVoiceContent,
21302
- };
21303
- /**
21304
- * Detects local slash-based references used by FROM and IMPORT commitments.
21162
+ * Ensures the parsed profile always exposes a fullname value.
21305
21163
  *
21306
21164
  * @private internal utility of `parseAgentSource`
21307
21165
  */
21308
- const LOCAL_AGENT_REFERENCE_PREFIXES = ['./', '../', '/'];
21166
function ensureMetaFullname(meta, fallbackFullname) {
    // Keep any fullname that was already parsed; only fill in the fallback when missing/falsy.
    if (meta.fullname) {
        return;
    }
    meta.fullname = fallbackFullname;
}
21171
+
21309
21172
  /**
21310
21173
  * Resolves the public agent profile text from the last GOAL/GOALS commitment,
21311
21174
  * falling back to the deprecated PERSONA/PERSONAE commitments when no goal exists.
@@ -21335,6 +21198,7 @@
21335
21198
  }
21336
21199
  return null;
21337
21200
  }
21201
+
21338
21202
  /**
21339
21203
  * Resolves the last INITIAL MESSAGE commitment, which is the public initial-message value.
21340
21204
  *
@@ -21349,48 +21213,189 @@
21349
21213
  }
21350
21214
  return initialMessage;
21351
21215
  }
21216
+
21352
21217
  /**
21353
- * Collects capability, sample, meta, link, and knowledge-source data from commitments.
21218
+ * Normalizes a domain-like string into a comparable hostname form.
21354
21219
  *
21355
- * @private internal utility of `parseAgentSource`
21220
+ * The returned value is lowercased and stripped to hostname only
21221
+ * (protocol, path, query, hash, and port are removed).
21222
+ *
21223
+ * @param rawDomain - Raw domain value (for example `my-agent.com` or `https://my-agent.com/path`).
21224
+ * @returns Normalized hostname or `null` when the value cannot be normalized.
21225
+ *
21226
+ * @private utility for host/domain matching
21356
21227
  */
21357
- function extractParsedAgentProfile(commitments) {
21358
- const state = {
21359
- meta: {},
21360
- links: [],
21361
- capabilities: [],
21362
- samples: [],
21363
- knowledgeSources: [],
21364
- pendingUserMessage: null,
21365
- knownKnowledgeSourceUrls: new Set(),
21366
- };
21367
- for (const commitment of commitments) {
21368
- processParsedCommitment(state, commitment);
21228
function normalizeDomainForMatching(rawDomain) {
    const candidate = rawDomain.trim();
    if (!candidate) {
        return null;
    }
    // Values without an explicit protocol are still parseable once prefixed.
    const candidateUrl = hasHttpProtocol(candidate) ? candidate : `https://${candidate}`;
    try {
        const hostname = new URL(candidateUrl).hostname.trim().toLowerCase();
        return hostname || null;
    } catch (_error) {
        // Anything `URL` cannot parse is treated as non-normalizable.
        return null;
    }
}
21378
21243
  /**
21379
- * Processes one parsed commitment through the sample, capability, and meta stages.
21244
+ * Checks whether the value already includes an HTTP(S) protocol prefix.
21245
+ *
21246
+ * @param value - Raw value to inspect.
21247
+ * @returns True when the value starts with `http://` or `https://`.
21248
+ *
21249
+ * @private utility for host/domain matching
21250
+ */
21251
function hasHttpProtocol(value) {
    // Case-sensitive on purpose: only lowercase `http://` / `https://` prefixes count.
    return ['http://', 'https://'].some((protocolPrefix) => value.startsWith(protocolPrefix));
}
21254
+
21255
/**
 * Dedicated handlers for META-style commitments that directly map onto parsed meta fields.
 *
 * Keyed by commitment type; each handler mutates the parse state with the commitment content.
 */
const META_COMMITMENT_APPLIERS = {
    'META AVATAR': applyMetaAvatarContent,
    'META LINK': applyMetaLinkContent,
    'META DOMAIN': applyMetaDomainContent,
    'META IMAGE': applyMetaImageContent,
    'META DESCRIPTION': applyMetaDescriptionContent,
    'META DISCLAIMER': applyMetaDisclaimerContent,
    'META INPUT PLACEHOLDER': applyMetaInputPlaceholderContent,
    'MESSAGE SUFFIX': applyMessageSuffixContent,
    'META COLOR': applyMetaColorContent,
    'META FONT': applyMetaFontContent,
    'META VOICE': applyMetaVoiceContent,
};
21271
+ /**
21272
+ * Applies META-style commitments that mutate parsed profile metadata.
21380
21273
  *
21381
21274
  * @private internal utility of `parseAgentSource`
21382
21275
  */
21383
- function processParsedCommitment(state, commitment) {
21384
- if (consumeConversationSampleCommitment(state, commitment)) {
21276
function applyMetaCommitment(state, commitment) {
    // Prefer a dedicated handler for the concrete META-style commitment type.
    const dedicatedApplier = META_COMMITMENT_APPLIERS[commitment.type];
    if (dedicatedApplier) {
        dedicatedApplier(state, commitment.content);
        return;
    }
    // The generic `META TYPE value` form is handled separately.
    if (commitment.type === 'META') {
        applyGenericMetaCommitment(state, commitment.content);
    }
}
21286
/**
 * Applies the generic META commitment form (`META TYPE value`).
 *
 * `LINK` values are additionally collected into `state.links`; `AVATAR`
 * (case-insensitive) is delegated to the dedicated avatar applier; every
 * other type is stored on `state.meta` under its camelCased key.
 */
function applyGenericMetaCommitment(state, content) {
    const rawMetaType = content.split(' ')[0] || 'NONE';
    const metaValue = spacetrim.spaceTrim(content.substring(rawMetaType.length));
    if (rawMetaType === 'LINK') {
        state.links.push(metaValue);
        // Note: intentionally falls through so `meta.link` is also set below.
    }
    if (rawMetaType.toUpperCase() === 'AVATAR') {
        applyMetaAvatarContent(state, metaValue);
        return;
    }
    state.meta[normalizeTo_camelCase(rawMetaType)] = metaValue;
}
21302
/**
 * Applies META AVATAR content into the canonical `meta.avatar` field.
 *
 * When the content resolves to a known avatar visual id, that id is stored;
 * otherwise any previously stored avatar is removed.
 */
function applyMetaAvatarContent(state, content) {
    const visualId = resolveAvatarVisualId(content);
    if (!visualId) {
        delete state.meta.avatar;
        return;
    }
    state.meta.avatar = visualId;
}
21313
/**
 * Applies META LINK content into `state.links` and the canonical `meta.link` field.
 */
function applyMetaLinkContent(state, content) {
    const link = spacetrim.spaceTrim(content);
    state.links.push(link);
    state.meta.link = link;
}
21321
/**
 * Applies META DOMAIN content into the normalized `meta.domain` field.
 */
function applyMetaDomainContent(state, content) {
    state.meta.domain = normalizeMetaDomain(content);
}
/**
 * Applies META IMAGE content into the canonical `meta.image` field.
 */
function applyMetaImageContent(state, content) {
    state.meta.image = spacetrim.spaceTrim(content);
}
/**
 * Applies META DESCRIPTION content into the canonical `meta.description` field.
 */
function applyMetaDescriptionContent(state, content) {
    state.meta.description = spacetrim.spaceTrim(content);
}
/**
 * Applies META DISCLAIMER content into the canonical `meta.disclaimer` field.
 * Note: stored verbatim — the content is not trimmed.
 */
function applyMetaDisclaimerContent(state, content) {
    state.meta.disclaimer = content;
}
/**
 * Applies META INPUT PLACEHOLDER content into the canonical `meta.inputPlaceholder` field.
 */
function applyMetaInputPlaceholderContent(state, content) {
    state.meta.inputPlaceholder = spacetrim.spaceTrim(content);
}
/**
 * Applies MESSAGE SUFFIX content into the canonical `meta.messageSuffix` field.
 * Note: stored verbatim — the content is not trimmed.
 */
function applyMessageSuffixContent(state, content) {
    state.meta.messageSuffix = content;
}
/**
 * Applies META COLOR content into the canonical `meta.color` field (separators normalized).
 */
function applyMetaColorContent(state, content) {
    state.meta.color = normalizeSeparator(content);
}
/**
 * Applies META FONT content into the canonical `meta.font` field (separators normalized).
 */
function applyMetaFontContent(state, content) {
    state.meta.font = normalizeSeparator(content);
}
/**
 * Applies META VOICE content into the canonical `meta.voice` field.
 */
function applyMetaVoiceContent(state, content) {
    state.meta.voice = spacetrim.spaceTrim(content);
}
21375
/**
 * Normalizes the separator in the content.
 *
 * Content that already contains commas is returned as-is (after trimming);
 * otherwise whitespace-separated tokens are re-joined with `", "`.
 *
 * @param content - The content to normalize
 * @returns The content with normalized separators
 */
function normalizeSeparator(content) {
    const trimmed = spacetrim.spaceTrim(content);
    return trimmed.includes(',') ? trimmed : trimmed.split(/\s+/).join(', ');
}
21388
/**
 * Normalizes META DOMAIN content to a hostname-like value when possible.
 *
 * @param content - Raw META DOMAIN content.
 * @returns Normalized hostname, or the trimmed lowercased input as a fallback.
 */
function normalizeMetaDomain(content) {
    const trimmed = spacetrim.spaceTrim(content);
    const normalized = normalizeDomainForMatching(trimmed);
    return normalized || trimmed.toLowerCase();
}
21398
+
21394
21399
  /**
21395
21400
  * Updates sample-conversation state for communication commitments.
21396
21401
  *
@@ -21417,6 +21422,76 @@
21417
21422
  return false;
21418
21423
  }
21419
21424
  }
21425
+
21426
+ /**
21427
+ * Static capability descriptors for commitments that map one-to-one to a visible capability.
21428
+ */
21429
+ const SIMPLE_CAPABILITY_BY_COMMITMENT_TYPE = {
21430
+ 'USE BROWSER': {
21431
+ type: 'browser',
21432
+ label: 'Browser',
21433
+ iconName: 'Globe',
21434
+ },
21435
+ 'USE SEARCH ENGINE': {
21436
+ type: 'search-engine',
21437
+ label: 'Internet',
21438
+ iconName: 'Search',
21439
+ },
21440
+ 'USE SEARCH': {
21441
+ type: 'search-engine',
21442
+ label: 'Internet',
21443
+ iconName: 'Search',
21444
+ },
21445
+ 'USE DEEPSEARCH': {
21446
+ type: 'search-engine',
21447
+ label: 'DeepSearch',
21448
+ iconName: 'Search',
21449
+ },
21450
+ 'USE TIME': {
21451
+ type: 'time',
21452
+ label: 'Time',
21453
+ iconName: 'Clock',
21454
+ },
21455
+ 'USE TIMEOUT': {
21456
+ type: 'timeout',
21457
+ label: 'Timers',
21458
+ iconName: 'Clock',
21459
+ },
21460
+ 'USE USER LOCATION': {
21461
+ type: 'user-location',
21462
+ label: 'User location',
21463
+ iconName: 'MapPin',
21464
+ },
21465
+ 'USE EMAIL': {
21466
+ type: 'email',
21467
+ label: 'Email',
21468
+ iconName: 'Mail',
21469
+ },
21470
+ 'USE POPUP': {
21471
+ type: 'popup',
21472
+ label: 'Popup',
21473
+ iconName: 'SquareArrowOutUpRight',
21474
+ },
21475
+ 'USE IMAGE GENERATOR': {
21476
+ type: 'image-generator',
21477
+ label: 'Image Generator',
21478
+ iconName: 'Image',
21479
+ },
21480
+ 'USE PRIVACY': {
21481
+ type: 'privacy',
21482
+ label: 'Privacy',
21483
+ iconName: 'Shield',
21484
+ },
21485
+ 'USE CALENDAR': {
21486
+ type: 'calendar',
21487
+ label: 'Calendar',
21488
+ iconName: 'Calendar',
21489
+ },
21490
+ };
21491
+ /**
21492
+ * Detects local slash-based references used by FROM and IMPORT commitments.
21493
+ */
21494
+ const LOCAL_AGENT_REFERENCE_PREFIXES = ['./', '../', '/'];
21420
21495
  /**
21421
21496
  * Creates the visible capabilities produced by one parsed commitment.
21422
21497
  *
@@ -21446,8 +21521,6 @@
21446
21521
  }
21447
21522
  /**
21448
21523
  * Clones one static capability descriptor for a simple capability commitment.
21449
- *
21450
- * @private internal utility of `parseAgentSource`
21451
21524
  */
21452
21525
  function createSimpleCapability(commitmentType) {
21453
21526
  const capability = SIMPLE_CAPABILITY_BY_COMMITMENT_TYPE[commitmentType];
@@ -21455,8 +21528,6 @@
21455
21528
  }
21456
21529
  /**
21457
21530
  * Creates the USE PROJECT capability badge.
21458
- *
21459
- * @private internal utility of `parseAgentSource`
21460
21531
  */
21461
21532
  function createProjectCapability(content) {
21462
21533
  var _a;
@@ -21470,8 +21541,6 @@
21470
21541
  }
21471
21542
  /**
21472
21543
  * Creates the FROM inheritance capability when the reference should stay visible in the profile.
21473
- *
21474
- * @private internal utility of `parseAgentSource`
21475
21544
  */
21476
21545
  function createInheritanceCapability(content) {
21477
21546
  const reference = extractFirstCommitmentLine(content);
@@ -21498,8 +21567,6 @@
21498
21567
  }
21499
21568
  /**
21500
21569
  * Creates the IMPORT capability badge.
21501
- *
21502
- * @private internal utility of `parseAgentSource`
21503
21570
  */
21504
21571
  function createImportCapability(content) {
21505
21572
  const reference = extractFirstCommitmentLine(content);
@@ -21527,8 +21594,6 @@
21527
21594
  }
21528
21595
  /**
21529
21596
  * Creates TEAM capability badges for all parsed teammates.
21530
- *
21531
- * @private internal utility of `parseAgentSource`
21532
21597
  */
21533
21598
  function createTeamCapabilities(content) {
21534
21599
  const teammates = parseTeamCommitmentContent(content);
@@ -21541,8 +21606,6 @@
21541
21606
  }
21542
21607
  /**
21543
21608
  * Creates the KNOWLEDGE capability badge and records URL-based knowledge sources.
21544
- *
21545
- * @private internal utility of `parseAgentSource`
21546
21609
  */
21547
21610
  function createKnowledgeCapability(state, content) {
21548
21611
  const trimmedContent = spacetrim.spaceTrim(content);
@@ -21557,8 +21620,6 @@
21557
21620
  }
21558
21621
  /**
21559
21622
  * Stores unique URL-based knowledge sources for later citation resolution.
21560
- *
21561
- * @private internal utility of `parseAgentSource`
21562
21623
  */
21563
21624
  function rememberKnowledgeSources(state, extractedUrls) {
21564
21625
  for (const extractedUrl of extractedUrls) {
@@ -21582,8 +21643,6 @@
21582
21643
  }
21583
21644
  /**
21584
21645
  * Derives the visible KNOWLEDGE badge label and icon from commitment content.
21585
- *
21586
- * @private internal utility of `parseAgentSource`
21587
21646
  */
21588
21647
  function createKnowledgeCapabilityPresentation(content, extractedUrls) {
21589
21648
  let label = content;
@@ -21616,8 +21675,6 @@
21616
21675
  }
21617
21676
  /**
21618
21677
  * Shortens text-only KNOWLEDGE commitments into the same preview label as before.
21619
- *
21620
- * @private internal utility of `parseAgentSource`
21621
21678
  */
21622
21679
  function createKnowledgeTextLabel(content) {
21623
21680
  const words = content.split(/\s+/);
@@ -21626,186 +21683,89 @@
21626
21683
  }
21627
21684
  return content;
21628
21685
  }
21629
- /**
21630
- * Applies META-style commitments that mutate parsed profile metadata.
21631
- *
21632
- * @private internal utility of `parseAgentSource`
21633
- */
21634
- function applyMetaCommitment(state, commitment) {
21635
- const applyMetaContent = META_COMMITMENT_APPLIERS[commitment.type];
21636
- if (applyMetaContent) {
21637
- applyMetaContent(state, commitment.content);
21638
- return;
21639
- }
21640
- if (commitment.type === 'META') {
21641
- applyGenericMetaCommitment(state, commitment.content);
21642
- }
21643
- }
21644
- /**
21645
- * Applies the generic META commitment form (`META TYPE value`).
21646
- *
21647
- * @private internal utility of `parseAgentSource`
21648
- */
21649
- function applyGenericMetaCommitment(state, content) {
21650
- const metaTypeRaw = content.split(' ')[0] || 'NONE';
21651
- const metaValue = spacetrim.spaceTrim(content.substring(metaTypeRaw.length));
21652
- if (metaTypeRaw === 'LINK') {
21653
- state.links.push(metaValue);
21654
- }
21655
- if (metaTypeRaw.toUpperCase() === 'AVATAR') {
21656
- applyMetaAvatarContent(state, metaValue);
21657
- return;
21658
- }
21659
- const metaType = normalizeTo_camelCase(metaTypeRaw);
21660
- state.meta[metaType] = metaValue;
21661
- }
21662
- /**
21663
- * Applies META AVATAR content into the canonical `meta.avatar` field.
21664
- *
21665
- * @private internal utility of `parseAgentSource`
21666
- */
21667
- function applyMetaAvatarContent(state, content) {
21668
- const avatarVisualId = resolveAvatarVisualId(content);
21669
- if (avatarVisualId) {
21670
- state.meta.avatar = avatarVisualId;
21671
- return;
21672
- }
21673
- delete state.meta.avatar;
21674
- }
21675
- /**
21676
- * Applies META LINK content into links and the canonical `meta.link` field.
21677
- *
21678
- * @private internal utility of `parseAgentSource`
21679
- */
21680
- function applyMetaLinkContent(state, content) {
21681
- const linkValue = spacetrim.spaceTrim(content);
21682
- state.links.push(linkValue);
21683
- state.meta.link = linkValue;
21684
- }
21685
- /**
21686
- * Applies META DOMAIN content into the normalized `meta.domain` field.
21687
- *
21688
- * @private internal utility of `parseAgentSource`
21689
- */
21690
- function applyMetaDomainContent(state, content) {
21691
- state.meta.domain = normalizeMetaDomain(content);
21692
- }
21693
- /**
21694
- * Applies META IMAGE content into the canonical `meta.image` field.
21695
- *
21696
- * @private internal utility of `parseAgentSource`
21697
- */
21698
- function applyMetaImageContent(state, content) {
21699
- state.meta.image = spacetrim.spaceTrim(content);
21700
- }
21701
- /**
21702
- * Applies META DESCRIPTION content into the canonical `meta.description` field.
21703
- *
21704
- * @private internal utility of `parseAgentSource`
21705
- */
21706
- function applyMetaDescriptionContent(state, content) {
21707
- state.meta.description = spacetrim.spaceTrim(content);
21708
- }
21709
- /**
21710
- * Applies META DISCLAIMER content into the canonical `meta.disclaimer` field.
21711
- *
21712
- * @private internal utility of `parseAgentSource`
21713
- */
21714
- function applyMetaDisclaimerContent(state, content) {
21715
- state.meta.disclaimer = content;
21716
- }
21717
- /**
21718
- * Applies META INPUT PLACEHOLDER content into the canonical `meta.inputPlaceholder` field.
21719
- *
21720
- * @private internal utility of `parseAgentSource`
21721
- */
21722
- function applyMetaInputPlaceholderContent(state, content) {
21723
- state.meta.inputPlaceholder = spacetrim.spaceTrim(content);
21724
- }
21725
- /**
21726
- * Applies MESSAGE SUFFIX content into the canonical `meta.messageSuffix` field.
21727
- *
21728
- * @private internal utility of `parseAgentSource`
21729
- */
21730
- function applyMessageSuffixContent(state, content) {
21731
- state.meta.messageSuffix = content;
21732
- }
21733
- /**
21734
- * Applies META COLOR content into the canonical `meta.color` field.
21735
- *
21736
- * @private internal utility of `parseAgentSource`
21737
- */
21738
- function applyMetaColorContent(state, content) {
21739
- state.meta.color = normalizeSeparator(content);
21740
- }
21741
- /**
21742
- * Applies META FONT content into the canonical `meta.font` field.
21743
- *
21744
- * @private internal utility of `parseAgentSource`
21745
- */
21746
- function applyMetaFontContent(state, content) {
21747
- state.meta.font = normalizeSeparator(content);
21748
- }
21749
- /**
21750
- * Applies META VOICE content into the canonical `meta.voice` field.
21751
- *
21752
- * @private internal utility of `parseAgentSource`
21753
- */
21754
- function applyMetaVoiceContent(state, content) {
21755
- state.meta.voice = spacetrim.spaceTrim(content);
21756
- }
21757
- /**
21758
- * Ensures the parsed profile always exposes a fullname value.
21759
- *
21760
- * @private internal utility of `parseAgentSource`
21761
- */
21762
- function ensureMetaFullname(meta, fallbackFullname) {
21763
- if (!meta.fullname) {
21764
- meta.fullname = fallbackFullname;
21765
- }
21766
- }
21767
21686
  /**
21768
21687
  * Extracts the first logical line from multiline commitment content.
21769
- *
21770
- * @private internal utility of `parseAgentSource`
21771
21688
  */
21772
21689
function extractFirstCommitmentLine(content) {
    // Trim first, then keep everything up to the first newline (CRLF-safe).
    const [firstLine] = spacetrim.spaceTrim(content).split(/\r?\n/);
    return firstLine || '';
}
21775
21692
  /**
21776
21693
  * Detects local FROM/IMPORT references that should use local-link labels and icons.
21777
- *
21778
- * @private internal utility of `parseAgentSource`
21779
21694
  */
21780
21695
function isLocalAgentReference(reference) {
    for (const prefix of LOCAL_AGENT_REFERENCE_PREFIXES) {
        if (reference.startsWith(prefix)) {
            return true;
        }
    }
    return false;
}
21698
+
21783
21699
  /**
21784
- * Normalizes the separator in the content
21785
- *
21786
- * @param content - The content to normalize
21787
- * @returns The content with normalized separators
21700
+ * Collects capability, sample, meta, link, and knowledge-source data from commitments.
21788
21701
  *
21789
21702
  * @private internal utility of `parseAgentSource`
21790
21703
  */
21791
- function normalizeSeparator(content) {
21792
- const trimmed = spacetrim.spaceTrim(content);
21793
- if (trimmed.includes(',')) {
21794
- return trimmed;
21704
function extractParsedAgentProfile(commitments) {
    // Mutable accumulator shared by every per-commitment processing stage.
    const state = {
        meta: {},
        links: [],
        capabilities: [],
        samples: [],
        knowledgeSources: [],
        pendingUserMessage: null,
        knownKnowledgeSourceUrls: new Set(),
    };
    commitments.forEach((commitment) => processParsedCommitment(state, commitment));
    // Only the public profile fields are exposed; bookkeeping fields stay internal.
    const { meta, links, capabilities, samples, knowledgeSources } = state;
    return { meta, links, capabilities, samples, knowledgeSources };
}
21798
21725
  /**
21799
- * Normalizes META DOMAIN content to a hostname-like value when possible.
21726
+ * Processes one parsed commitment through the sample, capability, and meta stages.
21727
+ */
21728
function processParsedCommitment(state, commitment) {
    // Stage 1: conversation-sample commitments are consumed wholesale.
    if (consumeConversationSampleCommitment(state, commitment)) {
        return;
    }
    // Stage 2: capability-producing commitments.
    const producedCapabilities = createCapabilitiesFromCommitment(state, commitment);
    if (producedCapabilities.length > 0) {
        state.capabilities.push(...producedCapabilities);
        return;
    }
    // Stage 3: everything else may mutate parsed meta.
    applyMetaCommitment(state, commitment);
}
21739
+
21740
+ /**
21741
+ * Parses basic information from agent source
21800
21742
  *
21801
- * @param content - Raw META DOMAIN content.
21802
- * @returns Normalized domain or a trimmed fallback.
21743
+ * There are 2 similar functions:
21744
+ * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
21745
+ * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronously.
21803
21746
  *
21804
- * @private internal utility of `parseAgentSource`
21747
+ * @public exported from `@promptbook/core`
21805
21748
  */
21806
- function normalizeMetaDomain(content) {
21807
- const trimmed = spacetrim.spaceTrim(content);
21808
- return normalizeDomainForMatching(trimmed) || trimmed.toLowerCase();
21749
function parseAgentSource(agentSource) {
    const parsed = parseAgentSourceWithCommitments(agentSource);
    const resolvedAgentName = parsed.agentName || createDefaultAgentName(agentSource);
    const personaDescription = extractAgentProfileText(parsed.commitments);
    const initialMessage = extractInitialMessage(parsed.commitments);
    const profile = extractParsedAgentProfile(parsed.commitments);
    // Guarantee a fullname even when no commitment provided one.
    ensureMetaFullname(profile.meta, resolvedAgentName);
    return {
        agentName: normalizeAgentName(resolvedAgentName),
        agentHash: computeAgentHash(agentSource),
        permanentId: profile.meta.id,
        personaDescription,
        initialMessage,
        meta: profile.meta,
        links: profile.links,
        parameters: parseParameters(agentSource),
        capabilities: profile.capabilities,
        samples: profile.samples,
        knowledgeSources: profile.knowledgeSources,
    };
}
21810
21770
  // TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
21811
21771
 
@@ -35650,422 +35610,123 @@
35650
35610
  updateTldr({
35651
35611
  percent: percent,
35652
35612
  message,
35653
- });
35654
- });
35655
- },
35656
- });
35657
- };
35658
- // <- TODO: Make types such as there is no need to do `as` for `createTask`
35659
- return pipelineExecutor;
35660
- }
35661
-
35662
- /**
35663
- * Prepares the persona for the pipeline
35664
- *
35665
- * @see https://github.com/webgptorg/promptbook/discussions/22
35666
- *
35667
- * @public exported from `@promptbook/core`
35668
- */
35669
- async function preparePersona(personaDescription, tools, options) {
35670
- const { isVerbose = DEFAULT_IS_VERBOSE } = options;
35671
- if (tools === undefined || tools.llm === undefined) {
35672
- throw new MissingToolsError('LLM tools are required for preparing persona');
35673
- }
35674
- // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
35675
- const collection = createPipelineCollectionFromJson(...PipelineCollection);
35676
- const preparePersonaExecutor = createPipelineExecutor({
35677
- pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
35678
- tools,
35679
- });
35680
- const llmTools = getSingleLlmExecutionTools(tools.llm);
35681
- const availableModels = (await llmTools.listModels())
35682
- .filter(({ modelVariant }) => modelVariant === 'CHAT')
35683
- .map(({ modelName, modelDescription }) => ({
35684
- modelName,
35685
- modelDescription,
35686
- // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
35687
- }));
35688
- const result = await preparePersonaExecutor({
35689
- availableModels /* <- Note: Passing as JSON */,
35690
- personaDescription,
35691
- }).asPromise({ isCrashedOnError: true });
35692
- const { outputParameters } = result;
35693
- const { modelsRequirements: modelsRequirementsJson } = outputParameters;
35694
- let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
35695
- if (isVerbose) {
35696
- console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
35697
- }
35698
- if (!Array.isArray(modelsRequirementsUnchecked)) {
35699
- // <- TODO: Book should have syntax and system to enforce shape of JSON
35700
- modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
35701
- /*
35702
- throw new UnexpectedError(
35703
- spaceTrim(
35704
- (block) => `
35705
- Invalid \`modelsRequirements\`:
35706
-
35707
- \`\`\`json
35708
- ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
35709
- \`\`\`
35710
- `,
35711
- ),
35712
- );
35713
- */
35714
- }
35715
- const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
35716
- modelVariant: 'CHAT',
35717
- ...modelRequirements,
35718
- }));
35719
- return {
35720
- modelsRequirements,
35721
- };
35722
- }
35723
- // TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
35724
- // TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
35725
- // TODO: [🏢] Check validity of `modelName` in pipeline
35726
- // TODO: [🏢] Check validity of `systemMessage` in pipeline
35727
- // TODO: [🏢] Check validity of `temperature` in pipeline
35728
-
35729
- /**
35730
- * Creates an empty/basic agent model requirements object
35731
- * This serves as the starting point for the reduce-like pattern
35732
- * where each commitment applies its changes to build the final requirements
35733
- *
35734
- * @public exported from `@promptbook/core`
35735
- */
35736
- function createEmptyAgentModelRequirements() {
35737
- return {
35738
- systemMessage: '',
35739
- promptSuffix: '',
35740
- // modelName: 'gpt-5',
35741
- modelName: 'gpt-5.4-mini',
35742
- temperature: 0.7,
35743
- topP: 0.9,
35744
- topK: 50,
35745
- parentAgentUrl: null,
35746
- isClosed: false,
35747
- };
35748
- }
35749
- /**
35750
- * Creates a basic agent model requirements with just the agent name
35751
- * This is used when we have an agent name but no commitments
35752
- *
35753
- * @public exported from `@promptbook/core`
35754
- */
35755
- function createBasicAgentModelRequirements(agentName) {
35756
- const empty = createEmptyAgentModelRequirements();
35757
- return {
35758
- ...empty,
35759
- systemMessage: `You are ${agentName || 'AI Agent'}`,
35760
- };
35761
- }
35762
- // TODO: [🐤] Deduplicate `AgentModelRequirements` and `ModelRequirements` model requirements
35763
-
35764
- /**
35765
- * Plugin for importing agent books *(`.book` files)*
35766
- *
35767
- * @private [🥝] Maybe export the import plugins through some package
35768
- */
35769
- const AgentFileImportPlugin = {
35770
- name: 'agent-file-import-plugin',
35771
- canImport(mimeType) {
35772
- // [🧠] Should we have a specific MIME type for agent books?
35773
- // For now, let's assume it's identified by .book extension or certain MIME types if provided
35774
- return mimeType === 'text/x-promptbook' || mimeType === 'application/x-promptbook';
35775
- },
35776
- import(content) {
35777
- const parseResult = parseAgentSourceWithCommitments(content);
35778
- // Bring only the agent corpus (non-commitment lines and relevant commitments)
35779
- // Stripping the agent name (which is usually the first line)
35780
- const corpus = parseResult.nonCommitmentLines
35781
- .filter((line, index) => index > 0 || !parseResult.agentName)
35782
- .join('\n')
35783
- .trim();
35784
- // Also include relevant commitments that make up the "corpus" of the agent
35785
- // For example PERSONA, RULE, KNOWLEDGE
35786
- const relevantCommitments = parseResult.commitments
35787
- .filter((c) => ['PERSONA', 'RULE', 'KNOWLEDGE'].includes(c.type))
35788
- .map((c) => `${c.type} ${c.content}`)
35789
- .join('\n\n');
35790
- return spacetrim.spaceTrim((block) => `
35791
- ${block(relevantCommitments)}
35792
-
35793
- ${block(corpus)}
35794
- `).trim();
35795
- },
35796
- };
35797
-
35798
- /**
35799
- * Plugin for importing JSON files
35800
- *
35801
- * @private [🥝] Maybe export the import plugins through some package
35802
- */
35803
- const JsonFileImportPlugin = {
35804
- name: 'json-file-import-plugin',
35805
- canImport(mimeType) {
35806
- return mimeType === 'application/json' || mimeType.endsWith('+json');
35807
- },
35808
- import(content) {
35809
- try {
35810
- const json = JSON.parse(content);
35811
- const formattedJson = JSON.stringify(json, null, 4);
35812
- return `\`\`\`json\n${formattedJson}\n\`\`\``;
35813
- }
35814
- catch (error) {
35815
- // If JSON is invalid, still import it but maybe not as pretty JSON
35816
- return `\`\`\`json\n${content}\n\`\`\``;
35817
- }
35818
- },
35819
- };
35820
-
35821
- /**
35822
- * Plugin for importing generic text files
35823
- *
35824
- * @private [🥝] Maybe export the import plugins through some package
35825
- */
35826
- const TextFileImportPlugin = {
35827
- name: 'text-file-import-plugin',
35828
- canImport(mimeType) {
35829
- return (mimeType === 'text/plain' ||
35830
- mimeType === 'text/markdown' ||
35831
- mimeType === 'text/x-typescript' ||
35832
- mimeType === 'text/javascript' ||
35833
- mimeType === 'text/css' ||
35834
- mimeType === 'text/html' ||
35835
- mimeType.startsWith('text/'));
35836
- },
35837
- import(content, mimeType) {
35838
- const extension = mimeTypeToExtension(mimeType);
35839
- const codeBlockType = extension || 'txt';
35840
- return `\`\`\`${codeBlockType}\n${content}\n\`\`\``;
35841
- },
35842
- };
35843
-
35844
- /**
35845
- * All available file import plugins
35846
- *
35847
- * @private [🥝] Maybe export the import plugins through some package
35848
- */
35849
- const $fileImportPlugins = [
35850
- AgentFileImportPlugin,
35851
- JsonFileImportPlugin,
35852
- TextFileImportPlugin,
35853
- ];
35854
-
35855
- /**
35856
- * Removes single-hash comment lines (`# Comment`) from a system message
35857
- * This is used to clean up the final system message before sending it to the AI model
35858
- * while preserving the original content with comments in metadata
35859
- *
35860
- * @param systemMessage The system message that may contain comment lines
35861
- * @returns The system message with single-hash comment lines removed
35862
- *
35863
- * @private - TODO: [🧠] Maybe should be public?
35864
- */
35865
- function removeCommentsFromSystemMessage(systemMessage) {
35866
- if (!systemMessage) {
35867
- return systemMessage;
35868
- }
35869
- const lines = systemMessage.split(/\r?\n/);
35870
- const filteredLines = lines.filter((line) => {
35871
- const trimmedLine = line.trim();
35872
- // Remove only single-hash comment markers (`# Comment`) and keep markdown headings (`## Heading`).
35873
- return !/^#(?!#)\s/.test(trimmedLine);
35874
- });
35875
- return filteredLines.join('\n').trim();
35876
- }
35877
-
35878
- /**
35879
- * Commitment types whose content may contain compact agent references that must be resolved before applying the commitment.
35880
- *
35881
- * @private internal constant of `createAgentModelRequirementsWithCommitments`
35882
- */
35883
- const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
35884
- /**
35885
- * DELETE-like commitment types that invalidate earlier tagged commitments.
35886
- *
35887
- * @private internal constant of `createAgentModelRequirementsWithCommitments`
35888
- */
35889
- const DELETE_COMMITMENT_TYPES = new Set(['DELETE', 'CANCEL', 'DISCARD', 'REMOVE']);
35890
- /**
35891
- * Commitments whose earlier occurrences are overwritten by the last occurrence in source order.
35892
- *
35893
- * @private internal constant of `createAgentModelRequirementsWithCommitments`
35894
- */
35895
- const OVERWRITTEN_COMMITMENT_GROUP_BY_TYPE = new Map([
35896
- ['GOAL', 'GOAL'],
35897
- ['GOALS', 'GOAL'],
35898
- ]);
35899
- /**
35900
- * Regex pattern matching markdown horizontal lines that should not be copied into the final system message.
35901
- *
35902
- * @private internal constant of `createAgentModelRequirementsWithCommitments`
35903
- */
35904
- const HORIZONTAL_LINE_PATTERN = /^[\s]*[-_*][\s]*[-_*][\s]*[-_*][\s]*[-_*]*[\s]*$/;
35905
- /**
35906
- * MIME type prefixes treated as binary and therefore not eligible for text import plugins.
35907
- *
35908
- * @private internal constant of `createAgentModelRequirementsWithCommitments`
35909
- */
35910
- const BINARY_MIME_TYPE_PREFIXES = [
35911
- 'image/',
35912
- 'video/',
35913
- 'audio/',
35914
- 'application/octet-stream',
35915
- 'application/pdf',
35916
- 'application/zip',
35917
- ];
35918
- /**
35919
- * Returns a safe fallback content when a resolver fails to transform a reference commitment.
35920
- *
35921
- * @param commitmentType - Commitment being resolved.
35922
- * @param originalContent - Original unresolved commitment content.
35923
- * @returns Fallback content that keeps requirement creation resilient.
35924
- *
35925
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
35926
- */
35927
- function getSafeReferenceCommitmentFallback(commitmentType, originalContent) {
35928
- if (commitmentType === 'FROM') {
35929
- return 'VOID';
35930
- }
35931
- if (commitmentType === 'IMPORT' || commitmentType === 'IMPORTS' || commitmentType === 'TEAM') {
35932
- return '';
35933
- }
35934
- return originalContent;
35935
- }
35936
- /**
35937
- * Creates agent model requirements by parsing commitments, applying them in source order,
35938
- * and finalizing derived sections such as imports, example interactions, and inline knowledge uploads.
35939
- *
35940
- * @param agentSource - Agent source book to parse.
35941
- * @param modelName - Optional override for the agent model name.
35942
- * @param options - Additional options such as reference and teammate resolvers.
35943
- * @returns Fully prepared model requirements for the parsed agent source.
35944
- *
35945
- * @private internal utility of `createAgentModelRequirements`
35946
- */
35947
- async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
35948
- const parseResult = parseAgentSourceWithCommitments(agentSource);
35949
- const filteredCommitments = filterOverwrittenCommitments(filterDeletedCommitments(parseResult.commitments));
35950
- let requirements = createInitialAgentModelRequirements(parseResult.agentName, modelName);
35951
- requirements = await applyCommitmentsToRequirements(requirements, filteredCommitments, options);
35952
- requirements = aggregateUseCommitmentSystemMessages(requirements, filteredCommitments);
35953
- requirements = await importReferencedFiles(requirements);
35954
- requirements = appendMcpServers(requirements, agentSource);
35955
- requirements = appendNonCommitmentContent(requirements, parseResult);
35956
- requirements = appendExampleInteractions(requirements, parseResult);
35957
- requirements = await applyPendingInlineKnowledgeSources(requirements, options === null || options === void 0 ? void 0 : options.inlineKnowledgeSourceUploader);
35958
- return finalizeRequirements(requirements);
35959
- }
35960
- /**
35961
- * Removes earlier commitments that are overwritten by later commitments of the same semantic group.
35962
- *
35963
- * This currently keeps only the last `GOAL` / `GOALS` commitment so inheritance rewrites
35964
- * and multi-goal sources expose one effective goal to the runtime.
35965
- *
35966
- * @param commitments - Parsed commitments after DELETE-like filtering.
35967
- * @returns Commitments with overwritten entries removed while preserving source order.
35968
- *
35969
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
35970
- */
35971
- function filterOverwrittenCommitments(commitments) {
35972
- const seenOverwriteGroups = new Set();
35973
- const keptCommitments = [];
35974
- for (let index = commitments.length - 1; index >= 0; index--) {
35975
- const commitment = commitments[index];
35976
- const overwriteGroup = OVERWRITTEN_COMMITMENT_GROUP_BY_TYPE.get(commitment.type);
35977
- if (!overwriteGroup) {
35978
- keptCommitments.push(commitment);
35979
- continue;
35980
- }
35981
- if (seenOverwriteGroups.has(overwriteGroup)) {
35982
- continue;
35983
- }
35984
- seenOverwriteGroups.add(overwriteGroup);
35985
- keptCommitments.push(commitment);
35986
- }
35987
- return keptCommitments.reverse();
35613
+ });
35614
+ });
35615
+ },
35616
+ });
35617
+ };
35618
+ // <- TODO: Make types such as there is no need to do `as` for `createTask`
35619
+ return pipelineExecutor;
35988
35620
  }
35621
+
35989
35622
  /**
35990
- * Creates the initial requirements object with the parsed agent name stored in metadata and an optional model override.
35623
+ * Prepares the persona for the pipeline
35991
35624
  *
35992
- * @param agentName - Parsed agent name from the source prelude.
35993
- * @param modelName - Optional explicit model name override.
35994
- * @returns Initial requirements before any commitment is applied.
35625
+ * @see https://github.com/webgptorg/promptbook/discussions/22
35995
35626
  *
35996
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
35627
+ * @public exported from `@promptbook/core`
35997
35628
  */
35998
- function createInitialAgentModelRequirements(agentName, modelName) {
35999
- const initialRequirements = createBasicAgentModelRequirements(agentName);
36000
- const requirementsWithMetadata = {
36001
- ...initialRequirements,
36002
- _metadata: {
36003
- ...initialRequirements._metadata,
36004
- agentName,
36005
- },
36006
- };
36007
- if (!modelName) {
36008
- return requirementsWithMetadata;
35629
+ async function preparePersona(personaDescription, tools, options) {
35630
+ const { isVerbose = DEFAULT_IS_VERBOSE } = options;
35631
+ if (tools === undefined || tools.llm === undefined) {
35632
+ throw new MissingToolsError('LLM tools are required for preparing persona');
36009
35633
  }
36010
- return {
36011
- ...requirementsWithMetadata,
35634
+ // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
35635
+ const collection = createPipelineCollectionFromJson(...PipelineCollection);
35636
+ const preparePersonaExecutor = createPipelineExecutor({
35637
+ pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
35638
+ tools,
35639
+ });
35640
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
35641
+ const availableModels = (await llmTools.listModels())
35642
+ .filter(({ modelVariant }) => modelVariant === 'CHAT')
35643
+ .map(({ modelName, modelDescription }) => ({
36012
35644
  modelName,
35645
+ modelDescription,
35646
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
35647
+ }));
35648
+ const result = await preparePersonaExecutor({
35649
+ availableModels /* <- Note: Passing as JSON */,
35650
+ personaDescription,
35651
+ }).asPromise({ isCrashedOnError: true });
35652
+ const { outputParameters } = result;
35653
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
35654
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
35655
+ if (isVerbose) {
35656
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
35657
+ }
35658
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
35659
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
35660
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
35661
+ /*
35662
+ throw new UnexpectedError(
35663
+ spaceTrim(
35664
+ (block) => `
35665
+ Invalid \`modelsRequirements\`:
35666
+
35667
+ \`\`\`json
35668
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
35669
+ \`\`\`
35670
+ `,
35671
+ ),
35672
+ );
35673
+ */
35674
+ }
35675
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
35676
+ modelVariant: 'CHAT',
35677
+ ...modelRequirements,
35678
+ }));
35679
+ return {
35680
+ modelsRequirements,
36013
35681
  };
36014
35682
  }
35683
+ // TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
35684
+ // TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
35685
+ // TODO: [🏢] Check validity of `modelName` in pipeline
35686
+ // TODO: [🏢] Check validity of `systemMessage` in pipeline
35687
+ // TODO: [🏢] Check validity of `temperature` in pipeline
35688
+
36015
35689
  /**
36016
- * Applies DELETE-like invalidation commitments and returns only commitments that should continue through the pipeline.
36017
- *
36018
- * @param commitments - Parsed commitments in original source order.
36019
- * @returns Filtered commitments with earlier deleted items removed.
35690
+ * Creates an empty/basic agent model requirements object
35691
+ * This serves as the starting point for the reduce-like pattern
35692
+ * where each commitment applies its changes to build the final requirements
36020
35693
  *
36021
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
35694
+ * @public exported from `@promptbook/core`
36022
35695
  */
36023
- function filterDeletedCommitments(commitments) {
36024
- const filteredCommitments = [];
36025
- for (const commitment of commitments) {
36026
- if (!isDeleteCommitmentType(commitment.type)) {
36027
- filteredCommitments.push(commitment);
36028
- continue;
36029
- }
36030
- const targetParameterNames = getCommitmentParameterNames(commitment.content);
36031
- if (targetParameterNames.length === 0) {
36032
- continue;
36033
- }
36034
- for (let index = filteredCommitments.length - 1; index >= 0; index--) {
36035
- const previousCommitment = filteredCommitments[index];
36036
- const previousParameterNames = getCommitmentParameterNames(previousCommitment.content);
36037
- const isTargeted = previousParameterNames.some((parameterName) => targetParameterNames.includes(parameterName));
36038
- if (isTargeted) {
36039
- filteredCommitments.splice(index, 1);
36040
- }
36041
- }
36042
- }
36043
- return filteredCommitments;
35696
+ function createEmptyAgentModelRequirements() {
35697
+ return {
35698
+ systemMessage: '',
35699
+ promptSuffix: '',
35700
+ // modelName: 'gpt-5',
35701
+ modelName: 'gpt-5.4-mini',
35702
+ temperature: 0.7,
35703
+ topP: 0.9,
35704
+ topK: 50,
35705
+ parentAgentUrl: null,
35706
+ isClosed: false,
35707
+ };
36044
35708
  }
36045
35709
  /**
36046
- * Checks whether a commitment type behaves like DELETE and therefore invalidates earlier tagged commitments.
36047
- *
36048
- * @param commitmentType - Commitment type to check.
36049
- * @returns `true` when the commitment removes prior tagged commitments.
35710
+ * Creates a basic agent model requirements with just the agent name
35711
+ * This is used when we have an agent name but no commitments
36050
35712
  *
36051
- * @private internal utility of `filterDeletedCommitments`
35713
+ * @public exported from `@promptbook/core`
36052
35714
  */
36053
- function isDeleteCommitmentType(commitmentType) {
36054
- return DELETE_COMMITMENT_TYPES.has(commitmentType);
35715
+ function createBasicAgentModelRequirements(agentName) {
35716
+ const empty = createEmptyAgentModelRequirements();
35717
+ return {
35718
+ ...empty,
35719
+ systemMessage: `You are ${agentName || 'AI Agent'}`,
35720
+ };
36055
35721
  }
35722
+ // TODO: [🐤] Deduplicate `AgentModelRequirements` and `ModelRequirements` model requirements
35723
+
36056
35724
  /**
36057
- * Extracts normalized parameter names used for DELETE-like invalidation matching.
36058
- *
36059
- * @param content - Commitment content to parse.
36060
- * @returns Lower-cased non-empty parameter names.
35725
+ * Commitment types whose content may contain compact agent references that must be resolved before applying the commitment.
36061
35726
  *
36062
- * @private internal utility of `filterDeletedCommitments`
35727
+ * @private internal constant of `applyCommitmentsToAgentModelRequirements`
36063
35728
  */
36064
- function getCommitmentParameterNames(content) {
36065
- return parseParameters(content)
36066
- .map((parameter) => parameter.name.trim().toLowerCase())
36067
- .filter(Boolean);
36068
- }
35729
+ const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
36069
35730
  /**
36070
35731
  * Applies parsed commitments one by one while keeping the per-commitment steps focused and easy to follow.
36071
35732
  *
@@ -36074,9 +35735,9 @@
36074
35735
  * @param options - Optional reference and teammate resolvers.
36075
35736
  * @returns Requirements after all applicable commitments are processed.
36076
35737
  *
36077
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
35738
+ * @private function of `createAgentModelRequirementsWithCommitments`
36078
35739
  */
36079
- async function applyCommitmentsToRequirements(requirements, commitments, options) {
35740
+ async function applyCommitmentsToAgentModelRequirements(requirements, commitments, options) {
36080
35741
  for (const [index, commitment] of commitments.entries()) {
36081
35742
  if (shouldSkipCommitmentApplication(commitment, index, commitments.length)) {
36082
35743
  continue;
@@ -36094,7 +35755,7 @@
36094
35755
  * @param agentReferenceResolver - Optional resolver for compact agent references.
36095
35756
  * @returns Original or resolved commitment content.
36096
35757
  *
36097
- * @private internal utility of `applyCommitmentsToRequirements`
35758
+ * @private internal utility of `applyCommitmentsToAgentModelRequirements`
36098
35759
  */
36099
35760
  async function resolveCommitmentContent(commitment, agentReferenceResolver) {
36100
35761
  if (!agentReferenceResolver || !isAgentReferenceCommitment(commitment.type)) {
@@ -36108,6 +35769,24 @@
36108
35769
  return getSafeReferenceCommitmentFallback(commitment.type, commitment.content);
36109
35770
  }
36110
35771
  }
35772
+ /**
35773
+ * Returns a safe fallback content when a resolver fails to transform a reference commitment.
35774
+ *
35775
+ * @param commitmentType - Commitment being resolved.
35776
+ * @param originalContent - Original unresolved commitment content.
35777
+ * @returns Fallback content that keeps requirement creation resilient.
35778
+ *
35779
+ * @private internal utility of `applyCommitmentsToAgentModelRequirements`
35780
+ */
35781
+ function getSafeReferenceCommitmentFallback(commitmentType, originalContent) {
35782
+ if (commitmentType === 'FROM') {
35783
+ return 'VOID';
35784
+ }
35785
+ if (commitmentType === 'IMPORT' || commitmentType === 'IMPORTS' || commitmentType === 'TEAM') {
35786
+ return '';
35787
+ }
35788
+ return originalContent;
35789
+ }
36111
35790
  /**
36112
35791
  * Checks whether the commitment content may need agent-reference resolution before application.
36113
35792
  *
@@ -36127,7 +35806,7 @@
36127
35806
  * @param commitmentCount - Total number of filtered commitments.
36128
35807
  * @returns `true` when the commitment should not be applied.
36129
35808
  *
36130
- * @private internal utility of `applyCommitmentsToRequirements`
35809
+ * @private internal utility of `applyCommitmentsToAgentModelRequirements`
36131
35810
  */
36132
35811
  function shouldSkipCommitmentApplication(commitment, commitmentIndex, commitmentCount) {
36133
35812
  return commitment.type === 'CLOSED' && commitmentIndex !== commitmentCount - 1;
@@ -36141,7 +35820,7 @@
36141
35820
  * @param options - Optional teammate profile resolvers.
36142
35821
  * @returns Requirements with pre-resolved teammate profiles stored in metadata when possible.
36143
35822
  *
36144
- * @private internal utility of `applyCommitmentsToRequirements`
35823
+ * @private internal utility of `applyCommitmentsToAgentModelRequirements`
36145
35824
  */
36146
35825
  async function preResolveTeammateProfilesForTeamCommitment(requirements, commitment, commitmentContent, options) {
36147
35826
  var _a;
@@ -36196,7 +35875,7 @@
36196
35875
  * @param commitmentContent - Final content passed into the definition.
36197
35876
  * @returns Updated requirements, or the original requirements when the commitment fails.
36198
35877
  *
36199
- * @private internal utility of `applyCommitmentsToRequirements`
35878
+ * @private internal utility of `applyCommitmentsToAgentModelRequirements`
36200
35879
  */
36201
35880
  function applyCommitmentDefinitionSafely(requirements, commitment, commitmentContent) {
36202
35881
  const definition = getCommitmentDefinition(commitment.type);
@@ -36211,13 +35890,140 @@
36211
35890
  return requirements;
36212
35891
  }
36213
35892
  }
35893
+
35894
+ /**
35895
+ * Plugin for importing agent books *(`.book` files)*
35896
+ *
35897
+ * @private [🥝] Maybe export the import plugins through some package
35898
+ */
35899
+ const AgentFileImportPlugin = {
35900
+ name: 'agent-file-import-plugin',
35901
+ canImport(mimeType) {
35902
+ // [🧠] Should we have a specific MIME type for agent books?
35903
+ // For now, let's assume it's identified by .book extension or certain MIME types if provided
35904
+ return mimeType === 'text/x-promptbook' || mimeType === 'application/x-promptbook';
35905
+ },
35906
+ import(content) {
35907
+ const parseResult = parseAgentSourceWithCommitments(content);
35908
+ // Bring only the agent corpus (non-commitment lines and relevant commitments)
35909
+ // Stripping the agent name (which is usually the first line)
35910
+ const corpus = parseResult.nonCommitmentLines
35911
+ .filter((line, index) => index > 0 || !parseResult.agentName)
35912
+ .join('\n')
35913
+ .trim();
35914
+ // Also include relevant commitments that make up the "corpus" of the agent
35915
+ // For example PERSONA, RULE, KNOWLEDGE
35916
+ const relevantCommitments = parseResult.commitments
35917
+ .filter((c) => ['PERSONA', 'RULE', 'KNOWLEDGE'].includes(c.type))
35918
+ .map((c) => `${c.type} ${c.content}`)
35919
+ .join('\n\n');
35920
+ return spacetrim.spaceTrim((block) => `
35921
+ ${block(relevantCommitments)}
35922
+
35923
+ ${block(corpus)}
35924
+ `).trim();
35925
+ },
35926
+ };
35927
+
35928
+ /**
35929
+ * Plugin for importing JSON files
35930
+ *
35931
+ * @private [🥝] Maybe export the import plugins through some package
35932
+ */
35933
+ const JsonFileImportPlugin = {
35934
+ name: 'json-file-import-plugin',
35935
+ canImport(mimeType) {
35936
+ return mimeType === 'application/json' || mimeType.endsWith('+json');
35937
+ },
35938
+ import(content) {
35939
+ try {
35940
+ const json = JSON.parse(content);
35941
+ const formattedJson = JSON.stringify(json, null, 4);
35942
+ return `\`\`\`json\n${formattedJson}\n\`\`\``;
35943
+ }
35944
+ catch (error) {
35945
+ // If JSON is invalid, still import it but maybe not as pretty JSON
35946
+ return `\`\`\`json\n${content}\n\`\`\``;
35947
+ }
35948
+ },
35949
+ };
35950
+
35951
+ /**
35952
+ * Plugin for importing generic text files
35953
+ *
35954
+ * @private [🥝] Maybe export the import plugins through some package
35955
+ */
35956
+ const TextFileImportPlugin = {
35957
+ name: 'text-file-import-plugin',
35958
+ canImport(mimeType) {
35959
+ return (mimeType === 'text/plain' ||
35960
+ mimeType === 'text/markdown' ||
35961
+ mimeType === 'text/x-typescript' ||
35962
+ mimeType === 'text/javascript' ||
35963
+ mimeType === 'text/css' ||
35964
+ mimeType === 'text/html' ||
35965
+ mimeType.startsWith('text/'));
35966
+ },
35967
+ import(content, mimeType) {
35968
+ const extension = mimeTypeToExtension(mimeType);
35969
+ const codeBlockType = extension || 'txt';
35970
+ return `\`\`\`${codeBlockType}\n${content}\n\`\`\``;
35971
+ },
35972
+ };
35973
+
35974
+ /**
35975
+ * All available file import plugins
35976
+ *
35977
+ * @private [🥝] Maybe export the import plugins through some package
35978
+ */
35979
+ const $fileImportPlugins = [
35980
+ AgentFileImportPlugin,
35981
+ JsonFileImportPlugin,
35982
+ TextFileImportPlugin,
35983
+ ];
35984
+
35985
+ /**
35986
+ * Regex pattern matching markdown horizontal lines that should not be copied into the final system message.
35987
+ *
35988
+ * @private internal constant of `augmentAgentModelRequirementsFromSource`
35989
+ */
35990
+ const HORIZONTAL_LINE_PATTERN = /^[\s]*[-_*][\s]*[-_*][\s]*[-_*][\s]*[-_*]*[\s]*$/;
35991
+ /**
35992
+ * MIME type prefixes treated as binary and therefore not eligible for text import plugins.
35993
+ *
35994
+ * @private internal constant of `augmentAgentModelRequirementsFromSource`
35995
+ */
35996
+ const BINARY_MIME_TYPE_PREFIXES = [
35997
+ 'image/',
35998
+ 'video/',
35999
+ 'audio/',
36000
+ 'application/octet-stream',
36001
+ 'application/pdf',
36002
+ 'application/zip',
36003
+ ];
36004
+ /**
36005
+ * Adds source-derived sections after commitments have been applied.
36006
+ *
36007
+ * @param requirements - Requirements after commitment application and USE aggregation.
36008
+ * @param parseResult - Parsed source used to recover non-commitment prose and examples.
36009
+ * @param agentSource - Original source used to recover MCP server declarations.
36010
+ * @returns Requirements with source-derived sections appended.
36011
+ *
36012
+ * @private function of `createAgentModelRequirementsWithCommitments`
36013
+ */
36014
+ async function augmentAgentModelRequirementsFromSource(requirements, parseResult, agentSource) {
36015
+ requirements = await importReferencedFiles(requirements);
36016
+ requirements = appendMcpServers(requirements, agentSource);
36017
+ requirements = appendNonCommitmentContent(requirements, parseResult);
36018
+ return appendExampleInteractions(requirements, parseResult);
36019
+ }
36214
36020
  /**
36215
36021
  * Imports text files referenced by IMPORT commitments and appends their transformed content to the system message.
36216
36022
  *
36217
36023
  * @param requirements - Requirements possibly containing `importedFileUrls`.
36218
36024
  * @returns Requirements with imported file content appended to the system message.
36219
36025
  *
36220
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
36026
+ * @private internal utility of `augmentAgentModelRequirementsFromSource`
36221
36027
  */
36222
36028
  async function importReferencedFiles(requirements) {
36223
36029
  const importedFileUrls = requirements.importedFileUrls;
@@ -36303,7 +36109,7 @@
36303
36109
  * @param agentSource - Original agent source used for MCP extraction.
36304
36110
  * @returns Requirements with `mcpServers` set when MCP commitments are present.
36305
36111
  *
36306
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
36112
+ * @private internal utility of `augmentAgentModelRequirementsFromSource`
36307
36113
  */
36308
36114
  function appendMcpServers(requirements, agentSource) {
36309
36115
  const mcpServers = extractMcpServers(agentSource);
@@ -36322,7 +36128,7 @@
36322
36128
  * @param parseResult - Parsed source including non-commitment lines.
36323
36129
  * @returns Requirements with the remaining prose appended to the system message.
36324
36130
  *
36325
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
36131
+ * @private internal utility of `augmentAgentModelRequirementsFromSource`
36326
36132
  */
36327
36133
  function appendNonCommitmentContent(requirements, parseResult) {
36328
36134
  const nonCommitmentContent = getNonCommitmentContent(parseResult);
@@ -36365,7 +36171,7 @@
36365
36171
  * @param parseResult - Parsed source used to recover initial message content.
36366
36172
  * @returns Requirements with the example interaction block appended when examples exist.
36367
36173
  *
36368
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
36174
+ * @private internal utility of `augmentAgentModelRequirementsFromSource`
36369
36175
  */
36370
36176
  function appendExampleInteractions(requirements, parseResult) {
36371
36177
  const exampleInteractionsContent = createExampleInteractionsContent(parseResult, requirements.samples);
@@ -36420,7 +36226,7 @@
36420
36226
  * @param section - Section content to append.
36421
36227
  * @returns Requirements with the additional system-message block appended.
36422
36228
  *
36423
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
36229
+ * @private internal utility of `augmentAgentModelRequirementsFromSource`
36424
36230
  */
36425
36231
  function appendSystemMessageSection(requirements, section) {
36426
36232
  return {
@@ -36429,29 +36235,149 @@
36429
36235
  };
36430
36236
  }
36431
36237
/**
 * Mocked security check for imported files.
 *
 * @param urlOrPath - The URL or local path of the file to check.
 * @returns A promise that resolves if the file is considered safe.
 * @throws Error when the URL or path is flagged as unsafe.
 *
 * @private internal utility of `createImportedFileSystemMessage`
 */
async function mockedSecurityCheck(urlOrPath) {
    // TODO: Implement proper security checks
    // Simulate a short asynchronous verification round-trip.
    await new Promise((resolve) => {
        setTimeout(resolve, 10);
    });
    if (!urlOrPath.includes('malicious')) {
        return;
    }
    throw new Error(`Security check failed for: ${urlOrPath}`);
}
36252
/**
 * Checks whether the given MIME type belongs to a binary file.
 *
 * @param mimeType - The MIME type to check.
 * @returns `true` when the MIME type is treated as binary.
 *
 * @private internal utility of `createImportedFileSystemMessage`
 */
function isBinaryMimeType(mimeType) {
    for (const binaryPrefix of BINARY_MIME_TYPE_PREFIXES) {
        if (mimeType.startsWith(binaryPrefix)) {
            return true;
        }
    }
    return false;
}
36263
+
36264
/**
 * DELETE-like commitment types that invalidate earlier tagged commitments.
 *
 * @private internal constant of `filterCommitmentsForAgentModelRequirements`
 */
const DELETE_COMMITMENT_TYPES = new Set([
    'DELETE',
    'CANCEL',
    'DISCARD',
    'REMOVE',
]);
36270
/**
 * Commitments whose earlier occurrences are overwritten by the last occurrence in source order.
 * Maps a commitment type to its semantic overwrite group.
 *
 * @private internal constant of `filterCommitmentsForAgentModelRequirements`
 */
const OVERWRITTEN_COMMITMENT_GROUP_BY_TYPE = new Map(Object.entries({
    GOAL: 'GOAL',
    GOALS: 'GOAL',
}));
36279
/**
 * Applies the commitment filtering rules used before commitment definitions are executed.
 *
 * @param commitments - Parsed commitments in original source order.
 * @returns Commitments after DELETE-like invalidation and overwritten-goal filtering.
 *
 * @private function of `createAgentModelRequirementsWithCommitments`
 */
function filterCommitmentsForAgentModelRequirements(commitments) {
    // First drop commitments targeted by DELETE-like entries, then collapse overwritten groups.
    const withoutDeleted = filterDeletedCommitments(commitments);
    return filterOverwrittenCommitments(withoutDeleted);
}
36290
/**
 * Removes earlier commitments that are overwritten by later commitments of the same semantic group.
 *
 * @param commitments - Parsed commitments after DELETE-like filtering.
 * @returns Commitments with overwritten entries removed while preserving source order.
 *
 * @private internal utility of `filterCommitmentsForAgentModelRequirements`
 */
function filterOverwrittenCommitments(commitments) {
    // Pass 1: remember the last source index of every overwrite group.
    const lastIndexByGroup = new Map();
    commitments.forEach((commitment, index) => {
        const overwriteGroup = OVERWRITTEN_COMMITMENT_GROUP_BY_TYPE.get(commitment.type);
        if (overwriteGroup) {
            lastIndexByGroup.set(overwriteGroup, index);
        }
    });
    // Pass 2: keep ungrouped commitments and only the last occurrence of each group.
    return commitments.filter((commitment, index) => {
        const overwriteGroup = OVERWRITTEN_COMMITMENT_GROUP_BY_TYPE.get(commitment.type);
        return !overwriteGroup || lastIndexByGroup.get(overwriteGroup) === index;
    });
}
36316
/**
 * Applies DELETE-like invalidation commitments and returns only commitments that should continue through the pipeline.
 *
 * @param commitments - Parsed commitments in original source order.
 * @returns Filtered commitments with earlier deleted items removed.
 *
 * @private internal utility of `filterCommitmentsForAgentModelRequirements`
 */
function filterDeletedCommitments(commitments) {
    const survivingCommitments = [];
    for (const commitment of commitments) {
        // Ordinary commitments flow through untouched.
        if (!isDeleteCommitmentType(commitment.type)) {
            survivingCommitments.push(commitment);
            continue;
        }
        const deletedNames = getCommitmentParameterNames(commitment.content);
        // A DELETE-like commitment without parameters affects nothing and is dropped.
        if (deletedNames.length === 0) {
            continue;
        }
        // Walk backwards so splicing does not shift positions we have yet to inspect.
        const deletedNameSet = new Set(deletedNames);
        for (let position = survivingCommitments.length - 1; position >= 0; position--) {
            const candidateNames = getCommitmentParameterNames(survivingCommitments[position].content);
            if (candidateNames.some((candidateName) => deletedNameSet.has(candidateName))) {
                survivingCommitments.splice(position, 1);
            }
        }
    }
    return survivingCommitments;
}
36346
/**
 * Checks whether a commitment type behaves like DELETE and therefore invalidates earlier tagged commitments.
 *
 * @param commitmentType - Commitment type to check.
 * @returns `true` when the commitment removes prior tagged commitments.
 *
 * @private internal utility of `filterDeletedCommitments`
 */
function isDeleteCommitmentType(commitmentType) {
    const behavesLikeDelete = DELETE_COMMITMENT_TYPES.has(commitmentType);
    return behavesLikeDelete;
}
36357
/**
 * Extracts normalized parameter names used for DELETE-like invalidation matching.
 *
 * @param content - Commitment content to parse.
 * @returns Lower-cased non-empty parameter names.
 *
 * @private internal utility of `filterDeletedCommitments`
 */
function getCommitmentParameterNames(content) {
    const normalizedNames = [];
    for (const parameter of parseParameters(content)) {
        const normalizedName = parameter.name.trim().toLowerCase();
        // Skip names that normalize to the empty string.
        if (normalizedName !== '') {
            normalizedNames.push(normalizedName);
        }
    }
    return normalizedNames;
}
36370
+
36445
36371
  /**
36446
- * Attempts to upload inline knowledge entries, falling back to legacy data URLs when the upload fails or is not configured.
36372
+ * Converts staged inline knowledge files into the final knowledge source URLs stored on requirements.
36447
36373
  *
36448
36374
  * @param requirements - Current requirements snapshot.
36449
36375
  * @param uploader - Optional uploader for inline knowledge files.
36450
36376
  * @returns Requirements with inline knowledge converted into upload URLs or data URLs.
36451
36377
  *
36452
- * @private internal utility of `createAgentModelRequirementsWithCommitments`
36378
+ * @private function of `createAgentModelRequirementsWithCommitments`
36453
36379
  */
36454
- async function applyPendingInlineKnowledgeSources(requirements, uploader) {
36380
+ async function materializeInlineKnowledgeSources(requirements, uploader) {
36455
36381
  var _a;
36456
36382
  const inlineSources = extractInlineKnowledgeSources(requirements._metadata);
36457
36383
  if (inlineSources.length === 0) {
@@ -36477,7 +36403,7 @@
36477
36403
  * @param uploader - Upload implementation provided by the caller.
36478
36404
  * @returns Uploaded knowledge URL or a legacy data URL fallback.
36479
36405
  *
36480
- * @private internal utility of `applyPendingInlineKnowledgeSources`
36406
+ * @private internal utility of `materializeInlineKnowledgeSources`
36481
36407
  */
36482
36408
  async function uploadInlineKnowledgeSourceWithFallback(inlineSource, uploader) {
36483
36409
  try {
@@ -36497,7 +36423,7 @@
36497
36423
  * @param metadata - Current requirements metadata.
36498
36424
  * @returns Inline knowledge files collected during commitment application.
36499
36425
  *
36500
- * @private internal utility of `applyPendingInlineKnowledgeSources`
36426
+ * @private internal utility of `materializeInlineKnowledgeSources`
36501
36427
  */
36502
36428
  function extractInlineKnowledgeSources(metadata) {
36503
36429
  if (!metadata) {
@@ -36512,7 +36438,7 @@
36512
36438
  * @param metadata - Current requirements metadata.
36513
36439
  * @returns Metadata without the temporary inline knowledge staging field.
36514
36440
  *
36515
- * @private internal utility of `applyPendingInlineKnowledgeSources`
36441
+ * @private internal utility of `materializeInlineKnowledgeSources`
36516
36442
  */
36517
36443
  function stripInlineKnowledgeMetadata(metadata) {
36518
36444
  if (!metadata || !Object.prototype.hasOwnProperty.call(metadata, 'inlineKnowledgeSources')) {
@@ -36521,31 +36447,90 @@
36521
36447
  const { inlineKnowledgeSources: _unusedInlineKnowledgeSources, ...rest } = metadata;
36522
36448
  return Object.keys(rest).length > 0 ? rest : undefined;
36523
36449
  }
36450
+
36524
36451
/**
 * Removes single-hash comment lines (`# Comment`) from a system message.
 * This is used to clean up the final system message before sending it to the AI model
 * while preserving the original content with comments in metadata.
 *
 * @param systemMessage The system message that may contain comment lines
 * @returns The system message with single-hash comment lines removed
 *
 * @private - TODO: [🧠] Maybe should be public?
 */
function removeCommentsFromSystemMessage(systemMessage) {
    // Empty / null / undefined messages are returned unchanged.
    if (!systemMessage) {
        return systemMessage;
    }
    const keptLines = [];
    for (const line of systemMessage.split(/\r?\n/)) {
        // Drop only single-hash comment markers (`# Comment`); markdown headings (`## Heading`) stay.
        if (/^#(?!#)\s/.test(line.trim())) {
            continue;
        }
        keptLines.push(line);
    }
    return keptLines.join('\n').trim();
}
36473
+
36474
/**
 * Creates agent model requirements by parsing commitments, applying them in source order,
 * and finalizing derived sections such as imports, example interactions, and inline knowledge uploads.
 *
 * @param agentSource - Agent source book to parse.
 * @param modelName - Optional override for the agent model name.
 * @param options - Additional options such as reference and teammate resolvers.
 * @returns Fully prepared model requirements for the parsed agent source.
 *
 * @private internal utility of `createAgentModelRequirements`
 */
async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
    const parseResult = parseAgentSourceWithCommitments(agentSource);
    const filteredCommitments = filterCommitmentsForAgentModelRequirements(parseResult.commitments);
    // Each stage produces a fresh requirements snapshot; no parameter reassignment.
    const initialRequirements = createInitialAgentModelRequirements(parseResult.agentName, modelName);
    const appliedRequirements = await applyCommitmentsToAgentModelRequirements(initialRequirements, filteredCommitments, options);
    const aggregatedRequirements = aggregateUseCommitmentSystemMessages(appliedRequirements, filteredCommitments);
    const augmentedRequirements = await augmentAgentModelRequirementsFromSource(aggregatedRequirements, parseResult, agentSource);
    const inlineKnowledgeSourceUploader = options === null || options === void 0 ? void 0 : options.inlineKnowledgeSourceUploader;
    const materializedRequirements = await materializeInlineKnowledgeSources(augmentedRequirements, inlineKnowledgeSourceUploader);
    return finalizeRequirements(materializedRequirements);
}
36495
/**
 * Creates the initial requirements object with the parsed agent name stored in metadata and an optional model override.
 *
 * @param agentName - Parsed agent name from the source prelude.
 * @param modelName - Optional explicit model name override.
 * @returns Initial requirements before any commitment is applied.
 *
 * @private internal utility of `createAgentModelRequirementsWithCommitments`
 */
function createInitialAgentModelRequirements(agentName, modelName) {
    const baseRequirements = createBasicAgentModelRequirements(agentName);
    // Record the parsed agent name alongside any metadata the base builder produced.
    const requirementsWithMetadata = {
        ...baseRequirements,
        _metadata: {
            ...baseRequirements._metadata,
            agentName,
        },
    };
    // Only attach `modelName` when an explicit override was supplied.
    return modelName ? { ...requirementsWithMetadata, modelName } : requirementsWithMetadata;
}
36539
36521
/**
 * Performs the final system-message cleanup pass after all other augmentation steps are complete.
 *
 * @param requirements - Fully built requirements before final cleanup.
 * @returns Requirements with comment lines removed from the final system message.
 *
 * @private internal utility of `createAgentModelRequirementsWithCommitments`
 */
function finalizeRequirements(requirements) {
    const cleanedSystemMessage = removeCommentsFromSystemMessage(requirements.systemMessage);
    return {
        ...requirements,
        systemMessage: cleanedSystemMessage,
    };
}
36550
36535
 
36551
36536
  /**