@promptbook/components 0.112.0-42 → 0.112.0-43

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -567,6 +567,7 @@ Prompts marked with `[-]` are not ready yet, prompts containing `@@@` are treate
567
567
  - **Reasoning control:** `--thinking-level low|medium|high|xhigh` for supported runners
568
568
  - **Interactive or unattended runs:** default wait mode, or `--no-wait` for batch execution
569
569
  - **Git safety:** clean working tree check by default, optional `--ignore-git-changes`
570
+ - **Opt-in remote pushes:** commits stay local unless you explicitly pass `--auto-push`
570
571
  - **Prompt triage:** `--priority` to process only more important tasks first
571
572
  - **Failure logging:** failed runs write a neighboring `.error.log`
572
573
  - **Line-ending normalization:** changed files are normalized back to LF by default
@@ -584,6 +585,8 @@ npx ts-node ./src/cli/test/ptbk.ts coder generate-boilerplates --template prompt
584
585
 
585
586
  npx ts-node ./src/cli/test/ptbk.ts coder run --agent github-copilot --model gpt-5.4 --thinking-level xhigh --context AGENTS.md
586
587
 
588
+ npx ts-node ./src/cli/test/ptbk.ts coder run --agent github-copilot --model gpt-5.4 --thinking-level xhigh --context AGENTS.md --auto-push
589
+
587
590
  npx ts-node ./src/cli/test/ptbk.ts coder run --agent github-copilot --model gpt-5.4 --thinking-level xhigh --context AGENTS.md --ignore-git-changes --no-wait
588
591
 
589
592
  npx ts-node ./src/cli/test/ptbk.ts coder find-refactor-candidates
@@ -606,9 +609,11 @@ npx ptbk coder generate-boilerplates
606
609
 
607
610
  npx ptbk coder generate-boilerplates --template prompts/templates/common.md
608
611
 
609
- npx ptbk coder run --agent github-copilot --model gpt-5.4 --thinking-level xhigh --context AGENTS.md
612
+ npx ptbk coder run --agent github-copilot --model gpt-5.4 --thinking-level xhigh --context AGENTS.md --test npm run test
613
+
614
+ npx ptbk coder run --agent github-copilot --model gpt-5.4 --thinking-level xhigh --context AGENTS.md --auto-push
610
615
 
611
- npx ptbk coder run --agent github-copilot --model gpt-5.4 --thinking-level xhigh --context AGENTS.md --ignore-git-changes --no-wait
616
+ npx ptbk coder run --agent github-copilot --model gpt-5.4 --thinking-level xhigh --context AGENTS.md --test npm run test --ignore-git-changes --no-wait
612
617
 
613
618
  npx ptbk coder find-refactor-candidates
614
619
 
@@ -625,7 +630,7 @@ npx ptbk coder verify
625
630
  | ------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | ------ | ---- | ----- | ------------------------------------------------------------------------ |
626
631
  | `ptbk coder init` | Creates `prompts/`, `prompts/done/`, the project-generic template files materialized in `prompts/templates/` (currently `common.md`), and a starter `AGENTS.md`; ensures `.env` contains `CODING_AGENT_GIT_NAME`, `CODING_AGENT_GIT_EMAIL`, and `CODING_AGENT_GIT_SIGNING_KEY`; adds helper coder scripts to `package.json`; ensures `.gitignore` contains `/.tmp`; and configures `.vscode/settings.json` to save pasted prompt images into `prompts/screenshots/`. |
627
632
  | `ptbk coder generate-boilerplates` | Creates new prompt markdown files with fresh emoji tags so you can quickly fill in coding tasks; `--template` accepts either a built-in alias or a markdown file path relative to the project root. |
628
- | `ptbk coder run` | Picks the next ready prompt, appends optional context, runs it through the selected coding agent, marks success or failure, then commits and pushes the result. |
633
+ | `ptbk coder run` | Picks the next ready prompt, appends optional context, runs it through the selected coding agent, can optionally verify each attempt with a shell test command and feed failing output back for retries, then marks success or failure, commits the result, and pushes only when `--auto-push` is enabled. |
629
634
  | `ptbk coder find-refactor-candidates` | Scans the repository for oversized or overpacked files and writes prompt files for likely refactors; `--level <xlow | low | medium | high | xhigh | extreme>` ranges from a very benevolent scan to a very aggressive sweep. |
630
635
  | `ptbk coder verify` | Walks through completed prompts, archives truly finished work, and adds follow-up repair prompts for unfinished results. |
631
636
 
@@ -636,12 +641,14 @@ npx ptbk coder verify
636
641
  | `--agent <name>` | Selects the coding backend. |
637
642
  | `--model <model>` | Chooses the runner model; required for `openai-codex` and `gemini`, optional for `github-copilot`. |
638
643
  | `--context <text-or-file>` | Appends extra instructions inline or from a file like `AGENTS.md`. |
644
+ | `--test <command>` | Runs a verification command after each prompt attempt and feeds failing output back for retries. |
639
645
  | `--thinking-level <level>` | Sets reasoning effort for supported runners. |
640
646
  | `--no-wait` | Skips interactive pauses between prompts for unattended execution. |
641
647
  | `--ignore-git-changes` | Disables the clean-working-tree guard. |
642
648
  | `--priority <n>` | Runs only prompts at or above the given priority. |
643
649
  | `--dry-run` | Prints which prompts are ready instead of executing them. |
644
650
  | `--allow-credits` | Lets OpenAI Codex spend credits when required. |
651
+ | `--auto-push` | Pushes each successful coding-agent commit to the configured remote. |
645
652
  | `--auto-migrate` | Runs testing-server database migrations after each successful prompt. |
646
653
 
647
654
  #### Typical usage pattern
package/esm/index.es.js CHANGED
@@ -40,7 +40,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
40
40
  * @generated
41
41
  * @see https://github.com/webgptorg/promptbook
42
42
  */
43
- const PROMPTBOOK_ENGINE_VERSION = '0.112.0-42';
43
+ const PROMPTBOOK_ENGINE_VERSION = '0.112.0-43';
44
44
  /**
45
45
 * TODO: string_promptbook_version should be constrained to all versions of the Promptbook engine
46
46
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -10800,6 +10800,15 @@ const teamToolFunctions = {};
10800
10800
  * Map of team tool titles.
10801
10801
  */
10802
10802
  const teamToolTitles = {};
10803
+ /**
10804
+ * Shared TEAM usage rules appended ahead of teammate listings.
10805
+ *
10806
+ * @private
10807
+ */
10808
+ const TEAM_SYSTEM_MESSAGE_GUIDANCE_LINES = [
10809
+ '- If a teammate is relevant to the request, consult that teammate using the matching tool.',
10810
+ '- Do not ask the user for information that a listed teammate can provide directly.',
10811
+ ];
10803
10812
  /**
10804
10813
  * Constant for remote agents by Url.
10805
10814
  */
@@ -10889,12 +10898,9 @@ class TeamCommitmentDefinition extends BaseCommitmentDefinition {
10889
10898
  if (updatedTools.some((tool) => tool.name === entry.toolName)) {
10890
10899
  continue;
10891
10900
  }
10892
- const toolDescription = entry.description
10893
- ? `Consult teammate ${entry.teammate.label}\n${entry.description}`
10894
- : `Consult teammate ${entry.teammate.label}`;
10895
10901
  updatedTools.push({
10896
10902
  name: entry.toolName,
10897
- description: toolDescription,
10903
+ description: buildTeamToolDescription(entry),
10898
10904
  parameters: {
10899
10905
  type: 'object',
10900
10906
  properties: {
@@ -10963,22 +10969,72 @@ function resolveTeamTeammateLabels(teamContent, teammates) {
10963
10969
  /**
10964
10970
  * Builds the textual TEAM section body for the final system message.
10965
10971
  *
10966
- * Each teammate is listed with its tool name and, when available, a one-line description.
10967
- * Uses `spaceTrim` to ensure consistent whitespace and indentation.
10972
+ * Each teammate is listed with its tool name, TEAM instructions, and optional profile hints.
10968
10973
  */
10969
10974
  function buildTeamSystemMessageBody(teamEntries) {
10970
- const lines = teamEntries.map((entry, index) => {
10971
- const toolLine = `${index + 1}) ${entry.teammate.label} tool \`${entry.toolName}\``;
10972
- if (!entry.description) {
10973
- return toolLine;
10974
- }
10975
- return spaceTrim$1(`
10976
- ${toolLine}
10977
- ${entry.description}
10978
- `);
10979
- });
10975
+ const lines = [
10976
+ ...TEAM_SYSTEM_MESSAGE_GUIDANCE_LINES,
10977
+ '',
10978
+ ...teamEntries.map((entry, index) => {
10979
+ const toolLine = `${index + 1}) ${entry.teammate.label} tool \`${entry.toolName}\``;
10980
+ const detailLines = collectTeamEntryDetails(entry).map(formatTeamEntryDetailLine);
10981
+ return [toolLine, ...detailLines].join('\n');
10982
+ }),
10983
+ ];
10980
10984
  return lines.join('\n');
10981
10985
  }
10986
+ /**
10987
+ * Builds the model-visible description for one teammate tool.
10988
+ *
10989
+ * @private
10990
+ */
10991
+ function buildTeamToolDescription(entry) {
10992
+ const detailLines = collectTeamEntryDetails(entry).map(({ label, content }) => `${label}: ${content}`);
10993
+ return [`Consult teammate ${entry.teammate.label}`, ...detailLines].join('\n');
10994
+ }
10995
+ /**
10996
+ * Collects structured teammate details that should stay visible to the model.
10997
+ *
10998
+ * @private
10999
+ */
11000
+ function collectTeamEntryDetails(entry) {
11001
+ var _a;
11002
+ const details = [];
11003
+ const instructions = entry.teammate.instructions.trim();
11004
+ const description = ((_a = entry.description) === null || _a === void 0 ? void 0 : _a.trim()) || '';
11005
+ if (instructions) {
11006
+ details.push({
11007
+ label: 'TEAM instructions',
11008
+ content: instructions,
11009
+ });
11010
+ }
11011
+ if (description) {
11012
+ details.push({
11013
+ label: 'Profile',
11014
+ content: description,
11015
+ });
11016
+ }
11017
+ return details;
11018
+ }
11019
+ /**
11020
+ * Formats one teammate detail line for the TEAM system-message section.
11021
+ *
11022
+ * @private
11023
+ */
11024
+ function formatTeamEntryDetailLine(detail) {
11025
+ return indentMultilineText(`${detail.label}: ${detail.content}`, ' ');
11026
+ }
11027
+ /**
11028
+ * Indents all lines of one potentially multi-line text block.
11029
+ *
11030
+ * @private
11031
+ */
11032
+ function indentMultilineText(text, prefix) {
11033
+ return text
11034
+ .split('\n')
11035
+ .map((line) => `${prefix}${line}`)
11036
+ .join('\n');
11037
+ }
10982
11038
  /**
10983
11039
  * Registers tool function and title for a teammate tool.
10984
11040
  */
@@ -30916,7 +30972,7 @@ function createEmptyAgentModelRequirements() {
30916
30972
  systemMessage: '',
30917
30973
  promptSuffix: '',
30918
30974
  // modelName: 'gpt-5',
30919
- modelName: 'gemini-2.5-flash-lite',
30975
+ modelName: 'gpt-5.4-mini',
30920
30976
  temperature: 0.7,
30921
30977
  topP: 0.9,
30922
30978
  topK: 50,
@@ -35787,7 +35843,7 @@ class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
35787
35843
  /**
35788
35844
  * Constant for default agent kit model name.
35789
35845
  */
35790
- const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.4-nano';
35846
+ const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.4-mini';
35791
35847
  /**
35792
35848
  * Creates one structured log entry for streamed tool-call updates.
35793
35849
  *
@@ -37853,6 +37909,7 @@ class AgentLlmExecutionTools {
37853
37909
  * @param agentSource The agent source string that defines the agent's behavior
37854
37910
  */
37855
37911
  constructor(options) {
37912
+ var _a;
37856
37913
  this.options = options;
37857
37914
  /**
37858
37915
  * Cached model requirements to avoid re-parsing the agent source
@@ -37862,6 +37919,7 @@ class AgentLlmExecutionTools {
37862
37919
  * Cached parsed agent information
37863
37920
  */
37864
37921
  this._cachedAgentInfo = null;
37922
+ this.precomputedModelRequirements = (_a = options.precomputedModelRequirements) !== null && _a !== void 0 ? _a : null;
37865
37923
  }
37866
37924
  /**
37867
37925
  * Updates the agent source and clears the cache
@@ -37869,9 +37927,13 @@ class AgentLlmExecutionTools {
37869
37927
  * @param agentSource The new agent source string
37870
37928
  */
37871
37929
  updateAgentSource(agentSource) {
37930
+ if (this.options.agentSource === agentSource) {
37931
+ return;
37932
+ }
37872
37933
  this.options.agentSource = agentSource;
37873
37934
  this._cachedAgentInfo = null;
37874
37935
  this._cachedModelRequirements = null;
37936
+ this.precomputedModelRequirements = null;
37875
37937
  }
37876
37938
  /**
37877
37939
  * Get cached or parse agent information
@@ -37888,6 +37950,16 @@ class AgentLlmExecutionTools {
37888
37950
 * Note: [🐤] This is named `getModelRequirements` *(not `getAgentModelRequirements`)* because in the future these two will be united
37889
37951
  */
37890
37952
  async getModelRequirements() {
37953
+ var _a, _b;
37954
+ if (this.precomputedModelRequirements !== null) {
37955
+ if (this.options.isVerbose) {
37956
+ console.info('[🤰]', 'Using precomputed agent model requirements', {
37957
+ agent: this.title,
37958
+ toolCount: (_b = (_a = this.precomputedModelRequirements.tools) === null || _a === void 0 ? void 0 : _a.length) !== null && _b !== void 0 ? _b : 0,
37959
+ });
37960
+ }
37961
+ return this.precomputedModelRequirements;
37962
+ }
37891
37963
  if (this._cachedModelRequirements === null) {
37892
37964
  const preparationStartedAtMs = Date.now();
37893
37965
  if (this.options.isVerbose) {
@@ -37997,6 +38069,7 @@ class AgentLlmExecutionTools {
37997
38069
  * Resolves agent requirements, attachments, and runtime overrides into one forwarded chat prompt.
37998
38070
  */
37999
38071
  async prepareChatPrompt(prompt) {
38072
+ var _a;
38000
38073
  const chatPrompt = this.requireChatPrompt(prompt);
38001
38074
  const { sanitizedRequirements, promptSuffix } = await this.getSanitizedAgentModelRequirements();
38002
38075
  const attachments = normalizeChatAttachments(chatPrompt.attachments);
@@ -38014,7 +38087,16 @@ class AgentLlmExecutionTools {
38014
38087
  mergedTools,
38015
38088
  knowledgeSourcesForAgent,
38016
38089
  });
38017
- console.log('!!!! promptWithAgentModelRequirements:', forwardedPrompt);
38090
+ if (this.options.isVerbose) {
38091
+ console.info('[🤰]', 'Prepared agent chat prompt', {
38092
+ agent: this.title,
38093
+ usedPrecomputedModelRequirements: this.precomputedModelRequirements !== null,
38094
+ toolNames: mergedTools.map((tool) => tool.name),
38095
+ knowledgeSourcesCount: (_a = knowledgeSourcesForAgent === null || knowledgeSourcesForAgent === void 0 ? void 0 : knowledgeSourcesForAgent.length) !== null && _a !== void 0 ? _a : 0,
38096
+ promptSuffixLength: promptSuffix.length,
38097
+ systemMessageLength: sanitizedRequirements.systemMessage.length,
38098
+ });
38099
+ }
38018
38100
  return {
38019
38101
  forwardedPrompt,
38020
38102
  sanitizedRequirements,
@@ -38201,6 +38283,7 @@ class AgentLlmExecutionTools {
38201
38283
  * Runs one prepared prompt through the deprecated OpenAI Assistant backend.
38202
38284
  */
38203
38285
  async callOpenAiAssistantChatModelStream(options) {
38286
+ var _a, _b, _c, _d;
38204
38287
  const assistant = await this.getOrPrepareOpenAiAssistant({
38205
38288
  llmTools: options.llmTools,
38206
38289
  originalPrompt: options.originalPrompt,
@@ -38208,7 +38291,14 @@ class AgentLlmExecutionTools {
38208
38291
  onProgress: options.onProgress,
38209
38292
  });
38210
38293
  const promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools = createOpenAiAssistantPrompt(options.preparedChatPrompt.forwardedPrompt);
38211
- console.log('!!!! promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools:', promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools);
38294
+ if (this.options.isVerbose) {
38295
+ console.info('[🤰]', 'Prepared OpenAI Assistant prompt', {
38296
+ agent: this.title,
38297
+ toolNames: (_b = (_a = promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools.modelRequirements.tools) === null || _a === void 0 ? void 0 : _a.map((tool) => tool.name)) !== null && _b !== void 0 ? _b : [],
38298
+ knowledgeSourcesCount: (_d = (_c = promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools.modelRequirements
38299
+ .knowledgeSources) === null || _c === void 0 ? void 0 : _c.length) !== null && _d !== void 0 ? _d : 0,
38300
+ });
38301
+ }
38212
38302
  return assistant.callChatModelStream(promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools, options.onProgress, options.streamOptions);
38213
38303
  }
38214
38304
  /**
@@ -38899,7 +38989,8 @@ class Agent extends AgentLlmExecutionTools {
38899
38989
  isVerbose: options.isVerbose,
38900
38990
  llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
38901
38991
  assistantPreparationMode: options.assistantPreparationMode,
38902
- agentSource: agentSource.value, // <- TODO: [🐱‍🚀] Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
38992
+ agentSource: agentSource.value,
38993
+ precomputedModelRequirements: options.precomputedModelRequirements,
38903
38994
  });
38904
38995
  this._agentName = undefined;
38905
38996
  /**
@@ -43162,7 +43253,7 @@ const ChatMessageItem = memo(
43162
43253
  // <- TODO: [🧠] Should we wrap more components in `React.memo`
43163
43254
  // Or make normal function from this?
43164
43255
  (props) => {
43165
- const { message, participant, participants, isLastMessage, onMessage, onActionButton, onQuickMessageButton, setExpandedMessageId, isExpanded, currentRating, handleRating, mode, isCopyButtonEnabled, isFeedbackEnabled, feedbackMode = 'stars', feedbackTranslations, timingTranslations, chatLocale, onCopy, onCreateAgent, toolTitles, teammates, onReplyToMessage, canReplyToMessage, teamAgentProfiles, CHAT_VISUAL_MODE = 'BUBBLE_MODE', onToolCallClick, onCitationClick, soundSystem, isSpeechPlaybackEnabled, elevenLabsVoiceId, chatUiTranslations, } = props;
43256
+ const { message, participant, participants, isLastMessage, onMessage, onActionButton, onQuickMessageButton, setExpandedMessageId, isExpanded, currentRating, handleRating, mode, isCopyButtonEnabled, isFeedbackEnabled, feedbackMode = 'stars', feedbackTranslations, timingTranslations, chatLocale, onCopy, onCreateAgent, toolTitles, teammates, onReplyToMessage, canReplyToMessage, teamAgentProfiles, CHAT_VISUAL_MODE = 'ARTICLE_MODE', onToolCallClick, onCitationClick, soundSystem, isSpeechPlaybackEnabled, elevenLabsVoiceId, chatUiTranslations, } = props;
43166
43257
  const { isComplete = true,
43167
43258
  // <- TODO: Destruct all `messages` properties like `isComplete`
43168
43259
  } = message;
@@ -46962,7 +47053,7 @@ function hasChatActions(postprocessedMessages, { onReset, newChatButtonHref, onU
46962
47053
  * @public exported from `@promptbook/components`
46963
47054
  */
46964
47055
  function Chat(props) {
46965
- const { title = 'Chat', messages, onChange, onMessage, onActionButton, onQuickMessageButton, onReplyToMessage, onCancelReply, onReset, resetRequiresConfirmation = true, newChatButtonHref, onFeedback, feedbackMode = 'stars', feedbackTranslations, timingTranslations, onFileUpload, chatLocale, speechRecognition, placeholderMessageContent, defaultMessage, enterBehavior, resolveEnterBehavior, children, className, style, isAiTextHumanizedAndPromptbookified = true, isVoiceCalling = false, isFocusedOnLoad, participants = [], canReplyToMessage, replyingToMessage, extraActions, actionsContainer, saveFormats, isSaveButtonEnabled = true, isCopyButtonEnabled = true, buttonColor: buttonColorRaw, onUseTemplate, onCreateAgent, toolTitles, teammates, teamAgentProfiles, visual, CHAT_VISUAL_MODE = 'BUBBLE_MODE', effectConfigs, soundSystem, speechRecognitionLanguage, isSpeechPlaybackEnabled = true, elevenLabsVoiceId, chatUiTranslations, } = props;
47056
+ const { title = 'Chat', messages, onChange, onMessage, onActionButton, onQuickMessageButton, onReplyToMessage, onCancelReply, onReset, resetRequiresConfirmation = true, newChatButtonHref, onFeedback, feedbackMode = 'stars', feedbackTranslations, timingTranslations, onFileUpload, chatLocale, speechRecognition, placeholderMessageContent, defaultMessage, enterBehavior, resolveEnterBehavior, children, className, style, isAiTextHumanizedAndPromptbookified = true, isVoiceCalling = false, isFocusedOnLoad, participants = [], canReplyToMessage, replyingToMessage, extraActions, actionsContainer, saveFormats, isSaveButtonEnabled = true, isCopyButtonEnabled = true, buttonColor: buttonColorRaw, onUseTemplate, onCreateAgent, toolTitles, teammates, teamAgentProfiles, visual, CHAT_VISUAL_MODE = 'ARTICLE_MODE', effectConfigs, soundSystem, speechRecognitionLanguage, isSpeechPlaybackEnabled = true, elevenLabsVoiceId, chatUiTranslations, } = props;
46966
47057
  const buttonColor = useMemo(() => Color.from(buttonColorRaw || '#0066cc'), [buttonColorRaw]);
46967
47058
  const agentParticipant = useMemo(() => participants.find((participant) => participant.name === 'AGENT'), [participants]);
46968
47059
  const postprocessedMessages = useChatPostprocessedMessages({