@automattic/jetpack-ai-client 0.13.1 → 0.14.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -5,6 +5,20 @@ All notable changes to this project will be documented in this file.
5
5
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
6
6
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
7
 
8
+ ## [0.14.1] - 2024-05-27
9
+ ### Changed
10
+ - AI Client: Add paragraph tweaks to Markdown conversion libs. [#37461]
11
+ - AI Featured Image: add type info. [#37474]
12
+
13
+ ## [0.14.0] - 2024-05-20
14
+ ### Added
15
+ - AI Client: Expose HTML render rules type. [#37386]
16
+ - AI Featured Image: Support Stable Diffusion image generation. [#37413]
17
+
18
+ ### Changed
19
+ - AI Client: Change default behavior of Message components [#37365]
20
+ - Updated package dependencies. [#37379] [#37380]
21
+
8
22
  ## [0.13.1] - 2024-05-13
9
23
  ### Added
10
24
  - AI Client: Add className to AI Control component. [#37322]
@@ -314,6 +328,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
314
328
  - Updated package dependencies. [#31659]
315
329
  - Updated package dependencies. [#31785]
316
330
 
331
+ [0.14.1]: https://github.com/Automattic/jetpack-ai-client/compare/v0.14.0...v0.14.1
332
+ [0.14.0]: https://github.com/Automattic/jetpack-ai-client/compare/v0.13.1...v0.14.0
317
333
  [0.13.1]: https://github.com/Automattic/jetpack-ai-client/compare/v0.13.0...v0.13.1
318
334
  [0.13.0]: https://github.com/Automattic/jetpack-ai-client/compare/v0.12.4...v0.13.0
319
335
  [0.12.4]: https://github.com/Automattic/jetpack-ai-client/compare/v0.12.3...v0.12.4
@@ -0,0 +1,30 @@
1
+ import { AskQuestionOptionsArgProps } from './index.js';
2
+ import type { PromptProp } from '../types.js';
3
+ /**
4
+ * The response data from the AI assistant when doing a sync, not-streamed question.
5
+ */
6
+ export type ResponseData = {
7
+ choices: Array<{
8
+ message: {
9
+ content: string;
10
+ };
11
+ }>;
12
+ };
13
+ /**
14
+ * A function that asks a question without streaming.
15
+ *
16
+ * @param {PromptProp} question - The question to ask. It can be a simple string or an array of PromptMessageItemProps objects.
17
+ * @param {AskQuestionOptionsArgProps} options - An optional object for additional configuration: postId, feature, model.
18
+ * @returns {Promise<ResponseData>} - A promise that resolves to an instance of the ResponseData
19
+ * @example
20
+ * const question = "What is the meaning of life?";
21
+ * const options = {
22
+ * feature: 'ai-featured-image',
23
+ * model: 'gpt-4-turbo'
24
+ * }
25
+ * askQuestionSync( question, options ).then( responseData => {
26
+ * // access the choices array on the response data
27
+ * const content = responseData.choices[ 0 ].message.content;
28
+ * } );
29
+ */
30
+ export default function askQuestionSync(question: PromptProp, { postId, feature, model }?: AskQuestionOptionsArgProps): Promise<ResponseData>;
@@ -0,0 +1,66 @@
1
+ /**
2
+ * External dependencies
3
+ */
4
+ import debugFactory from 'debug';
5
+ /*
6
+ * Types & constants
7
+ */
8
+ import requestJwt from '../jwt/index.js';
9
+ const debug = debugFactory('jetpack-ai-client:ask-question-sync');
10
+ /**
11
+ * A function that asks a question without streaming.
12
+ *
13
+ * @param {PromptProp} question - The question to ask. It can be a simple string or an array of PromptMessageItemProps objects.
14
+ * @param {AskQuestionOptionsArgProps} options - An optional object for additional configuration: postId, feature, model.
15
+ * @returns {Promise<ResponseData>} - A promise that resolves to an instance of the ResponseData
16
+ * @example
17
+ * const question = "What is the meaning of life?";
18
+ * const options = {
19
+ * feature: 'ai-featured-image',
20
+ * model: 'gpt-4-turbo'
21
+ * }
22
+ * askQuestionSync( question, options ).then( responseData => {
23
+ * // access the choices array on the response data
24
+ * const content = responseData.choices[ 0 ].message.content;
25
+ * } );
26
+ */
27
+ export default async function askQuestionSync(question, { postId = null, feature, model } = {}) {
28
+ debug('Asking question with no streaming: %o. options: %o', question, {
29
+ postId,
30
+ feature,
31
+ model,
32
+ });
33
+ /**
34
+ * The URL to the AI assistant query endpoint.
35
+ */
36
+ const URL = 'https://public-api.wordpress.com/wpcom/v2/jetpack-ai-query';
37
+ let token = null;
38
+ try {
39
+ token = (await requestJwt()).token;
40
+ }
41
+ catch (error) {
42
+ debug('Error getting token: %o', error);
43
+ return Promise.reject(error);
44
+ }
45
+ const body = {
46
+ question: question,
47
+ stream: false,
48
+ postId,
49
+ feature,
50
+ model,
51
+ };
52
+ const headers = {
53
+ Authorization: `Bearer ${token}`,
54
+ 'Content-Type': 'application/json',
55
+ };
56
+ const data = await fetch(URL, {
57
+ method: 'POST',
58
+ headers,
59
+ body: JSON.stringify(body),
60
+ }).then(response => response.json());
61
+ if (data?.data?.status && data?.data?.status > 200) {
62
+ debug('Error generating prompt: %o', data);
63
+ return Promise.reject(data);
64
+ }
65
+ return data;
66
+ }
@@ -3,7 +3,7 @@ import './style.scss';
3
3
  /**
4
4
  * Types
5
5
  */
6
- import type { RequestingStateProp } from '../../types.js';
6
+ import type { RequestingErrorProps, RequestingStateProp } from '../../types.js';
7
7
  import type { ReactElement, MouseEvent } from 'react';
8
8
  type ExtensionAIControlProps = {
9
9
  className?: string;
@@ -14,7 +14,7 @@ type ExtensionAIControlProps = {
14
14
  isTransparent?: boolean;
15
15
  state?: RequestingStateProp;
16
16
  showGuideLine?: boolean;
17
- error?: string;
17
+ error?: RequestingErrorProps;
18
18
  requestsRemaining?: number;
19
19
  showUpgradeMessage?: boolean;
20
20
  wrapperRef?: React.MutableRefObject<HTMLDivElement | null>;
@@ -75,8 +75,8 @@ export function ExtensionAIControl({ className, disabled = false, value = '', pl
75
75
  });
76
76
  const actions = (_jsx(_Fragment, { children: loading ? (_jsx(Button, { className: "jetpack-components-ai-control__controls-prompt_button", onClick: stopHandler, variant: "secondary", label: __('Stop request', 'jetpack-ai-client'), children: showButtonLabels ? __('Stop', 'jetpack-ai-client') : _jsx(Icon, { icon: closeSmall }) })) : (_jsxs(_Fragment, { children: [value?.length > 0 && (_jsx("div", { className: "jetpack-components-ai-control__controls-prompt_button_wrapper", children: _jsx(Button, { className: "jetpack-components-ai-control__controls-prompt_button", onClick: sendHandler, variant: "primary", disabled: !value?.length || disabled, label: __('Send request', 'jetpack-ai-client'), children: showButtonLabels ? (__('Generate', 'jetpack-ai-client')) : (_jsx(Icon, { icon: arrowUp })) }) })), value?.length <= 0 && state === 'done' && (_jsx("div", { className: "jetpack-components-ai-control__controls-prompt_button_wrapper", children: _jsxs(ButtonGroup, { children: [_jsx(Button, { className: "jetpack-components-ai-control__controls-prompt_button", label: __('Undo', 'jetpack-ai-client'), onClick: undoHandler, tooltipPosition: "top", children: _jsx(Icon, { icon: undo }) }), _jsx(Button, { className: "jetpack-components-ai-control__controls-prompt_button", label: __('Close', 'jetpack-ai-client'), onClick: closeHandler, variant: "tertiary", children: __('Close', 'jetpack-ai-client') })] }) }))] })) }));
77
77
  let message = null;
78
- if (error) {
79
- message = _jsx(ErrorMessage, { error: error, onTryAgainClick: tryAgainHandler });
78
+ if (error?.message) {
79
+ message = (_jsx(ErrorMessage, { error: error.message, code: error.code, onTryAgainClick: tryAgainHandler, onUpgradeClick: upgradeHandler }));
80
80
  }
81
81
  else if (showUpgradeMessage) {
82
82
  message = (_jsx(UpgradeMessage, { requestsRemaining: requestsRemaining, onUpgradeClick: upgradeHandler }));
@@ -5,6 +5,7 @@ import './style.scss';
5
5
  /**
6
6
  * Types
7
7
  */
8
+ import type { SuggestionErrorCode } from '../../types.js';
8
9
  import type React from 'react';
9
10
  export declare const MESSAGE_SEVERITY_WARNING = "warning";
10
11
  export declare const MESSAGE_SEVERITY_ERROR = "error";
@@ -19,13 +20,17 @@ export type MessageProps = {
19
20
  onSidebarIconClick?: () => void;
20
21
  children: React.ReactNode;
21
22
  };
23
+ export type OnUpgradeClick = (event?: React.MouseEvent<HTMLButtonElement>) => void;
22
24
  export type UpgradeMessageProps = {
23
25
  requestsRemaining: number;
24
- onUpgradeClick: (event?: React.MouseEvent<HTMLButtonElement>) => void;
26
+ severity?: MessageSeverityProp;
27
+ onUpgradeClick: OnUpgradeClick;
25
28
  };
26
29
  export type ErrorMessageProps = {
27
30
  error?: string;
31
+ code?: SuggestionErrorCode;
28
32
  onTryAgainClick: () => void;
33
+ onUpgradeClick: OnUpgradeClick;
29
34
  };
30
35
  /**
31
36
  * React component to render a block message.
@@ -46,12 +51,12 @@ export declare function GuidelineMessage(): React.ReactElement;
46
51
  * @param {number} requestsRemaining - Number of requests remaining.
47
52
  * @returns {React.ReactElement } - Message component.
48
53
  */
49
- export declare function UpgradeMessage({ requestsRemaining, onUpgradeClick, }: UpgradeMessageProps): React.ReactElement;
54
+ export declare function UpgradeMessage({ requestsRemaining, severity, onUpgradeClick, }: UpgradeMessageProps): React.ReactElement;
50
55
  /**
51
56
  * React component to render an error message
52
57
  *
53
58
  * @param {number} requestsRemaining - Number of requests remaining.
54
59
  * @returns {React.ReactElement } - Message component.
55
60
  */
56
- export declare function ErrorMessage({ error, onTryAgainClick }: ErrorMessageProps): React.ReactElement;
61
+ export declare function ErrorMessage({ error, code, onTryAgainClick, onUpgradeClick, }: ErrorMessageProps): React.ReactElement;
57
62
  export {};
@@ -11,6 +11,7 @@ import classNames from 'classnames';
11
11
  */
12
12
  import './style.scss';
13
13
  import errorExclamation from '../../icons/error-exclamation.js';
14
+ import { ERROR_QUOTA_EXCEEDED } from '../../types.js';
14
15
  export const MESSAGE_SEVERITY_WARNING = 'warning';
15
16
  export const MESSAGE_SEVERITY_ERROR = 'error';
16
17
  export const MESSAGE_SEVERITY_SUCCESS = 'success';
@@ -50,8 +51,12 @@ export function GuidelineMessage() {
50
51
  * @param {number} requestsRemaining - Number of requests remaining.
51
52
  * @returns {React.ReactElement } - Message component.
52
53
  */
53
- export function UpgradeMessage({ requestsRemaining, onUpgradeClick, }) {
54
- return (_jsxs(Message, { severity: MESSAGE_SEVERITY_WARNING, children: [_jsx("span", { children: sprintf(
54
+ export function UpgradeMessage({ requestsRemaining, severity, onUpgradeClick, }) {
55
+ let messageSeverity = severity;
56
+ if (messageSeverity == null) {
57
+ messageSeverity = requestsRemaining > 0 ? MESSAGE_SEVERITY_INFO : MESSAGE_SEVERITY_WARNING;
58
+ }
59
+ return (_jsxs(Message, { severity: messageSeverity, children: [_jsx("span", { children: sprintf(
55
60
  // translators: %1$d: number of requests remaining
56
61
  __('You have %1$d free requests remaining.', 'jetpack-ai-client'), requestsRemaining) }), _jsx(Button, { variant: "link", onClick: onUpgradeClick, children: __('Upgrade now', 'jetpack-ai-client') })] }));
57
62
  }
@@ -61,9 +66,9 @@ export function UpgradeMessage({ requestsRemaining, onUpgradeClick, }) {
61
66
  * @param {number} requestsRemaining - Number of requests remaining.
62
67
  * @returns {React.ReactElement } - Message component.
63
68
  */
64
- export function ErrorMessage({ error, onTryAgainClick }) {
69
+ export function ErrorMessage({ error, code, onTryAgainClick, onUpgradeClick, }) {
65
70
  const errorMessage = error || __('Something went wrong', 'jetpack-ai-client');
66
71
  return (_jsxs(Message, { severity: MESSAGE_SEVERITY_ERROR, children: [_jsx("span", { children: sprintf(
67
72
  // translators: %1$d: A dynamic error message
68
- __('Error: %1$s', 'jetpack-ai-client'), errorMessage) }), _jsx(Button, { variant: "link", onClick: onTryAgainClick, children: __('Try Again', 'jetpack-ai-client') })] }));
73
+ __('Error: %1$s', 'jetpack-ai-client'), errorMessage) }), code === ERROR_QUOTA_EXCEEDED ? (_jsx(Button, { variant: "link", onClick: onUpgradeClick, children: __('Upgrade now', 'jetpack-ai-client') })) : (_jsx(Button, { variant: "link", onClick: onTryAgainClick, children: __('Try again', 'jetpack-ai-client') }))] }));
69
74
  }
@@ -1,13 +1,22 @@
1
+ /**
2
+ * The type of the response from the image generation API.
3
+ */
4
+ type ImageGenerationResponse = {
5
+ data: Array<{
6
+ [key: string]: string;
7
+ }>;
8
+ };
1
9
  declare const useImageGenerator: () => {
2
10
  generateImage: ({ feature, postContent, responseFormat, userPrompt, }: {
3
11
  feature: string;
4
12
  postContent: string;
5
13
  responseFormat?: 'url' | 'b64_json';
6
14
  userPrompt?: string;
7
- }) => Promise<{
8
- data: {
9
- [key: string]: string;
10
- }[];
11
- }>;
15
+ }) => Promise<ImageGenerationResponse>;
16
+ generateImageWithStableDiffusion: ({ feature, postContent, userPrompt, }: {
17
+ feature: string;
18
+ postContent: string;
19
+ userPrompt?: string;
20
+ }) => Promise<ImageGenerationResponse>;
12
21
  };
13
22
  export default useImageGenerator;
@@ -5,6 +5,7 @@ import debugFactory from 'debug';
5
5
  /**
6
6
  * Internal dependencies
7
7
  */
8
+ import askQuestionSync from '../../ask-question/sync.js';
8
9
  import requestJwt from '../../jwt/index.js';
9
10
  const debug = debugFactory('ai-client:use-image-generator');
10
11
  /**
@@ -27,7 +28,7 @@ const truncateContent = (content, currentPromptLength) => {
27
28
  * @param {string} userPrompt - the user prompt for the image generation, if provided. Max length is 1000 characters, will be truncated.
28
29
  * @returns {string} the prompt string
29
30
  */
30
- const getImageGenerationPrompt = (postContent, userPrompt) => {
31
+ const getDalleImageGenerationPrompt = (postContent, userPrompt) => {
31
32
  /**
32
33
  * If the user provide some custom prompt for the image generation,
33
34
  * we will use it, add the post content as additional context and
@@ -73,8 +74,62 @@ This is the post content:
73
74
  // truncating the content so the whole prompt is not longer than 4000 characters, the model limit.
74
75
  return imageGenerationPrompt + truncateContent(postContent, imageGenerationPrompt.length);
75
76
  };
77
+ /**
78
+ * Create the Stable Diffusion pre-processing prompt based on the provided context.
79
+ * @param {string} postContent - the content of the post.
80
+ * @param {string} userPrompt - the user prompt for the image generation, if provided. Max length is 1000 characters, will be truncated.
81
+ * @returns {string} the prompt string to be fed to the AI Assistant model.
82
+ */
83
+ const getStableDiffusionPreProcessingPrompt = (postContent, userPrompt) => {
84
+ /**
85
+ * If the user provide some custom prompt for the image generation,
86
+ * we will use it and add the post content as additional context.
87
+ */
88
+ if (userPrompt) {
89
+ const preProcessingPrompt = `I need a Stable Diffusion prompt to generate a featured image for a blog post based on this user-provided image description:
90
+
91
+ ${userPrompt.length > 1000 ? userPrompt.substring(0, 1000) : userPrompt}
92
+
93
+ The image should be a photo. Make sure you highlight the main suject of the image description, and include brief details about the light and style of the image.
94
+ Include a request to use high resolution and produce a highly detailed image, with sharp focus.
95
+ Return just the prompt, without comments.
96
+
97
+ For additional context, this is the post content:
98
+
99
+ `;
100
+ // truncating the content so the whole prompt is not longer than 4000 characters, the model limit.
101
+ return preProcessingPrompt + truncateContent(postContent, preProcessingPrompt.length);
102
+ }
103
+ /**
104
+ * When the user does not provide a custom prompt, we will use the
105
+ * standard one, based solely on the post content.
106
+ */
107
+ const preProcessingPrompt = `I need a Stable Diffusion prompt to generate a featured image for a blog post with the following content.
108
+ The image should be a photo. Make sure you highlight the main suject of the content, and include brief details about the light and style of the image.
109
+ Include a request to use high resolution and produce a highly detailed image, with sharp focus.
110
+ Return just the prompt, without comments. The content is:
111
+
112
+ `;
113
+ // truncating the content so the whole prompt is not longer than 4000 characters, the model limit.
114
+ return preProcessingPrompt + truncateContent(postContent, preProcessingPrompt.length);
115
+ };
116
+ /**
117
+ * Uses the Jetpack AI query endpoint to produce a prompt for the stable diffusion model.
118
+ * @param {string} postContent - the content of the post.
119
+ * @param {string} userPrompt - the user prompt for the image generation, if provided. Max length is 1000 characters, will be truncated
120
+ * @param {string} feature - the feature to be used for the image generation.
121
+ * @returns {string} the prompt string to be used on stable diffusion image generation.
122
+ */
123
+ const getStableDiffusionImageGenerationPrompt = async (postContent, userPrompt, feature) => {
124
+ const prompt = getStableDiffusionPreProcessingPrompt(postContent, userPrompt);
125
+ /**
126
+ * Request the prompt on the AI Assistant endpoint
127
+ */
128
+ const data = await askQuestionSync(prompt, { feature });
129
+ return data.choices?.[0]?.message?.content;
130
+ };
76
131
  const useImageGenerator = () => {
77
- const generateImage = async function ({ feature, postContent, responseFormat = 'url', userPrompt, }) {
132
+ const executeImageGeneration = async function (parameters) {
78
133
  let token = '';
79
134
  try {
80
135
  token = (await requestJwt()).token;
@@ -84,15 +139,7 @@ const useImageGenerator = () => {
84
139
  return Promise.reject(error);
85
140
  }
86
141
  try {
87
- debug('Generating image');
88
- const imageGenerationPrompt = getImageGenerationPrompt(postContent, userPrompt);
89
142
  const URL = 'https://public-api.wordpress.com/wpcom/v2/jetpack-ai-image';
90
- const body = {
91
- prompt: imageGenerationPrompt,
92
- response_format: responseFormat,
93
- feature,
94
- size: '1792x1024',
95
- };
96
143
  const headers = {
97
144
  Authorization: `Bearer ${token}`,
98
145
  'Content-Type': 'application/json',
@@ -100,7 +147,7 @@ const useImageGenerator = () => {
100
147
  const data = await fetch(URL, {
101
148
  method: 'POST',
102
149
  headers,
103
- body: JSON.stringify(body),
150
+ body: JSON.stringify(parameters),
104
151
  }).then(response => response.json());
105
152
  if (data?.data?.status && data?.data?.status > 200) {
106
153
  debug('Error generating image: %o', data);
@@ -113,8 +160,45 @@ const useImageGenerator = () => {
113
160
  return Promise.reject(error);
114
161
  }
115
162
  };
163
+ const generateImageWithStableDiffusion = async function ({ feature, postContent, userPrompt, }) {
164
+ try {
165
+ debug('Generating image with Stable Diffusion');
166
+ const prompt = await getStableDiffusionImageGenerationPrompt(postContent, userPrompt, feature);
167
+ const parameters = {
168
+ prompt,
169
+ feature,
170
+ model: 'stable-diffusion',
171
+ style: 'photographic',
172
+ };
173
+ const data = await executeImageGeneration(parameters);
174
+ return data;
175
+ }
176
+ catch (error) {
177
+ debug('Error generating image: %o', error);
178
+ return Promise.reject(error);
179
+ }
180
+ };
181
+ const generateImage = async function ({ feature, postContent, responseFormat = 'url', userPrompt, }) {
182
+ try {
183
+ debug('Generating image');
184
+ const imageGenerationPrompt = getDalleImageGenerationPrompt(postContent, userPrompt);
185
+ const parameters = {
186
+ prompt: imageGenerationPrompt,
187
+ response_format: responseFormat,
188
+ feature,
189
+ size: '1792x1024',
190
+ };
191
+ const data = await executeImageGeneration(parameters);
192
+ return data;
193
+ }
194
+ catch (error) {
195
+ debug('Error generating image: %o', error);
196
+ return Promise.reject(error);
197
+ }
198
+ };
116
199
  return {
117
200
  generateImage,
201
+ generateImageWithStableDiffusion,
118
202
  };
119
203
  };
120
204
  export default useImageGenerator;
@@ -1 +1,2 @@
1
1
  export { MarkdownToHTML, HTMLToMarkdown, renderHTMLFromMarkdown, renderMarkdownFromHTML, } from './markdown/index.js';
2
+ export type { RenderHTMLRules } from './markdown/index.js';
@@ -5,11 +5,19 @@ import TurndownService from 'turndown';
5
5
  /**
6
6
  * Types
7
7
  */
8
- import type { Options, Rule } from 'turndown';
8
+ import type { Options, Rule, Filter } from 'turndown';
9
+ export type Fix = 'paragraph';
9
10
  export default class HTMLToMarkdown {
10
11
  turndownService: TurndownService;
11
- constructor(options?: Options, rules?: {
12
- [key: string]: Rule;
12
+ fixes: Fix[];
13
+ constructor({ options, rules, keep, remove, fixes, }?: {
14
+ options?: Options;
15
+ rules?: {
16
+ [key: string]: Rule;
17
+ };
18
+ keep?: Filter;
19
+ remove?: Filter;
20
+ fixes?: Fix[];
13
21
  });
14
22
  /**
15
23
  * Renders HTML from Markdown content with specified processing rules.
@@ -2,6 +2,12 @@
2
2
  * External dependencies
3
3
  */
4
4
  import TurndownService from 'turndown';
5
+ const fixesList = {
6
+ paragraph: (content) => {
7
+ // Keep <br> tags to prevent paragraphs from being split
8
+ return content.replaceAll('\n', '<br />');
9
+ },
10
+ };
5
11
  const defaultTurndownOptions = { emDelimiter: '_', headingStyle: 'atx' };
6
12
  const defaultTurndownRules = {
7
13
  strikethrough: {
@@ -13,10 +19,15 @@ const defaultTurndownRules = {
13
19
  };
14
20
  export default class HTMLToMarkdown {
15
21
  turndownService;
16
- constructor(options = defaultTurndownOptions, rules = defaultTurndownRules) {
17
- this.turndownService = new TurndownService(options);
18
- for (const rule in rules) {
19
- this.turndownService.addRule(rule, rules[rule]);
22
+ fixes;
23
+ constructor({ options = {}, rules = {}, keep = [], remove = [], fixes = [], } = {}) {
24
+ this.fixes = fixes;
25
+ this.turndownService = new TurndownService({ ...defaultTurndownOptions, ...options });
26
+ this.turndownService.keep(keep);
27
+ this.turndownService.remove(remove);
28
+ const allRules = { ...defaultTurndownRules, ...rules };
29
+ for (const rule in allRules) {
30
+ this.turndownService.addRule(rule, allRules[rule]);
20
31
  }
21
32
  }
22
33
  /**
@@ -26,6 +37,9 @@ export default class HTMLToMarkdown {
26
37
  * @returns {string} The rendered Markdown content
27
38
  */
28
39
  render({ content }) {
29
- return this.turndownService.turndown(content);
40
+ const rendered = this.turndownService.turndown(content);
41
+ return this.fixes.reduce((renderedContent, fix) => {
42
+ return fixesList[fix](renderedContent);
43
+ }, rendered);
30
44
  }
31
45
  }
@@ -7,9 +7,10 @@ import MarkdownToHTML from './markdown-to-html.js';
7
7
  * Types
8
8
  */
9
9
  import type { Fix as HTMLFix } from './markdown-to-html.js';
10
+ export type RenderHTMLRules = 'all' | Array<HTMLFix>;
10
11
  declare const renderHTMLFromMarkdown: ({ content, rules, }: {
11
12
  content: string;
12
- rules?: Array<HTMLFix> | 'all';
13
+ rules?: RenderHTMLRules;
13
14
  }) => string;
14
15
  declare const renderMarkdownFromHTML: ({ content }: {
15
16
  content: string;
@@ -6,7 +6,7 @@ import MarkdownIt from 'markdown-it';
6
6
  * Types
7
7
  */
8
8
  import type { Options } from 'markdown-it';
9
- export type Fix = 'list';
9
+ export type Fix = 'list' | 'paragraph';
10
10
  export default class MarkdownToHTML {
11
11
  markdownConverter: MarkdownIt;
12
12
  constructor(options?: Options);
@@ -7,6 +7,10 @@ const fixes = {
7
7
  // Fix list indentation
8
8
  return content.replace(/<li>\s+<p>/g, '<li>').replace(/<\/p>\s+<\/li>/g, '</li>');
9
9
  },
10
+ paragraph: (content) => {
11
+ // Fix encoding of <br /> tags
12
+ return content.replaceAll(/\s*&lt;br \/&gt;\s*/g, '<br />');
13
+ },
10
14
  };
11
15
  const defaultMarkdownItOptions = {
12
16
  breaks: true,
package/build/types.d.ts CHANGED
@@ -39,3 +39,4 @@ export type Block = {
39
39
  originalContent?: string;
40
40
  };
41
41
  export type TranscriptionState = RecordingState | 'validating' | 'processing' | 'error';
42
+ export type { RenderHTMLRules } from './libs/index.js';
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "private": false,
3
3
  "name": "@automattic/jetpack-ai-client",
4
- "version": "0.13.1",
4
+ "version": "0.14.1",
5
5
  "description": "A JS client for consuming Jetpack AI services",
6
6
  "homepage": "https://github.com/Automattic/jetpack/tree/HEAD/projects/js-packages/ai-client/#readme",
7
7
  "bugs": {
@@ -42,24 +42,24 @@
42
42
  "main": "./build/index.js",
43
43
  "types": "./build/index.d.ts",
44
44
  "dependencies": {
45
- "@automattic/jetpack-base-styles": "^0.6.24",
46
- "@automattic/jetpack-connection": "^0.33.10",
47
- "@automattic/jetpack-shared-extension-utils": "^0.14.12",
45
+ "@automattic/jetpack-base-styles": "^0.6.25",
46
+ "@automattic/jetpack-connection": "^0.33.11",
47
+ "@automattic/jetpack-shared-extension-utils": "^0.14.13",
48
48
  "@microsoft/fetch-event-source": "2.0.1",
49
49
  "@types/react": "18.3.1",
50
- "@wordpress/api-fetch": "6.53.0",
51
- "@wordpress/block-editor": "12.24.0",
52
- "@wordpress/components": "27.4.0",
53
- "@wordpress/compose": "6.33.0",
54
- "@wordpress/data": "9.26.0",
55
- "@wordpress/element": "5.33.0",
56
- "@wordpress/i18n": "4.56.0",
57
- "@wordpress/icons": "9.47.0",
50
+ "@wordpress/api-fetch": "6.54.0",
51
+ "@wordpress/block-editor": "12.25.0",
52
+ "@wordpress/components": "27.5.0",
53
+ "@wordpress/compose": "6.34.0",
54
+ "@wordpress/data": "9.27.0",
55
+ "@wordpress/element": "5.34.0",
56
+ "@wordpress/i18n": "4.57.0",
57
+ "@wordpress/icons": "9.48.0",
58
58
  "classnames": "2.3.2",
59
59
  "debug": "4.3.4",
60
60
  "markdown-it": "14.0.0",
61
- "react": "18.2.0",
62
- "react-dom": "18.2.0",
61
+ "react": "18.3.1",
62
+ "react-dom": "18.3.1",
63
63
  "turndown": "7.1.2"
64
64
  }
65
65
  }
@@ -0,0 +1,91 @@
1
+ /**
2
+ * External dependencies
3
+ */
4
+ import debugFactory from 'debug';
5
+ /*
6
+ * Types & constants
7
+ */
8
+ import requestJwt from '../jwt/index.js';
9
+ import { AskQuestionOptionsArgProps } from './index.js';
10
+ import type { PromptProp } from '../types.js';
11
+
12
+ /**
13
+ * The response data from the AI assistant when doing a sync, not-streamed question.
14
+ */
15
+ export type ResponseData = {
16
+ choices: Array< {
17
+ message: {
18
+ content: string;
19
+ };
20
+ } >;
21
+ };
22
+
23
+ const debug = debugFactory( 'jetpack-ai-client:ask-question-sync' );
24
+
25
+ /**
26
+ * A function that asks a question without streaming.
27
+ *
28
+ * @param {PromptProp} question - The question to ask. It can be a simple string or an array of PromptMessageItemProps objects.
29
+ * @param {AskQuestionOptionsArgProps} options - An optional object for additional configuration: postId, feature, model.
30
+ * @returns {Promise<ResponseData>} - A promise that resolves to an instance of the ResponseData
31
+ * @example
32
+ * const question = "What is the meaning of life?";
33
+ * const options = {
34
+ * feature: 'ai-featured-image',
35
+ * model: 'gpt-4-turbo'
36
+ * }
37
+ * askQuestionSync( question, options ).then( responseData => {
38
+ * // access the choices array on the response data
39
+ * const content = responseData.choices[ 0 ].message.content;
40
+ * } );
41
+ */
42
+ export default async function askQuestionSync(
43
+ question: PromptProp,
44
+ { postId = null, feature, model }: AskQuestionOptionsArgProps = {}
45
+ ): Promise< ResponseData > {
46
+ debug( 'Asking question with no streaming: %o. options: %o', question, {
47
+ postId,
48
+ feature,
49
+ model,
50
+ } );
51
+
52
+ /**
53
+ * The URL to the AI assistant query endpoint.
54
+ */
55
+ const URL = 'https://public-api.wordpress.com/wpcom/v2/jetpack-ai-query';
56
+
57
+ let token = null;
58
+
59
+ try {
60
+ token = ( await requestJwt() ).token;
61
+ } catch ( error ) {
62
+ debug( 'Error getting token: %o', error );
63
+ return Promise.reject( error );
64
+ }
65
+
66
+ const body = {
67
+ question: question,
68
+ stream: false,
69
+ postId,
70
+ feature,
71
+ model,
72
+ };
73
+
74
+ const headers = {
75
+ Authorization: `Bearer ${ token }`,
76
+ 'Content-Type': 'application/json',
77
+ };
78
+
79
+ const data = await fetch( URL, {
80
+ method: 'POST',
81
+ headers,
82
+ body: JSON.stringify( body ),
83
+ } ).then( response => response.json() );
84
+
85
+ if ( data?.data?.status && data?.data?.status > 200 ) {
86
+ debug( 'Error generating prompt: %o', data );
87
+ return Promise.reject( data );
88
+ }
89
+
90
+ return data as ResponseData;
91
+ }
@@ -16,7 +16,7 @@ import './style.scss';
16
16
  /**
17
17
  * Types
18
18
  */
19
- import type { RequestingStateProp } from '../../types.js';
19
+ import type { RequestingErrorProps, RequestingStateProp } from '../../types.js';
20
20
  import type { ReactElement, MouseEvent } from 'react';
21
21
 
22
22
  type ExtensionAIControlProps = {
@@ -28,7 +28,7 @@ type ExtensionAIControlProps = {
28
28
  isTransparent?: boolean;
29
29
  state?: RequestingStateProp;
30
30
  showGuideLine?: boolean;
31
- error?: string;
31
+ error?: RequestingErrorProps;
32
32
  requestsRemaining?: number;
33
33
  showUpgradeMessage?: boolean;
34
34
  wrapperRef?: React.MutableRefObject< HTMLDivElement | null >;
@@ -202,8 +202,16 @@ export function ExtensionAIControl(
202
202
  );
203
203
 
204
204
  let message = null;
205
- if ( error ) {
206
- message = <ErrorMessage error={ error } onTryAgainClick={ tryAgainHandler } />;
205
+
206
+ if ( error?.message ) {
207
+ message = (
208
+ <ErrorMessage
209
+ error={ error.message }
210
+ code={ error.code }
211
+ onTryAgainClick={ tryAgainHandler }
212
+ onUpgradeClick={ upgradeHandler }
213
+ />
214
+ );
207
215
  } else if ( showUpgradeMessage ) {
208
216
  message = (
209
217
  <UpgradeMessage requestsRemaining={ requestsRemaining } onUpgradeClick={ upgradeHandler } />
@@ -10,9 +10,11 @@ import classNames from 'classnames';
10
10
  */
11
11
  import './style.scss';
12
12
  import errorExclamation from '../../icons/error-exclamation.js';
13
+ import { ERROR_QUOTA_EXCEEDED } from '../../types.js';
13
14
  /**
14
15
  * Types
15
16
  */
17
+ import type { SuggestionErrorCode } from '../../types.js';
16
18
  import type React from 'react';
17
19
 
18
20
  export const MESSAGE_SEVERITY_WARNING = 'warning';
@@ -37,14 +39,19 @@ export type MessageProps = {
37
39
  children: React.ReactNode;
38
40
  };
39
41
 
42
+ export type OnUpgradeClick = ( event?: React.MouseEvent< HTMLButtonElement > ) => void;
43
+
40
44
  export type UpgradeMessageProps = {
41
45
  requestsRemaining: number;
42
- onUpgradeClick: ( event?: React.MouseEvent< HTMLButtonElement > ) => void;
46
+ severity?: MessageSeverityProp;
47
+ onUpgradeClick: OnUpgradeClick;
43
48
  };
44
49
 
45
50
  export type ErrorMessageProps = {
46
51
  error?: string;
52
+ code?: SuggestionErrorCode;
47
53
  onTryAgainClick: () => void;
54
+ onUpgradeClick: OnUpgradeClick;
48
55
  };
49
56
 
50
57
  const messageIconsMap = {
@@ -113,10 +120,17 @@ export function GuidelineMessage(): React.ReactElement {
113
120
  */
114
121
  export function UpgradeMessage( {
115
122
  requestsRemaining,
123
+ severity,
116
124
  onUpgradeClick,
117
125
  }: UpgradeMessageProps ): React.ReactElement {
126
+ let messageSeverity = severity;
127
+
128
+ if ( messageSeverity == null ) {
129
+ messageSeverity = requestsRemaining > 0 ? MESSAGE_SEVERITY_INFO : MESSAGE_SEVERITY_WARNING;
130
+ }
131
+
118
132
  return (
119
- <Message severity={ MESSAGE_SEVERITY_WARNING }>
133
+ <Message severity={ messageSeverity }>
120
134
  <span>
121
135
  { sprintf(
122
136
  // translators: %1$d: number of requests remaining
@@ -137,7 +151,12 @@ export function UpgradeMessage( {
137
151
  * @param {number} requestsRemaining - Number of requests remaining.
138
152
  * @returns {React.ReactElement } - Message component.
139
153
  */
140
- export function ErrorMessage( { error, onTryAgainClick }: ErrorMessageProps ): React.ReactElement {
154
+ export function ErrorMessage( {
155
+ error,
156
+ code,
157
+ onTryAgainClick,
158
+ onUpgradeClick,
159
+ }: ErrorMessageProps ): React.ReactElement {
141
160
  const errorMessage = error || __( 'Something went wrong', 'jetpack-ai-client' );
142
161
 
143
162
  return (
@@ -149,9 +168,15 @@ export function ErrorMessage( { error, onTryAgainClick }: ErrorMessageProps ): R
149
168
  errorMessage
150
169
  ) }
151
170
  </span>
152
- <Button variant="link" onClick={ onTryAgainClick }>
153
- { __( 'Try Again', 'jetpack-ai-client' ) }
154
- </Button>
171
+ { code === ERROR_QUOTA_EXCEEDED ? (
172
+ <Button variant="link" onClick={ onUpgradeClick }>
173
+ { __( 'Upgrade now', 'jetpack-ai-client' ) }
174
+ </Button>
175
+ ) : (
176
+ <Button variant="link" onClick={ onTryAgainClick }>
177
+ { __( 'Try again', 'jetpack-ai-client' ) }
178
+ </Button>
179
+ ) }
155
180
  </Message>
156
181
  );
157
182
  }
@@ -5,10 +5,18 @@ import debugFactory from 'debug';
5
5
  /**
6
6
  * Internal dependencies
7
7
  */
8
+ import askQuestionSync from '../../ask-question/sync.js';
8
9
  import requestJwt from '../../jwt/index.js';
9
10
 
10
11
  const debug = debugFactory( 'ai-client:use-image-generator' );
11
12
 
13
+ /**
14
+ * The type of the response from the image generation API.
15
+ */
16
+ type ImageGenerationResponse = {
17
+ data: Array< { [ key: string ]: string } >;
18
+ };
19
+
12
20
  /**
13
21
  * Cut the post content on a given length so the total length of the prompt is not longer than 4000 characters.
14
22
  * @param {string} content - the content to be truncated
@@ -30,7 +38,7 @@ const truncateContent = ( content: string, currentPromptLength: number ): string
30
38
  * @param {string} userPrompt - the user prompt for the image generation, if provided. Max length is 1000 characters, will be truncated.
31
39
  * @returns {string} the prompt string
32
40
  */
33
- const getImageGenerationPrompt = ( postContent: string, userPrompt?: string ): string => {
41
+ const getDalleImageGenerationPrompt = ( postContent: string, userPrompt?: string ): string => {
34
42
  /**
35
43
  * If the user provide some custom prompt for the image generation,
36
44
  * we will use it, add the post content as additional context and
@@ -78,18 +86,77 @@ This is the post content:
78
86
  return imageGenerationPrompt + truncateContent( postContent, imageGenerationPrompt.length );
79
87
  };
80
88
 
89
+ /**
90
+ * Create the Stable Diffusion pre-processing prompt based on the provided context.
91
+ * @param {string} postContent - the content of the post.
92
+ * @param {string} userPrompt - the user prompt for the image generation, if provided. Max length is 1000 characters, will be truncated.
93
+ * @returns {string} the prompt string to be fed to the AI Assistant model.
94
+ */
95
+ const getStableDiffusionPreProcessingPrompt = (
96
+ postContent: string,
97
+ userPrompt?: string
98
+ ): string => {
99
+ /**
100
+ * If the user provides some custom prompt for the image generation,
101
+ * we will use it and add the post content as additional context.
102
+ */
103
+ if ( userPrompt ) {
104
+ const preProcessingPrompt = `I need a Stable Diffusion prompt to generate a featured image for a blog post based on this user-provided image description:
105
+
106
+ ${ userPrompt.length > 1000 ? userPrompt.substring( 0, 1000 ) : userPrompt }
107
+
108
+ The image should be a photo. Make sure you highlight the main subject of the image description, and include brief details about the light and style of the image.
109
+ Include a request to use high resolution and produce a highly detailed image, with sharp focus.
110
+ Return just the prompt, without comments.
111
+
112
+ For additional context, this is the post content:
113
+
114
+ `;
115
+ // truncating the content so the whole prompt is not longer than 4000 characters, the model limit.
116
+ return preProcessingPrompt + truncateContent( postContent, preProcessingPrompt.length );
117
+ }
118
+
119
+ /**
120
+ * When the user does not provide a custom prompt, we will use the
121
+ * standard one, based solely on the post content.
122
+ */
123
+ const preProcessingPrompt = `I need a Stable Diffusion prompt to generate a featured image for a blog post with the following content.
124
+ The image should be a photo. Make sure you highlight the main subject of the content, and include brief details about the light and style of the image.
125
+ Include a request to use high resolution and produce a highly detailed image, with sharp focus.
126
+ Return just the prompt, without comments. The content is:
127
+
128
+ `;
129
+
130
+ // truncating the content so the whole prompt is not longer than 4000 characters, the model limit.
131
+ return preProcessingPrompt + truncateContent( postContent, preProcessingPrompt.length );
132
+ };
133
+
134
+ /**
135
+ * Uses the Jetpack AI query endpoint to produce a prompt for the stable diffusion model.
136
+ * @param {string} postContent - the content of the post.
137
+ * @param {string} userPrompt - the user prompt for the image generation, if provided. Max length is 1000 characters, will be truncated
138
+ * @param {string} feature - the feature to be used for the image generation.
139
+ * @returns {Promise<string>} the prompt string to be used on Stable Diffusion image generation.
140
+ */
141
+ const getStableDiffusionImageGenerationPrompt = async (
142
+ postContent: string,
143
+ userPrompt?: string,
144
+ feature?: string
145
+ ): Promise< string > => {
146
+ const prompt = getStableDiffusionPreProcessingPrompt( postContent, userPrompt );
147
+
148
+ /**
149
+ * Request the prompt on the AI Assistant endpoint
150
+ */
151
+ const data = await askQuestionSync( prompt, { feature } );
152
+
153
+ return data.choices?.[ 0 ]?.message?.content;
154
+ };
155
+
81
156
  const useImageGenerator = () => {
82
- const generateImage = async function ( {
83
- feature,
84
- postContent,
85
- responseFormat = 'url',
86
- userPrompt,
87
- }: {
88
- feature: string;
89
- postContent: string;
90
- responseFormat?: 'url' | 'b64_json';
91
- userPrompt?: string;
92
- } ): Promise< { data: Array< { [ key: string ]: string } > } > {
157
+ const executeImageGeneration = async function ( parameters: {
158
+ [ key: string ]: string;
159
+ } ): Promise< ImageGenerationResponse > {
93
160
  let token = '';
94
161
 
95
162
  try {
@@ -100,19 +167,8 @@ const useImageGenerator = () => {
100
167
  }
101
168
 
102
169
  try {
103
- debug( 'Generating image' );
104
-
105
- const imageGenerationPrompt = getImageGenerationPrompt( postContent, userPrompt );
106
-
107
170
  const URL = 'https://public-api.wordpress.com/wpcom/v2/jetpack-ai-image';
108
171
 
109
- const body = {
110
- prompt: imageGenerationPrompt,
111
- response_format: responseFormat,
112
- feature,
113
- size: '1792x1024',
114
- };
115
-
116
172
  const headers = {
117
173
  Authorization: `Bearer ${ token }`,
118
174
  'Content-Type': 'application/json',
@@ -121,7 +177,7 @@ const useImageGenerator = () => {
121
177
  const data = await fetch( URL, {
122
178
  method: 'POST',
123
179
  headers,
124
- body: JSON.stringify( body ),
180
+ body: JSON.stringify( parameters ),
125
181
  } ).then( response => response.json() );
126
182
 
127
183
  if ( data?.data?.status && data?.data?.status > 200 ) {
@@ -129,7 +185,71 @@ const useImageGenerator = () => {
129
185
  return Promise.reject( data );
130
186
  }
131
187
 
132
- return data as { data: { [ key: string ]: string }[] };
188
+ return data as ImageGenerationResponse;
189
+ } catch ( error ) {
190
+ debug( 'Error generating image: %o', error );
191
+ return Promise.reject( error );
192
+ }
193
+ };
194
+
195
+ const generateImageWithStableDiffusion = async function ( {
196
+ feature,
197
+ postContent,
198
+ userPrompt,
199
+ }: {
200
+ feature: string;
201
+ postContent: string;
202
+ userPrompt?: string;
203
+ } ): Promise< ImageGenerationResponse > {
204
+ try {
205
+ debug( 'Generating image with Stable Diffusion' );
206
+
207
+ const prompt = await getStableDiffusionImageGenerationPrompt(
208
+ postContent,
209
+ userPrompt,
210
+ feature
211
+ );
212
+
213
+ const parameters = {
214
+ prompt,
215
+ feature,
216
+ model: 'stable-diffusion',
217
+ style: 'photographic',
218
+ };
219
+
220
+ const data: ImageGenerationResponse = await executeImageGeneration( parameters );
221
+ return data;
222
+ } catch ( error ) {
223
+ debug( 'Error generating image: %o', error );
224
+ return Promise.reject( error );
225
+ }
226
+ };
227
+
228
+ const generateImage = async function ( {
229
+ feature,
230
+ postContent,
231
+ responseFormat = 'url',
232
+ userPrompt,
233
+ }: {
234
+ feature: string;
235
+ postContent: string;
236
+ responseFormat?: 'url' | 'b64_json';
237
+ userPrompt?: string;
238
+ } ): Promise< ImageGenerationResponse > {
239
+ try {
240
+ debug( 'Generating image' );
241
+
242
+ const imageGenerationPrompt = getDalleImageGenerationPrompt( postContent, userPrompt );
243
+
244
+ const parameters = {
245
+ prompt: imageGenerationPrompt,
246
+ response_format: responseFormat,
247
+ feature,
248
+ size: '1792x1024',
249
+ };
250
+
251
+ const data: ImageGenerationResponse = await executeImageGeneration( parameters );
252
+ return data;
133
253
  } catch ( error ) {
134
254
  debug( 'Error generating image: %o', error );
135
255
  return Promise.reject( error );
@@ -138,6 +258,7 @@ const useImageGenerator = () => {
138
258
 
139
259
  return {
140
260
  generateImage,
261
+ generateImageWithStableDiffusion,
141
262
  };
142
263
  };
143
264
 
package/src/libs/index.ts CHANGED
@@ -4,3 +4,5 @@ export {
4
4
  renderHTMLFromMarkdown,
5
5
  renderMarkdownFromHTML,
6
6
  } from './markdown/index.js';
7
+
8
+ export type { RenderHTMLRules } from './markdown/index.js';
@@ -35,7 +35,7 @@ const rules = {
35
35
  }
36
36
  }
37
37
  };
38
- const renderer = new HTMLToMarkdown( options, rules );
38
+ const renderer = new HTMLToMarkdown( { options, rules } );
39
39
  const markdownContent = renderer.render( { content: htmlContent } );
40
40
  // ***Hello world***
41
41
  ```
@@ -5,7 +5,19 @@ import TurndownService from 'turndown';
5
5
  /**
6
6
  * Types
7
7
  */
8
- import type { Options, Rule } from 'turndown';
8
+ import type { Options, Rule, Filter } from 'turndown';
9
+
10
+ export type Fix = 'paragraph';
11
+ type Fixes = {
12
+ [ key in Fix ]: ( content: string ) => string;
13
+ };
14
+
15
+ const fixesList: Fixes = {
16
+ paragraph: ( content: string ) => {
17
+ // Keep <br> tags to prevent paragraphs from being split
18
+ return content.replaceAll( '\n', '<br />' );
19
+ },
20
+ };
9
21
 
10
22
  const defaultTurndownOptions: Options = { emDelimiter: '_', headingStyle: 'atx' };
11
23
  const defaultTurndownRules: { [ key: string ]: Rule } = {
@@ -19,14 +31,29 @@ const defaultTurndownRules: { [ key: string ]: Rule } = {
19
31
 
20
32
  export default class HTMLToMarkdown {
21
33
  turndownService: TurndownService;
34
+ fixes: Fix[];
35
+
36
+ constructor( {
37
+ options = {},
38
+ rules = {},
39
+ keep = [],
40
+ remove = [],
41
+ fixes = [],
42
+ }: {
43
+ options?: Options;
44
+ rules?: { [ key: string ]: Rule };
45
+ keep?: Filter;
46
+ remove?: Filter;
47
+ fixes?: Fix[];
48
+ } = {} ) {
49
+ this.fixes = fixes;
50
+ this.turndownService = new TurndownService( { ...defaultTurndownOptions, ...options } );
51
+ this.turndownService.keep( keep );
52
+ this.turndownService.remove( remove );
22
53
 
23
- constructor(
24
- options: Options = defaultTurndownOptions,
25
- rules: { [ key: string ]: Rule } = defaultTurndownRules
26
- ) {
27
- this.turndownService = new TurndownService( options );
28
- for ( const rule in rules ) {
29
- this.turndownService.addRule( rule, rules[ rule ] );
54
+ const allRules = { ...defaultTurndownRules, ...rules };
55
+ for ( const rule in allRules ) {
56
+ this.turndownService.addRule( rule, allRules[ rule ] );
30
57
  }
31
58
  }
32
59
 
@@ -37,6 +64,10 @@ export default class HTMLToMarkdown {
37
64
  * @returns {string} The rendered Markdown content
38
65
  */
39
66
  render( { content }: { content: string } ): string {
40
- return this.turndownService.turndown( content );
67
+ const rendered = this.turndownService.turndown( content );
68
+
69
+ return this.fixes.reduce( ( renderedContent, fix ) => {
70
+ return fixesList[ fix ]( renderedContent );
71
+ }, rendered );
41
72
  }
42
73
  }
@@ -11,12 +11,14 @@ import type { Fix as HTMLFix } from './markdown-to-html.js';
11
11
  const defaultMarkdownConverter = new MarkdownToHTML();
12
12
  const defaultHTMLConverter = new HTMLToMarkdown();
13
13
 
14
+ export type RenderHTMLRules = 'all' | Array< HTMLFix >;
15
+
14
16
  const renderHTMLFromMarkdown = ( {
15
17
  content,
16
18
  rules = 'all',
17
19
  }: {
18
20
  content: string;
19
- rules?: Array< HTMLFix > | 'all';
21
+ rules?: RenderHTMLRules;
20
22
  } ) => {
21
23
  return defaultMarkdownConverter.render( { content, rules } );
22
24
  };
@@ -7,7 +7,7 @@ import MarkdownIt from 'markdown-it';
7
7
  */
8
8
  import type { Options } from 'markdown-it';
9
9
 
10
- export type Fix = 'list';
10
+ export type Fix = 'list' | 'paragraph';
11
11
  type Fixes = {
12
12
  [ key in Fix ]: ( content: string ) => string;
13
13
  };
@@ -17,6 +17,10 @@ const fixes: Fixes = {
17
17
  // Fix list indentation
18
18
  return content.replace( /<li>\s+<p>/g, '<li>' ).replace( /<\/p>\s+<\/li>/g, '</li>' );
19
19
  },
20
+ paragraph: ( content: string ) => {
21
+ // Fix encoding of <br /> tags
22
+ return content.replaceAll( /\s*&lt;br \/&gt;\s*/g, '<br />' );
23
+ },
20
24
  };
21
25
 
22
26
  const defaultMarkdownItOptions: Options = {
package/src/types.ts CHANGED
@@ -108,3 +108,8 @@ export type Block = {
108
108
  * Transcription types
109
109
  */
110
110
  export type TranscriptionState = RecordingState | 'validating' | 'processing' | 'error';
111
+
112
+ /*
113
+ * Lib types
114
+ */
115
+ export type { RenderHTMLRules } from './libs/index.js';