@jutge.org/toolkit 4.4.16 → 4.4.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. package/README.md +1 -0
  2. package/assets/problems/quizzes/demo-quiz.pbm/README.md +1 -0
  3. package/assets/problems/quizzes/demo-quiz.pbm/ca/award.png +0 -0
  4. package/assets/problems/quizzes/demo-quiz.pbm/ca/quiz.yml +1 -1
  5. package/assets/problems/quizzes/demo-quiz.pbm/ca/single-choice.yml +9 -1
  6. package/assets/problems/quizzes/demo-quiz.pbm/ca/some-drawing.svg +150 -0
  7. package/assets/problems/quizzes/demo-quiz.pbm/ca/some-image.png +0 -0
  8. package/assets/problems/quizzes/demo-quiz.pbm/en/quiz.yml +1 -1
  9. package/assets/problems/quizzes/demo-quiz.pbm/en/single-choice.yml +9 -1
  10. package/assets/problems/quizzes/demo-quiz.pbm/en/some-drawing.svg +150 -0
  11. package/assets/problems/quizzes/demo-quiz.pbm/en/some-image.png +0 -0
  12. package/assets/problems/quizzes/heroes-of-computation.pbm/README.md +11 -0
  13. package/assets/problems/quizzes/heroes-of-computation.pbm/en/handler.yml +1 -0
  14. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q01.yml +14 -0
  15. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q02.yml +14 -0
  16. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q03.yml +14 -0
  17. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q04.yml +15 -0
  18. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q05.yml +32 -0
  19. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q06.yml +12 -0
  20. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q07.yml +23 -0
  21. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q08.yml +14 -0
  22. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q09.yml +14 -0
  23. package/assets/problems/quizzes/heroes-of-computation.pbm/en/q10.yml +23 -0
  24. package/assets/problems/quizzes/heroes-of-computation.pbm/en/quiz.yml +48 -0
  25. package/assets/problems/quizzes/the-answer-is-42.pbm/en/quiz.yml +2 -0
  26. package/assets/python/pyexec.py +9 -2
  27. package/dist/index.js +479 -471
  28. package/docs/getting-started-guide.md +1 -1
  29. package/docs/login.md +0 -1
  30. package/docs/problem-anatomy.md +1 -1
  31. package/docs/quiz-anatomy.md +382 -0
  32. package/package.json +2 -1
  33. package/toolkit/ask.ts +1 -1
  34. package/toolkit/dummies.ts +5 -4
  35. package/toolkit/generate.ts +19 -16
  36. package/toolkit/lint.ts +3 -4
  37. package/toolkit/make.ts +20 -23
  38. package/toolkit/quiz.ts +49 -14
  39. package/toolkit/share.ts +5 -4
  40. package/toolkit/submit.ts +4 -1
@@ -1,6 +1,6 @@
1
1
  # Jutge Toolkit - Getting Started Guide
2
2
 
3
- Welcome to the New Jutge Toolkit! This guide will help you start using the toolkit to create, manage, and upload programming problems to Jutge.org. Information about problem formats will be provided in a separate document.
3
+ Welcome to the New Jutge Toolkit! This guide will help you start using the toolkit to create, manage, and upload programming problems to Jutge.org. For general problem structure and file formats, see [Problem anatomy](problem-anatomy.md); for quiz problems, see [Quiz anatomy](quiz-anatomy.md).
4
4
 
5
5
  ## What is Jutge Toolkit?
6
6
 
package/docs/login.md CHANGED
@@ -1 +0,0 @@
1
-
@@ -34,7 +34,7 @@ There are four types of problems in Jutge.org:
34
34
 
35
35
  - **Games**: Problems where users must implement an AI to play a game against other users or predefined strategies. These problems often involve turn-based gameplay and require users to implement specific functions to decide their moves. These are not covered in this document.
36
36
 
37
- - **Quizzes**: Problems where users must answer multiple-choice questions. These are not covered in this document.
37
+ - **Quizzes**: Problems where users must answer multiple-choice, fill-in, ordering, matching, or open-ended questions. See [Quiz anatomy](quiz-anatomy.md) for details.
38
38
 
39
39
  ## Problem structure
40
40
 
@@ -0,0 +1,382 @@
1
+ # Quiz anatomy
2
+
3
+ This document describes the anatomy of a **quiz problem** in [Jutge.org](https://jutge.org/). It explains the terminology used, the structure of a quiz, and the purpose of each file. Quiz problems are a type of problem where users answer multiple-choice, fill-in, ordering, matching, or open-ended questions instead of submitting a program. Quiz questions and answers may be randomized for each run. For the general structure of a problem folder (e.g. `.pbm` extension, `problem.yml`, statement files), see [Problem anatomy](problem-anatomy.md).
4
+
5
+ ## Terminology
6
+
7
+ A **quiz** is a problem whose handler is set to `quiz`. It is made of a **quiz root** and a list of **questions**. The quiz root is defined in `quiz.yml` and holds the quiz title, author, statement, whether questions are shuffled, and the list of questions with their scores. Each **question** is defined in a separate YAML file (e.g. `single-choice.yml`) and has a `type`: SingleChoice, MultipleChoice, FillIn, Ordering, Matching, or OpenQuestion.
8
+
9
+ Quiz content can be **localized**: the same quiz can have different `quiz.yml` and question files per language (e.g. under `en/` and `ca/`). The toolkit runs or lints the quiz for a given directory, so you typically run it from a language-specific subdirectory. Each language should live inside its own folder.
10
+
11
+ **Variable substitution** allows question text and options to depend on values generated at run time. If a question file is named `example.yml`, the toolkit looks for `example.py` in the same directory. When present, it runs the Python script with a random `seed` and collects the script’s global variables. Those variables can be referenced in the question YAML with `$name` or `${name}`. This makes it possible to have different numbers, strings, or options for each run while keeping the same correct answer logic (e.g. “What is $a + $b?” with `a` and `b` random).
12
+
13
+ **Scoring**: Each question has a `score` between 0 and 100, and the total of all question scores listed in `quiz.yml` must add up to 100. Users earn points for each question.
14
+
15
+ ## Quiz structure
16
+
17
+ A quiz lives inside a problem folder (e.g. a `.pbm`). Each language should live inside its own folder (e.g. `en/`, `ca/`).
18
+
19
+ Example with multiple languages:
20
+
21
+ ```
22
+ problem_folder.pbm
23
+ ├── en
24
+ │ ├── handler.yml
25
+ │ ├── quiz.yml
26
+ │ ├── some-question.yml
27
+ │ ├── some-question.py
28
+ │ └── ...
29
+ ├── ca
30
+ │ ├── handler.yml
31
+ │ ├── quiz.yml
32
+ │ ├── some-question.yml
33
+ │ ├── some-question.py
34
+ │ └── ...
35
+ └── problem.yml
36
+ ```
37
+
38
+ You run or lint the quiz from the directory that contains `quiz.yml` (e.g. `jtk quiz run -d en` or `cd en && jtk quiz run`).
39
+
40
+ `yml` files are YAML (YAML Ain't Markup Language) files. YAML is a human-readable data-serialization language; see [YAML documentation](https://yaml.org/) for more details. Also, see [YAML multiline info](https://yaml-multiline.info/) for more details on how to write multiline strings in YAML.
41
+
42
+ Many items are written in Markdown. See [Markdown documentation](https://www.markdownguide.org/) for more details. In addition, you can use a small subset of LaTeX for mathematical expressions but these have to be enclosed between `·` signs, not standard `$` signs.
43
+
44
+ ## The `quiz.yml` file
45
+
46
+ The file `quiz.yml` defines the quiz root.
47
+
48
+ - `title`: Title of the quiz.
49
+ - `author`: Author of the quiz.
50
+ - `statement`: Short description or instructions shown for the quiz (Markdown).
51
+ - `questions`: List of question entries. Each entry has:
52
+ - `title`: Title of the question (e.g. for display in a table of contents).
53
+ - `file`: Base name of the question file, without the `.yml` extension (e.g. `question` for `question.yml`).
54
+ - `score`: Integer from 0 to 100. The sum of all `score` values in the list must be 100.
55
+ - `shuffle` (optional): Whether to shuffle the order of questions when running the quiz. Defaults to `false`.
56
+
57
+ ### Example
58
+
59
+ ```yaml
60
+ title: Demo quiz
61
+
62
+ author: Jordi Petit
63
+
64
+ statement: This quiz showcases the possibilities of the quiz problems at Jutge.org.
65
+
66
+ shuffle: true
67
+
68
+ questions:
69
+ - title: Single choice question
70
+ file: single-choice
71
+ score: 10
72
+
73
+ - title: Multiple choice question
74
+ file: multiple-choice
75
+ score: 10
76
+
77
+ - title: Fill in question
78
+ file: fill-in-1
79
+ score: 20
80
+
81
+ - title: Ordering question
82
+ file: ordering
83
+ score: 10
84
+
85
+ - title: Matchings question
86
+ file: matchings
87
+ score: 50
88
+ ```
89
+
90
+ ## Question types
91
+
92
+ Each question is stored in a YAML file whose name matches the `file` field in `quiz.yml` (e.g. `question.yml`). The file must contain a `type` field that identifies the kind of question. Variable substitution applies to text fields and options when a corresponding `.py` file exists. All question types support an optional `hide_score` (default `false`) and an optional `partial_answer` (default `false`).
93
+
94
+ The `partial_answer` option is set per question in the question YAML:
95
+
96
+ - If `partial_answer` is set to `false` (default), users get full points for that question only when their answer is completely correct; any mistake gives zero points for that question.
97
+
98
+ - If `partial_answer` is set to `true`, users can receive partial points for that question when the answer is partially correct (e.g. proportional to how many parts are right), and the response may still be marked as "correct" if at least one part is right.
99
+
100
+ The `hide_score` option is set per question in the question YAML. If set to `true`, the question score is not shown to the user.
101
+
102
+ ### SingleChoice
103
+
104
+ One correct option among several. Exactly one choice must have `correct: true`. Choices can be shuffled (optional `shuffle`, default `true`). Each choice can have an optional `hint`. Duplicate choice text is not allowed.
105
+
106
+ - `text`: Question text (supports `$var` and `${var}`).
107
+ - `choices`: List of `{ text, correct?, hint? }`. One and only one choice must have `correct: true`.
108
+ - `shuffle` (optional): Whether to shuffle choices. Defaults to `true`.
109
+ - `partial_answer` (optional): Whether to award partial credit for this question. Defaults to `false`.
110
+
111
+ Example:
112
+
113
+ ```yaml
114
+ type: SingleChoice
115
+
116
+ text: 'What is the result of the evaluation of `$a + $b`?'
117
+
118
+ choices:
119
+ - text: '`$s1`'
120
+ correct: true
121
+ hint: 'You did well'
122
+ - text: '`$s2`'
123
+ hint: 'Sorry...'
124
+ - text: '`$s3`'
125
+ ```
126
+
127
+ Variables `a`, `b`, `s1`, `s2`, `s3` would be produced by a `single-choice.py` script in the same directory.
128
+
129
+ ### MultipleChoice
130
+
131
+ Zero or more correct options. Multiple choices can have `correct: true`. Choices can be shuffled (optional `shuffle`, default `true`).
132
+
133
+ - `text`: Question text (supports variables).
134
+ - `choices`: List of `{ text, correct?, hint? }`.
135
+ - `shuffle` (optional): Whether to shuffle choices. Defaults to `true`.
136
+ - `partial_answer` (optional): Whether to award partial credit for this question. Defaults to `false`.
137
+
138
+ Example:
139
+
140
+ ```yaml
141
+ type: MultipleChoice
142
+
143
+ text: 'Which of the following expressions are `true`?'
144
+
145
+ choices:
146
+ - text: '`$s == $a + $b`'
147
+ correct: true
148
+ - text: '`$s != $a + $b`'
149
+ hint: 'Sorry...'
150
+ - text: '`$a + $b >= $s`'
151
+ correct: true
152
+ ```
153
+
154
+ ### FillIn
155
+
156
+ One or more blanks in a text or code block. Each blank is identified by a placeholder (e.g. `S1`, `XXXX`) and has a correct answer and optional options (dropdown). If `options` are given, the correct answer must be one of them.
157
+
158
+ - `text`: Question or instructions (supports variables).
159
+ - `context`: Text containing placeholders (e.g. `S1`, `S2`, `XXXX`). Placeholders are the keys in `items`.
160
+ - `items`: Map from placeholder name to an item object:
161
+ - `correct`: Correct answer (string).
162
+ - `options` (optional): List of strings; if present, the blank is shown as a dropdown and `correct` must be in this list.
163
+ - `maxlength` (optional): Max length for the answer. Defaults to 100.
164
+ - `placeholder` (optional): Placeholder text in the input (e.g. `"?"`).
165
+ - `ignorecase` (optional): Whether to ignore case when checking. Defaults to `true`.
166
+ - `trim` (optional): Whether to trim spaces. Defaults to `true`.
167
+ - `partial_answer` (optional): Whether this blank contributes to partial credit for the question. Defaults to `false`.
168
+ - `partial_answer` (optional, at question level): Whether to award partial credit for this question. Defaults to `false`.
169
+
170
+ Example with dropdowns:
171
+
172
+ ```yaml
173
+ type: FillIn
174
+
175
+ text: 'Fill in the blanks.'
176
+
177
+ context: |
178
+ A/An S1 is a self-contained step-by-step set of operations...
179
+ A/An S2 is a collection of instructions...
180
+
181
+ items:
182
+ S1:
183
+ correct: algorithm
184
+ options:
185
+ - program
186
+ - algorithm
187
+ - pseudocode
188
+ S2:
189
+ correct: program
190
+ options:
191
+ - program
192
+ - algorithm
193
+ ```
194
+
195
+ Example with free-text blanks (e.g. numeric):
196
+
197
+ ````yaml
198
+ type: FillIn
199
+
200
+ text: |
201
+ Fill in the blanks of the given program.
202
+
203
+ context: |
204
+ ```python
205
+ def sum (L):
206
+ s = XXXX
207
+ for x in L:
208
+ s = s YYYY x
209
+ return s
210
+ ```
211
+
212
+ items:
213
+ XXXX:
214
+ maxlength: 5
215
+ correct: 0
216
+ YYYY:
217
+ correct: '+'
218
+ options:
219
+ - '+'
220
+ - '-'
221
+ - '*'
222
+ - '/'
223
+ ````
224
+
225
+ ### Ordering
226
+
227
+ User must order a list of items (e.g. chronological order). Items can be shown in shuffled order (optional `shuffle`, default `true`).
228
+
229
+ - `text`: Question text (supports variables).
230
+ - `label`: Label for the list (e.g. “Programming language”).
231
+ - `items`: List of strings in the correct order.
232
+ - `shuffle` (optional): Whether to show items in random order. Defaults to `true`.
233
+ - `partial_answer` (optional): Whether to award partial credit for this question. Defaults to `false`.
234
+
235
+ Example:
236
+
237
+ ```yaml
238
+ type: Ordering
239
+
240
+ text: Drag and drop the item to order the programming languages by date of appearance (older on top).
241
+
242
+ label: Programming language
243
+
244
+ items:
245
+ - Fortran
246
+ - C
247
+ - C++
248
+ - Python
249
+ - Java
250
+ - Julia
251
+ ```
252
+
253
+ ### Matching
254
+
255
+ Two columns: user matches each left item with one right item. Left and right lists can be shuffled (optional `shuffle`, default `true`).
256
+
257
+ - `text`: Question text (supports variables).
258
+ - `labels`: Two strings, e.g. `["Countries", "Capitals"]`.
259
+ - `left`: List of strings (e.g. countries).
260
+ - `right`: List of strings (e.g. capitals), in the same order as `left` (left[i] matches right[i]).
261
+ - `shuffle` (optional): Whether to shuffle left and right columns. Defaults to `true`.
262
+ - `partial_answer` (optional): Whether to award partial credit for this question. Defaults to `false`.
263
+
264
+ Example:
265
+
266
+ ```yaml
267
+ type: Matching
268
+
269
+ text: Match the countries with their capitals.
270
+
271
+ labels:
272
+ - Countries
273
+ - Capitals
274
+
275
+ left:
276
+ - France
277
+ - Germany
278
+ - Spain
279
+ - Italy
280
+ - Andorra
281
+
282
+ right:
283
+ - Paris
284
+ - Berlin
285
+ - Madrid
286
+ - Rome
287
+ - Andorra la Vella
288
+ ```
289
+
290
+ ### OpenQuestion
291
+
292
+ Free-text answer with no automatic correction. Useful for open-ended or reflective answers.
293
+
294
+ - `text`: Question text (supports variables).
295
+ - `placeholder` (optional): Placeholder for the text area. Defaults to `""`. Supports variables.
296
+ - `partial_answer` (optional): Whether to award partial credit for this question. Defaults to `false`.
297
+
298
+ Example:
299
+
300
+ ```yaml
301
+ type: OpenQuestion
302
+
303
+ text: Talk about yourself.
304
+
305
+ placeholder: 'My name is **$name** and I want to pass this course.'
306
+ ```
307
+
308
+ The variable `name` can be set by an optional Python script with the same base name as the question file (e.g. `open-question.py` for `open-question.yml`) in the same directory.
309
+
310
+ ## Variable substitution (`.py` files)
311
+
312
+ If a question file is named `example.yml`, the toolkit looks for `example.py` in the same directory. When present:
313
+
314
+ 1. The Python script is run with a given `seed` (passed as an argument by the toolkit) so that the run is reproducible.
315
+ 2. The script’s global variables (that are JSON-serializable and whose names do not start with `__`) are collected.
316
+ 3. In the question YAML, any string field that supports substitution can use `$name` or `${name}` to be replaced by the value of `name`.
317
+
318
+ This allows different runs to show different numbers or options while keeping the same correct answer (e.g. “What is $a + $b?” with `a` and `b` random and one choice `$s1 = a + b` marked correct).
319
+
320
+ Example `question.py`:
321
+
322
+ ```python
323
+ import random
324
+
325
+ a = random.randint(1, 10)
326
+ b = random.randint(1, 10)
327
+
328
+ s1 = a + b
329
+ s2 = a + b - 1
330
+ s3 = a + b + 1
331
+ s4 = a - b
332
+ s5 = a * b
333
+ ```
334
+
335
+ Used with a SingleChoice question where the correct answer is `$s1`, so each run has different numbers but the same structure.
336
+
337
+ ## The `handler.yml` file
338
+
339
+ For a quiz problem, the problem’s `handler.yml` must set the handler to `quiz`:
340
+
341
+ ```yaml
342
+ handler: quiz
343
+ ```
344
+
345
+ Other handler options (e.g. `std`, `graphic`) are for non-quiz problems. See [Problem anatomy — handler.yml](problem-anatomy.md#the-handleryml-file) for the full list of handler and option descriptions.
346
+
347
+ ## Linting, running and playing quizzes
348
+
349
+ From the toolkit CLI:
350
+
351
+ - `jtk quiz lint` — lint a quiz (validate `quiz.yml` and all referenced question YAML files):
352
+
353
+ ```bash
354
+ jtk quiz lint -d <directory>
355
+ ```
356
+
357
+ Use the directory that contains `quiz.yml` (e.g. the `en` subdirectory).
358
+
359
+ - `jtk quiz run` — run a quiz (build questions, apply variables, output JSON or YAML):
360
+
361
+ ```bash
362
+ jtk quiz run -d <directory> [-s <seed>] [-f json|yaml]
363
+ ```
364
+
365
+ If no seed is provided, a random one is used. Running the quiz applies variable substitution and, if `shuffle` is true, shuffles question order (and, per question, choices or ordering/matching items when their `shuffle` is true).
366
+
367
+ - `jtk quiz play` — play a quiz in the terminal:
368
+ ```bash
369
+ jtk quiz play -d <directory> [-i <input>] [-o <output>] [-s <seed>]
370
+ ```
371
+ If no seed is provided, a random one is used. Playing the quiz applies variable substitution and, if `shuffle` is true, shuffles question order (and, per question, choices or ordering/matching items when their `shuffle` is true).
372
+
373
+ ## Quick checklist
374
+
375
+ - Use a problem folder with `handler: quiz` in `handler.yml`.
376
+ - Place `quiz.yml` in the root (single-language) or in a per-language subdirectory (e.g. `en/`, `ca/`).
377
+ - Ensure every `file` in `quiz.yml` has a corresponding `file.yml` in the same directory.
378
+ - Ensure question scores in `quiz.yml` sum to 100.
379
+ - For SingleChoice, set exactly one choice with `correct: true`.
380
+ - For FillIn with `options`, ensure `correct` is in the `options` list.
381
+ - For Matching, ensure `left` and `right` have the same length and are in matching order.
382
+ - Use variable names in `$name` / `${name}` that are produced by the optional `file.py` script; the script is run with the toolkit’s seed for reproducibility.
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@jutge.org/toolkit",
3
3
  "description": "Toolkit to prepare problems for Jutge.org",
4
- "version": "4.4.16",
4
+ "version": "4.4.19",
5
5
  "homepage": "https://jutge.org",
6
6
  "author": {
7
7
  "name": "Jutge.org",
@@ -72,6 +72,7 @@
72
72
  "chalk": "^5.6.2",
73
73
  "chokidar": "^5.0.0",
74
74
  "cli-highlight": "^2.1.11",
75
+ "cli-table3": "^0.6.5",
75
76
  "commander": "^14.0.3",
76
77
  "dayjs": "^1.11.19",
77
78
  "env-paths": "^4.0.0",
package/toolkit/ask.ts CHANGED
@@ -21,7 +21,7 @@ export const askCmd = new Command('ask')
21
21
  const docs = await loadDocumentation() // Load your markdown files
22
22
  const fullPrompt = `${docs}\n\nUser question: ${question}`
23
23
 
24
- const answer = await complete(jutge, model, "ask", systemPrompt, fullPrompt)
24
+ const answer = await complete(jutge, model, 'ask', systemPrompt, fullPrompt)
25
25
  await tui.markdown(answer)
26
26
  tui.warning(
27
27
  `This answer generated by JutgeAI using ${model} is not authoritative but we hope it will help you.`,
@@ -50,7 +50,6 @@ async function chooseCommand(
50
50
  return chooseCommand(program, path.slice(0, -1))
51
51
  }
52
52
 
53
-
54
53
  const newPath = [...path, chosen]
55
54
  const nextCmd = resolveCommand(program, newPath)!
56
55
  const nextSubs = getVisibleSubcommands(nextCmd)
@@ -91,10 +90,12 @@ async function promptForArgument(arg: Argument, existing: string[]): Promise<str
91
90
 
92
91
  const defaultStr =
93
92
  defaultVal != null
94
- ? (Array.isArray(defaultVal) ? defaultVal.join(' ') : String(defaultVal))
93
+ ? Array.isArray(defaultVal)
94
+ ? defaultVal.join(' ')
95
+ : String(defaultVal)
95
96
  : required
96
- ? undefined
97
- : ''
97
+ ? undefined
98
+ : ''
98
99
 
99
100
  const raw = await input({
100
101
  message,
@@ -30,7 +30,8 @@ generateCmd
30
30
  .command('problem')
31
31
  .description('Generate a problem with JutgeAI')
32
32
 
33
- .summary(`Generate a problem with JutgeAI
33
+ .summary(
34
+ `Generate a problem with JutgeAI
34
35
 
35
36
  Use this command to generate a problem with JutgeAI from a specification.
36
37
 
@@ -72,14 +73,10 @@ Problem generation needs a problem specification:
72
73
  - unless the --do-not-ask flag is given.
73
74
 
74
75
  Treat the generated problem as a starting draft. You should edit the problem directory manually after the generation.
75
- `)
76
-
77
- .addOption(
78
- new Option
79
- ('-k, --kind <kind>', 'problem kind')
80
- .default('io')
81
- .choices(['io', 'funcs'])
76
+ `,
82
77
  )
78
+
79
+ .addOption(new Option('-k, --kind <kind>', 'problem kind').default('io').choices(['io', 'funcs']))
83
80
  .option('-d, --directory <path>', 'output directory', 'new-problem.pbm')
84
81
  .option('-i, --input <path>', 'input specification file')
85
82
  .option('-o, --output <path>', 'output specification file')
@@ -89,7 +86,6 @@ Treat the generated problem as a starting draft. You should edit the problem dir
89
86
  .action(async ({ input, output, directory, model, doNotAsk, kind }) => {
90
87
  const jutge = await getLoggedInJutgeClient()
91
88
  await tui.section(`Generating ${kind} problem with JutgeAI`, async () => {
92
-
93
89
  if (await exists(directory)) {
94
90
  throw new Error(`Directory ${directory} already exists`)
95
91
  }
@@ -118,8 +114,8 @@ The original statement will be used as the source text for translation.
118
114
 
119
115
  Provide one or more target language from the following list:
120
116
  ${Object.entries(languageNames)
121
- .map(([key, name]) => ` - ${key}: ${name}`)
122
- .join('\n')}
117
+ .map(([key, name]) => ` - ${key}: ${name}`)
118
+ .join('\n')}
123
119
 
124
120
  The added translations will be saved in the problem directory overwrite possible existing files.`,
125
121
  )
@@ -160,7 +156,14 @@ The result is written to statement.<lang>.tex in the problem directory.`,
160
156
  .action(async (proglang, language, prompt, { directory, model }) => {
161
157
  const jutge = await getLoggedInJutgeClient()
162
158
  const problem = await newProblem(directory)
163
- await generateStatementFromSolution(jutge, model, problem, proglang, language, (prompt ?? '').trim() || undefined)
159
+ await generateStatementFromSolution(
160
+ jutge,
161
+ model,
162
+ problem,
163
+ proglang,
164
+ language,
165
+ (prompt ?? '').trim() || undefined,
166
+ )
164
167
  })
165
168
 
166
169
  generateCmd
@@ -174,8 +177,8 @@ The golden solution will be used as a reference for generating the alternatives.
174
177
 
175
178
  Provide one or more target programming languages from the following list:
176
179
  ${Object.entries(languageNames)
177
- .map(([key, name]) => ` - ${key}: ${name}`)
178
- .join('\n')}
180
+ .map(([key, name]) => ` - ${key}: ${name}`)
181
+ .join('\n')}
179
182
 
180
183
  The added solutions will be saved in the problem directory overwrite possible existing files.`,
181
184
  )
@@ -209,8 +212,8 @@ The main file for the golden solution will be used as a reference for generating
209
212
 
210
213
  Provide one or more target programming languages from the following list:
211
214
  ${Object.entries(languageNames)
212
- .map(([key, name]) => ` - ${key}: ${name}`)
213
- .join('\n')}
215
+ .map(([key, name]) => ` - ${key}: ${name}`)
216
+ .join('\n')}
214
217
 
215
218
  The added main files will be saved in the problem directory overwrite possible existing files.`,
216
219
  )
package/toolkit/lint.ts CHANGED
@@ -16,7 +16,8 @@ export async function printLintResults(results: LintResult[], directories: strin
16
16
  const errors = result.issues.filter((i) => i.severity === 'error')
17
17
  if (errors.length > 0) hasError = true
18
18
 
19
- const dirLabel = result.directory === directories[0] && results.length === 1 ? result.directory : result.directory
19
+ const dirLabel =
20
+ result.directory === directories[0] && results.length === 1 ? result.directory : result.directory
20
21
  if (result.issues.length === 0) {
21
22
  tui.print(chalk.green('✓') + ' ' + dirLabel + ' ' + chalk.gray('— no issues'))
22
23
  } else {
@@ -35,9 +36,7 @@ export async function printLintResults(results: LintResult[], directories: strin
35
36
  const totalErrors = results.reduce((s, r) => s + r.issues.filter((i) => i.severity === 'error').length, 0)
36
37
  const totalWarnings = results.reduce((s, r) => s + r.issues.filter((i) => i.severity === 'warning').length, 0)
37
38
  if (totalErrors > 0 || totalWarnings > 0) {
38
- tui.gray(
39
- `Total: ${totalErrors} error(s), ${totalWarnings} warning(s) across ${results.length} problem(s)`,
40
- )
39
+ tui.gray(`Total: ${totalErrors} error(s), ${totalWarnings} warning(s) across ${results.length} problem(s)`)
41
40
  }
42
41
  }
43
42
 
package/toolkit/make.ts CHANGED
@@ -243,27 +243,27 @@ async function runWatch(maker: Maker): Promise<void> {
243
243
  tui.print('╭───╮ ╭───╮ ╭───╮ ╭───╮ ')
244
244
  tui.print(
245
245
  '│ ' +
246
- chalk.bold('A') +
247
- ' │ │ ' +
248
- chalk.bold('L') +
249
- ' │ │ ' +
250
- chalk.bold('H') +
251
- ' │ │ ' +
252
- chalk.bold('Q') +
253
- ' │ ',
246
+ chalk.bold('A') +
247
+ ' │ │ ' +
248
+ chalk.bold('L') +
249
+ ' │ │ ' +
250
+ chalk.bold('H') +
251
+ ' │ │ ' +
252
+ chalk.bold('Q') +
253
+ ' │ ',
254
254
  )
255
255
  tui.print('╰───╯ ╰───╯ ╰───╯ ╰───╯ ')
256
256
  tui.print(' ♻️ 🔍 ❓ 🚫')
257
257
  tui.print(
258
258
  ' ' +
259
- chalk.blue('All') +
260
- ' ' +
261
- chalk.blue('Lint') +
262
- ' ' +
263
- chalk.blue('Help') +
264
- ' ' +
265
- chalk.blue('Quit') +
266
- ' ',
259
+ chalk.blue('All') +
260
+ ' ' +
261
+ chalk.blue('Lint') +
262
+ ' ' +
263
+ chalk.blue('Help') +
264
+ ' ' +
265
+ chalk.blue('Quit') +
266
+ ' ',
267
267
  )
268
268
  tui.print(chalk.gray('Waiting for changes or keypress...'))
269
269
  }
@@ -291,11 +291,7 @@ async function runWatch(maker: Maker): Promise<void> {
291
291
  }
292
292
  } else if (/^problem\.\w+\.tex$/.test(name)) {
293
293
  pending.statementTex = true
294
- } else if (
295
- name === 'handler.yml' ||
296
- name === 'problem.yml' ||
297
- /^problem\.\w+\.yml$/.test(name)
298
- ) {
294
+ } else if (name === 'handler.yml' || name === 'problem.yml' || /^problem\.\w+\.yml$/.test(name)) {
299
295
  pending.ymlSchema = true
300
296
  }
301
297
  scheduleRun()
@@ -309,7 +305,9 @@ async function runWatch(maker: Maker): Promise<void> {
309
305
  await new Promise<void>((resolveExit, rejectExit) => {
310
306
  function printHelp() {
311
307
  tui.print()
312
- tui.print('Under watch mode, the toolkit automatically rebuilds the necessary files in the problem directory when you make changes to your files.')
308
+ tui.print(
309
+ 'Under watch mode, the toolkit automatically rebuilds the necessary files in the problem directory when you make changes to your files.',
310
+ )
313
311
  tui.print(
314
312
  [
315
313
  'Key bindings:',
@@ -320,7 +318,6 @@ async function runWatch(maker: Maker): Promise<void> {
320
318
  ' Q Quit watch mode',
321
319
  '',
322
320
  ].join('\n'),
323
-
324
321
  )
325
322
  tui.print('The watch mode is under development. Please report any issues to the developers.')
326
323
  printIdleMessage()