monadic-chat 0.1.2 → 0.2.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 9949be7f2447dd466b5cfff5b7c13615f1348ca8d2dd9938a28fca68a48cc4e5
4
- data.tar.gz: 462c1f5af82cb50bd577305a782cad7499f1a7ce05b156491539546b5dfadba2
3
+ metadata.gz: 5ee0dcb302392e3e503b2f26a777953956a998244634c5935fc5be3e2316dc28
4
+ data.tar.gz: 1a42d5a6c02552ee676f89e338a8f7ff550b1905497cce42d0ba55e54c28038d
5
5
  SHA512:
6
- metadata.gz: affcd2df90174f9e85678234e11c0b08c3c486d793c87792d2bc676e3f02d4ec846a58e716bb86ec82f5b2e37ed90f14b61aacb3e93c37c0ba6fece31d4fce1c
7
- data.tar.gz: a4126ffff2679f31bcc000a05620f5914a55188f61e0f35501428888a3445ef6c11775602eed74ee029566b18ea1f8526277a14839b342edb01824fd2b121797
6
+ metadata.gz: 1bca191c52aad2d0b2fd0de6dad2a806046e57546e038b9905d19a78e58047c3cd2e44dbc9fed69b88e648e3970290130d4350ec3fb94aa59b28baf8b07fbe46
7
+ data.tar.gz: 7e9e8a1752c11ff3a50de51ab20ab7f77c4a85b46c15134e253b42f6522b30be15b4cbb8cb8bb9ed52c96588a36d9c409cfb75cc14f28f686279324f468d1ded
data/.ruby-version ADDED
@@ -0,0 +1 @@
1
+ 3.2.0
data/CHANGELOG.md CHANGED
@@ -4,10 +4,12 @@
4
4
 
5
5
  - Initial commit (private)
6
6
 
7
- ## [0.1.1] - 2023-03-12
7
+ ## [0.1.3] - 2023-03-12
8
8
 
9
9
  - Public release
10
+ - Authentication problem fixed
10
11
 
11
- ## [0.1.2] - 2023-03-12
12
+ ## [0.2.0] - 2023-03-13
12
13
 
13
- - Authentication problem fixed
14
+ - Research mode architecture changed
15
+ - Stability improvement
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- monadic-chat (0.1.0)
4
+ monadic-chat (0.2.0)
5
5
  http
6
6
  kramdown
7
7
  launchy
@@ -14,9 +14,7 @@ PATH
14
14
  tty-markdown
15
15
  tty-progressbar
16
16
  tty-prompt
17
- tty-reader
18
17
  tty-screen
19
- tty-spinner
20
18
 
21
19
  GEM
22
20
  remote: https://rubygems.org/
@@ -53,9 +51,7 @@ GEM
53
51
  llhttp-ffi (0.4.0)
54
52
  ffi-compiler (~> 1.0)
55
53
  rake (~> 13.0)
56
- mini_portile2 (2.8.1)
57
- nokogiri (1.14.2)
58
- mini_portile2 (~> 2.8.0)
54
+ nokogiri (1.14.2-x86_64-darwin)
59
55
  racc (~> 1.4)
60
56
  oj (3.14.2)
61
57
  parallel (1.22.1)
@@ -71,7 +67,7 @@ GEM
71
67
  reverse_markdown (2.1.1)
72
68
  nokogiri
73
69
  rexml (3.2.5)
74
- rouge (3.30.0)
70
+ rouge (4.1.0)
75
71
  rspec (3.12.0)
76
72
  rspec-core (~> 3.12.0)
77
73
  rspec-expectations (~> 3.12.0)
@@ -81,7 +77,7 @@ GEM
81
77
  rspec-expectations (3.12.2)
82
78
  diff-lcs (>= 1.2.0, < 2.0)
83
79
  rspec-support (~> 3.12.0)
84
- rspec-mocks (3.12.3)
80
+ rspec-mocks (3.12.4)
85
81
  diff-lcs (>= 1.2.0, < 2.0)
86
82
  rspec-support (~> 3.12.0)
87
83
  rspec-support (3.12.0)
@@ -126,10 +122,10 @@ GEM
126
122
  tty-cursor (~> 0.7)
127
123
  tty-color (0.6.0)
128
124
  tty-cursor (0.7.1)
129
- tty-markdown (0.7.1)
125
+ tty-markdown (0.7.2)
130
126
  kramdown (>= 1.16.2, < 3.0)
131
127
  pastel (~> 0.8)
132
- rouge (~> 3.14)
128
+ rouge (>= 3.14, < 5.0)
133
129
  strings (~> 0.2.0)
134
130
  tty-color (~> 0.5)
135
131
  tty-screen (~> 0.8)
@@ -146,8 +142,6 @@ GEM
146
142
  tty-screen (~> 0.8)
147
143
  wisper (~> 2.0)
148
144
  tty-screen (0.8.1)
149
- tty-spinner (0.9.3)
150
- tty-cursor (~> 0.7)
151
145
  unf (0.1.4)
152
146
  unf_ext
153
147
  unf_ext (0.0.8.2)
@@ -159,7 +153,7 @@ GEM
159
153
  webrick (~> 1.7.0)
160
154
 
161
155
  PLATFORMS
162
- ruby
156
+ x86_64-darwin-22
163
157
 
164
158
  DEPENDENCIES
165
159
  bundler
@@ -169,4 +163,4 @@ DEPENDENCIES
169
163
  solargraph
170
164
 
171
165
  BUNDLED WITH
172
- 2.4.2
166
+ 2.4.1
data/README.md CHANGED
@@ -7,7 +7,11 @@
7
7
  </p>
8
8
 
9
9
  > **Warning**
10
- > This software is *under active development*. It may be unstable, and the latest version may behave slightly differently than this document. Also, specifications may change in the future.
10
+ > This software is ***under active development***. It may be unstable, and the latest version may behave slightly differently than this document. Also, specifications may change in the future.
11
+
12
+ **Change Log**
13
+
14
+ - [March 13, 2023] Text on the architecture of the `research` mode updated in accordance with Version 0.2.0
11
15
 
12
16
  ## Table of Contents
13
17
 
@@ -74,12 +78,23 @@ Monadic Chat comes with four apps (`Chat`, `Code`, `Novel`, and `Translate`). Ea
74
78
 
75
79
  ### Using RubyGems
76
80
 
77
- Execute the following command in an environment where Ruby 2.6 or higher is installed.
81
+ Execute the following command in an environment where Ruby 2.6.10 or higher is installed.
78
82
 
79
83
  ```text
80
84
  gem install monadic-chat
81
85
  ```
82
86
 
87
+ Then run the command to start the app:
88
+ ```text
89
+ monadic-chat
90
+ ```
91
+
92
+ To update:
93
+
94
+ ```text
95
+ gem update monadic-chat
96
+ ```
97
+
83
98
  ### Clone the GitHub Repository
84
99
 
85
100
  Alternatively, clone the code from the GitHub repository and follow the steps below. At this time, you must take this option to create a new app for Monadic Chat.
@@ -184,13 +199,7 @@ In `research` mode, it may take a while (usually several seconds) after the `dat
184
199
 
185
200
  All the information retrievable by running the `data/context` function can be presented in HTML. The HTML file is automatically opened in the default web browser.
186
201
 
187
- <br />
188
-
189
- <kbd><img src="./doc/img/linguistic-html.png" width="700px" style="border: thin solid darkgray;"/></kbd>
190
-
191
- <br />
192
-
193
- The generated HTML is saved in the user's home directory (`$HOME`) with the file name `monadic_chat.html`. The file contents does not automatically updated. Run `html` command every time when you need it. HTML data is written to this file regardless of the app.
202
+ The generated HTML will be saved in the user’s home directory (`$HOME`) with the file `monadic_chat.html`. Once the `html` command is executed, the file contents will continue to be updated until you `reset` or quit the running app. Reload the browser tab or rerun the `html` command to show the latest data. HTML data is written to this file regardless of the app.
194
203
 
195
204
  In `research` mode, it may take several seconds to several minutes after the `html` command is executed before the actual HTML is displayed. This is because in `research` mode, even after displaying a direct response to user input, there may be a process running in the background that retrieves and reconstructs the context data, requiring the system to wait for it to finish.
196
205
 
@@ -277,13 +286,13 @@ Sometimes, however, problematic translations are created. The user can "save" th
277
286
 
278
287
  ## Modes
279
288
 
280
- Monadic Chat has two modes. The `normal` mode utilizes OpenAI's chat API to achieve ChatGPT-like functionality. It is suitable for using a large language model as a competent companion for various pragmatic purposes. On the other hand, the `research` mode utilizes OpenAI's text-completion API. This mode allows for acquiring metadata in the background while receiving the primary response at each conversation turn. It may be especially useful for researchers exploring the possibilities of large-scale language models and their applications.
289
+ Monadic Chat has two modes. The `normal` mode utilizes OpenAI's chat API to achieve ChatGPT-like functionality. It is suitable for using a large language model as a competent companion for various pragmatic purposes. On the other hand, the `research` mode utilizes OpenAI's text-completion API. This mode allows for acquiring **metadata** in the background while receiving the primary response at each conversation turn. It may be especially useful for researchers exploring the possibilities of large-scale language models and their applications.
281
290
 
282
291
  ### Normal Mode
283
292
 
284
293
  The default language model for `normal` mode is `gpt-3.5-turbo`.
285
294
 
286
- In the default configuration, the dialogue messages are reduced after ten turns by deleting the oldest ones (but not the messages that the `system` role gave as instructions).
295
+ In the default configuration, the dialogue messages are reduced after ten turns by deleting the oldest ones (but not the messages that the `system` role has given as instructions).
287
296
 
288
297
  ### Research Mode
289
298
 
@@ -347,7 +356,9 @@ Terms in bold in it may require more explanation.
347
356
  ]}
348
357
  ```
349
358
 
350
- The accumulator in `research` mode also looks like this.
359
+ The accumulator in `research` mode also looks like this.
360
+
361
+ The conversation history is kept entirely in memory until the running app is terminated or reset. The part of the conversation history sent through the API along with new input sentences is referred to here as the accumulator.
351
362
 
352
363
  ### Reducer
353
364
 
@@ -389,11 +400,20 @@ The specifications for Monadic Chat's command-line user interface for this app a
389
400
 
390
401
  > **Note**
391
402
  > The use of square brackets (instead of parentheses) in the notation of syntactic analysis here is to conform to the format of [RSyntaxTree](https://yohasebe.com/rsyntaxtree), a tree-drawing program for linguistic research developed by the author of Monadic Chat.
392
-
393
- <img src="./doc/img/syntree-sample.png" width="300px" />
403
+ > <img src="./doc/img/syntree-sample.png" width="300px" />
394
404
 
395
405
  The sample app we create in this section is stored in the [`sample_app`](https://github.com/yohasebe/monadic-chat/tree/main/sample_app) folder in the repository.
396
406
 
407
+ Below is a sample HTML displaying the conversation (sentence and its syntactic structure notation pairs) and metadata.
408
+
409
+ <br />
410
+
411
+ <kbd><img src="./doc/img/linguistic-html.png" width="700px" style="border: thin solid darkgray;"/></kbd>
412
+
413
+ <br />
414
+
415
+
416
+
397
417
  ### File Structure
398
418
 
399
419
  New Monadic Chat apps must be placed inside the `apps` folder. The folders and files for default apps `chat`, `code`, `novel`, and `translate` are also in this folder.
@@ -431,16 +451,14 @@ apps
431
451
  The purpose of each file is as follows.
432
452
 
433
453
  - `linguistic.rb`: Ruby code to define the "reducer"
434
- - `linguistic.json`: JSON template describing GPT behavior in `normal` mode
435
- - `linguistic.md`: Markdown template describing GPT behavior in `research` mode
436
-
437
- The `.rb` file is required, but you may create both `.json` and `.md` files, or only one of them.
454
+ - `linguistic.json`: JSON template describing GPT behavior in `normal` and `research` modes
455
+ - `linguistic.md`: Markdown template describing GPT behavior in addition to the `json` file above in `research` mode
438
456
 
439
457
  Template files with a name beginning with `_` are also ignored. If a folder has a name beginning with `_`, all its contents are ignored.
440
458
 
441
459
  ### Reducer Code
442
460
 
443
- We do not need to make the reducer do anything special for the current purposes. So, let's copy the code from the default `chat` app and make a minor modification, such as changing the class name so that it matches the app name. We save it as `apps/linguistic/linguistic.rb`.
461
+ We do not need to make the reducer do anything special for the current purposes. So, let's copy the code from the default `chat` app and make a minor modification, such as changing the class name and the app name so that they match the app name. We save it as `apps/linguistic/linguistic.rb`.
444
462
 
445
463
  ### Template for `Normal` Mode
446
464
 
@@ -475,10 +493,13 @@ Below we will look at the `research` mode template for the `linguistic` app, sec
475
493
 
476
494
  **Main Section**
477
495
 
478
- <div style="highlight highlight-source-gfm"><pre style="white-space : pre-wrap !important;">You are a natural language syntactic/semantic/pragmatic analyzer. Analyze the new prompt from the user below and execute a syntactic parsing. Give your response in a variation of the penn treebank format, but use brackets [ ] instead of parentheses ( ). Also, give your response in a markdown code span. The sentence must always be parsed if the user's input sentence is enclosed in double quotes. Create a response to the following new prompt from the user and set your response to the "response" property of the JSON object below. All prompts by "user" in the "messages" property are continuous in content.
479
- </pre></div>
496
+ <div style="highlight highlight-source-gfm"><pre style="white-space : pre-wrap !important;">{{SYSTEM}}
480
497
 
481
- The text here is the same as the text in the template for the `normal` mode in an instruction message by the `system`. However, note that it contains an instruction that the response from GPT should be presented in the form of a JSON object, as shown in one of the following sections.
498
+ All prompts by "user" in the "messages" property are continuous in content. You are an English syntactic/semantic/pragmatic analyzer. Analyze the new prompt from the user below and execute a syntactic parsing. Give your response in a variation of the penn treebank format, but use brackets [ ] instead of parentheses ( ). Also, give your response in a markdown code span. The sentence must always be parsed if the user's input sentence is enclosed in double quotes. Let the user know if parsing the sentence is difficult or the input must be enclosed in double quotes.
499
+
500
+ Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. The preceding conversation is stored in "PAST MESSAGES". In "PAST MESSAGES", "assistant" refers to you.</pre></div>
501
+
502
+ Some of the text here is the same as the text of the directive message by `system` in the template in `normal` mode; Monadic Chat automatically replaces `{{SYSTEM}}` with the `system` directive text when the template is sent via API. However, the above text also includes a few additional paragraphs, including the one instructing the response from GPT to be presented as a JSON object.
482
503
 
483
504
  **New Prompt**
484
505
 
@@ -486,7 +507,16 @@ The text here is the same as the text in the template for the `normal` mode in a
486
507
  NEW PROMPT: {{PROMPT}}
487
508
  ```
488
509
 
489
- Monadic Chat replaces `{{PROMPT}}` with input from the user when sending templates through the API.
510
+ Monadic Chat replaces `{{PROMPT}}` with input from the user when sending the template through the API.
511
+
512
+ **Past Messages**
513
+
514
+ ```markdown
515
+ PAST MESSAGES:
516
+ {{MESSAGES}}
517
+ ```
518
+
519
+ Monadic Chat replaces `{{MESSAGES}}` with messages from past conversations when sending the template. Note that not all the past messages have to be copied here: the reducer mechanism could select, modify, or even "generate" messages and include them instead.
490
520
 
491
521
  **JSON Object**
492
522
 
@@ -495,15 +525,11 @@ Monadic Chat replaces `{{PROMPT}}` with input from the user when sending templat
495
525
  "prompt": "\"We didn't have a camera.\"",
496
526
  "response": "`[S [NP We] [VP [V didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]`\n\n###\n\n",
497
527
  "mode": "linguistic",
498
- "turns": 2,
528
+ "tokens": 351,
529
+ "turns": 3,
499
530
  "sentence_type": ["declarative"],
500
531
  "sentiment": ["sad"],
501
532
  "summary": "The user saw a beautiful sunset, but did not take a picture because the user did not have a camera.",
502
- "tokens": 351,
503
- "messages": [{"user": "\"We saw a beautiful sunset.\"", "assistant": "`[S [NP He] [VP [V saw] [NP [det a] [N' [Adj beautiful] [N sunset] ] ] ] ]`\n\n###\n\n" },
504
- {"user": "\"We didn't take a picture.\"", "assistant": "`[S [NP We] [IP [I didn't] [VP [V take] [NP [Det a] [N picture] ] ] ] ] ]`\n\n###\n\n" },
505
- {"user": "\"We didn't have a camera.\"", "assistant": "`[S [NP We] [IP [I didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]`\n\n###\n\n" }
506
- ]
507
533
  }
508
534
  ```
509
535
 
@@ -511,15 +537,20 @@ This is the core of the `research` mode template.
511
537
 
512
538
  Note that the entire `research` mode template is written in Markdown format, so the above JSON object is actually separated from the rest of the template by a code fence, as shown below.
513
539
 
514
- ```json
515
- {
516
- "prompt": ...
517
- ...
518
- "messages": ...
519
- }
520
- ```
540
+ ```json
541
+ ```json
542
+ {
543
+ "prompt": ...
544
+ "response": ...
545
+ "mode": ...
546
+ "tokens": ...
547
+ "turns": ...
548
+ ...
549
+ }
550
+ ```
551
+ ```
521
552
 
522
- The required properties of this JSON object are `prompt`, `response`, and `messages`. Other properties are optional. The format of the `messages` property is similar to that of the `normal` mode (i.e., OpenAI's chat API. The only difference is that it is structured as a list of objects whose keys are user and assistant to make it easier to describe.)
553
+ The required properties of this JSON object are `prompt`, `response`, `mode`, and `tokens`. Other properties are optional. The `mode` property is used to check the app name when saving the conversation data or loading from an external file. The `tokens` property is used in the reducer mechanism to check the approximate size of the current JSON object. The `turns` property is also used in the reducer mechanism.
523
554
 
524
555
  The JSON object in the `research` mode template is saved in the user’s home directory (`$HOME`) with the file `monadic_chat.json`. The content is overwritten every time the JSON object is updated. Note that this JSON file is created for logging purposes (so the data is not pretty printed). Modifying its content does not affect the processes carried out by the app.
525
556
 
@@ -530,13 +561,12 @@ Make sure the following content requirements are all fulfilled:
530
561
 
531
562
  - keep the value of the "mode" property at "linguistic"
532
563
  - set the new prompt to the "prompt" property
533
- - create your response to the new prompt in accordance with the "messages" and set it to "response"
534
- - insert both the new prompt and the response after all the existing items in the "messages"
564
+ - create your response to the new prompt based on "PAST MESSAGES" and set it to "response"
535
565
  - analyze the new prompt's sentence type and set a sentence type value such as "interrogative", "imperative", "exclamatory", or "declarative" to the "sentence_type" property
536
566
  - analyze the new prompt's sentiment and set one or more sentiment types such as "happy", "excited", "troubled", "upset", or "sad" to the "sentiment" property
537
- - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words.
567
  - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words, using as many discourse markers as possible, such as "because", "therefore", "but", and "so", to show the logical connection between the events.
538
568
  - update the value of "tokens" with the number of tokens of the resulting JSON object
539
- - increment the value of "turns" by 1 and update the property so that the value of "turns" equals the number of the items in the "messages" of the resulting JSON object
569
+ - increment the value of "turns" by 1
540
570
  ```
541
571
 
542
572
  Note that all the properties of the JSON object above are mentioned so that GPT can update them accordingly.
@@ -548,6 +578,7 @@ Make sure the following formal requirements are all fulfilled:
548
578
 
549
579
  - do not use invalid characters in the JSON object
550
580
  - escape double quotes and other special characters in the text values in the resulting JSON object
581
+ - check the validity of the generated JSON object and correct any possible parsing problems before returning it
551
582
 
552
583
  Add "\n\n###\n\n" at the end of the "response" value.
553
584
 
data/apps/chat/chat.json CHANGED
@@ -1,4 +1,4 @@
1
1
  {"messages": [
2
2
  {"role": "system",
3
- "content": "You are a friendly but professional consultant who answers various questions, write computer program code, make decent suggestions, give helpful advice in response to a prompt from the user. If the prompt is not clear enough, ask the user to rephrase it. You are able to empathize with the user; insert an emoji (displayable on the terminal screen) that you deem appropriate for the user's input at the beginning of your response. If the user input is sentimentally neutral, pick up any emoji that matchs the topic."}
3
+ "content": "You are a friendly but professional consultant who answers various questions, writes computer program code, makes decent suggestions, and gives helpful advice in response to a prompt from the user. If the prompt is not clear enough, ask the user to rephrase it. You are able to empathize with the user; insert an emoji (displayable on the terminal screen) that you deem appropriate for the user's input at the beginning of your response. If the user input is sentimentally neutral, pick up any emoji that matches the topic."}
4
4
  ]}
data/apps/chat/chat.md CHANGED
@@ -1,19 +1,24 @@
1
- You are a friendly but professional AI assistant capable of answering various questions, writing computer program code, making decent suggestions, and giving helpful advice in response to a new prompt from the user. If the prompt is not clear enough, ask the user to rephrase it. You are able to empathize with the user; insert a unicode emoji (one that is displayable on the terminal screen) that you deem appropriate for the user's input at the beginning of your response. If the user input is sentimentally neutral, pick up any emoji that matchs the topic. Create a response to the following new prompt from the user and set your response to the "response" property of the JSON object shown below. The preceding conversation is stored in the value of the "conversation" property.
1
+ {{SYSTEM}}
2
2
 
3
- Make your response as detailed as possible.
3
+ Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object below. The preceding conversation is stored in "PAST MESSAGES".
4
+ In "PAST MESSAGES", "assistant" refers to you. Make your response as detailed as possible.
4
5
 
5
6
  NEW PROMPT: {{PROMPT}}
6
7
 
8
+ PAST MESSAGES:
9
+ {{MESSAGES}}
10
+
11
+ JSON:
12
+
7
13
  ```json
8
14
  {
9
15
  "prompt": "Can I ask something?",
10
16
  "response": "Sure!\n\n###\n\n",
11
17
  "mode": "chat",
12
- "turns": 1,
18
+ "turns": 0,
13
19
  "language": "English",
14
20
  "topics": [],
15
- "tokens": 109,
16
- "messages": [{"user": "Can I ask something?", "assistant": "Sure!\n\n###\n\n"}]
21
+ "tokens": 109
17
22
  }
18
23
  ```
19
24
 
@@ -21,12 +26,11 @@ Make sure the following content requirements are all fulfilled:
21
26
 
22
27
  - keep the value of the "mode" property at "chat"
23
28
  - set the new prompt to the "prompt" property
24
- - create your response to the new prompt in accordance with the "messages" and set it to "response"
25
- - insert both the new prompt and the response after all the existing items in the "messages"
29
+ - create your response to the new prompt based on the PAST MESSAGES and set it to "response"
26
30
  - if the new prompt is in a language other than the current value of "language", set the name of the new prompt language to "language" and make sure that "response" is in that language
27
31
  - make your response in the same language as the new prompt
28
32
  - analyze the topic of the new prompt and insert it at the end of the value list of the "topics" property
29
- - avoid giving a response that is the same or similar to one of the previous responses in "messages"
33
+ - avoid giving a response that is the same or similar to one of the previous responses in PAST MESSAGES
30
34
  - program code in the response must be embedded in a code block in the markdown text
31
35
  - update the value of "tokens" with the number of tokens of the resulting JSON object
32
36
 
@@ -34,9 +38,8 @@ Make sure the following formal requirements are all fulfilled:
34
38
 
35
39
  - do not use invalid characters in the JSON object
36
40
  - escape double quotes and other special characters in the text values in the resulting JSON object
37
- - increment the value of "turns" by 1 and update the property so that the value of "turns" equals the number of the items in the "messages" of the resulting JSON object
41
+ - increment the value of "turns" by 1
38
42
  - check the validity of the generated JSON object and correct any possible parsing problems before returning it
39
-
40
43
  Add "\n\n###\n\n" at the end of the "response" value.
41
44
 
42
45
  Wrap the JSON object with "<JSON>\n" and "\n</JSON>".
data/apps/chat/chat.rb CHANGED
@@ -21,56 +21,54 @@ class Chat < MonadicApp
21
21
  "stop" => nil
22
22
  }
23
23
  method = OpenAI.model_to_method(params["model"])
24
- template = case method
25
- when "completions"
26
- TEMPLATES["research/chat"]
27
- when "chat/completions"
28
- TEMPLATES["normal/chat"]
29
- end
30
- super(params,
31
- template,
32
- {},
33
- "messages",
34
- "response",
35
- proc do |res|
24
+ case method
25
+ when "completions"
26
+ tjson = TEMPLATES["normal/chat"]
27
+ tmarkdown = TEMPLATES["research/chat"]
28
+ when "chat/completions"
29
+ tjson = TEMPLATES["normal/chat"]
30
+ tmarkdown = nil
31
+ end
32
+ super(params: params,
33
+ tjson: tjson,
34
+ tmarkdown: tmarkdown,
35
+ placeholders: {},
36
+ prop_accumulator: "messages",
37
+ prop_newdata: "response",
38
+ update_proc: proc do
36
39
  case method
37
40
  when "completions"
38
- obj = objectify
39
41
  ############################################################
40
- # Research mode recuder defined here #
41
- # obj: old Hash object #
42
- # res: new response Hash object to be modified #
42
+ # Research mode reducer defined here #
43
+ # @messages: messages to this point #
44
+ # @metadata: currently available metadata sent from GPT #
43
45
  ############################################################
46
+
44
47
  conditions = [
45
- res["messages"].size > 1,
46
- res["tokens"].to_i > params["max_tokens"].to_i / 2,
47
- !obj["topics"].empty?,
48
- res["topics"] != obj["topics"]
48
+ @messages.size > 1,
49
+ @metadata["tokens"].to_i > params["max_tokens"].to_i / 2
49
50
  ]
50
- if conditions.all?
51
- res["messages"].shift(1)
52
- res["turns"] = res["turns"].to_i - 1
53
- end
54
- res
51
+
52
+ @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
53
+
55
54
  when "chat/completions"
56
- # obj = objectify
57
55
  ############################################################
58
56
  # Normal mode reducer defined here #
59
- # obj: old Hash object (uncomment a line above before use) #
60
- # res: new response Hash object to be modified #
57
+ # @messages: messages to this point #
61
58
  ############################################################
59
+
62
60
  conditions = [
63
- res.size > @num_retained_turns * 2 + 1
61
+ @messages.size > @num_retained_turns * 2 + 1
64
62
  ]
63
+
65
64
  if conditions.all?
66
- res.each_with_index do |ele, i|
65
+ @messages.each_with_index do |ele, i|
67
66
  if ele["role"] != "system"
68
- res.delete_at i
67
+ @messages.delete_at i
69
68
  break
70
69
  end
71
70
  end
72
71
  end
73
- res
74
72
  end
75
73
  end
76
74
  )
data/apps/code/code.json CHANGED
@@ -1,4 +1,8 @@
1
1
  {"messages": [
2
2
  {"role": "system",
3
- "content": "You are a friendly but professional software engineer who answers various questions, write computer program code, make decent suggestions, give helpful advice in response to a prompt from the user."}
3
+ "content": "You are a friendly but professional software engineer who answers various questions, writes computer program code, makes decent suggestions, and gives helpful advice in response to a prompt from the user."},
4
+ {"role": "user",
5
+ "content": "Can I ask something?"},
6
+ {"role": "assistant",
7
+ "content": "Sure!"}
4
8
  ]}
data/apps/code/code.md CHANGED
@@ -1,8 +1,13 @@
1
- You are a friendly but professional computer software assistant capable of answering various questions, writing computer program code, making decent suggestions, and giving helpful advice in response to a new prompt from the user. Create a detailed response to the following new prompt from the user and set your response to the "response" property of the JSON object shown below. The preceding context is stored in the value of the "messages" property. Always try to make your response relavant to the preceding context.
1
+ {{SYSTEM}}
2
+
3
+ Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. In "PAST MESSAGES", "assistant" refers to you. Make your response as detailed as possible.
2
4
 
3
5
  NEW PROMPT: {{PROMPT}}
4
6
 
5
- Make your response as detailed as possible.
7
+ PAST MESSAGES:
8
+ {{MESSAGES}}
9
+
10
+ JSON:
6
11
 
7
12
  ```json
8
13
  {
@@ -12,8 +17,7 @@ Make your response as detailed as possible.
12
17
  "turns": 1,
13
18
  "language": "English",
14
19
  "topics": [],
15
- "tokens": 109,
16
- "messages": [{"user": "Can I ask something?", "assistant": "Sure!\n\n###\n\n"}]
20
+ "tokens": 109
17
21
  }
18
22
  ```
19
23
 
@@ -21,12 +25,11 @@ Make sure the following content requirements are all fulfilled:
21
25
 
22
26
  - keep the value of the "mode" property at "chat"
23
27
  - set the new prompt to the "prompt" property
24
- - create your response to the new prompt in accordance with the "messages" and set it to "response"
25
- - insert both the new prompt and the response after all the existing items in the "messages"
28
+ - create your response to the new prompt based on "PAST MESSAGES" and set it to "response"
26
29
  - if the prompt is in a language other than the current value of "language", set the name of the new prompt language to "language" and make sure that "response" is in that language
27
30
  - make your response in the same language as the new prompt
28
31
  - analyze the topic of the new prompt and insert it at the end of the value list of the "topics" property
29
- - avoid giving a response that is the same or similar to one of the previous responses in "messages"
32
+ - avoid giving a response that is the same or similar to one of the previous responses in "PAST MESSAGES"
30
33
  - program code in the response must be embedded in a code block in the markdown text
31
34
  - update the value of "tokens" with the number of tokens of the resulting JSON object
32
35
 
@@ -34,7 +37,7 @@ Make sure the following formal requirements are all fulfilled:
34
37
 
35
38
  - do not use invalid characters in the JSON object
36
39
  - escape double quotes and other special characters in the text values in the resulting JSON object
37
- - increment the value of "turns" by 1 and update the property so that the value of "turns" equals the number of the items in the "messages" of the resulting JSON object
40
+ - increment the value of "turns" by 1
38
41
  - check the validity of the generated JSON object and correct any possible parsing problems before returning it
39
42
 
40
43
  Add "\n\n###\n\n" at the end of the "response" value.
data/apps/code/code.rb CHANGED
@@ -21,54 +21,54 @@ class Code < MonadicApp
21
21
  "stop" => nil
22
22
  }
23
23
  method = OpenAI.model_to_method(params["model"])
24
- template = case method
25
- when "completions"
26
- TEMPLATES["research/code"]
27
- when "chat/completions"
28
- TEMPLATES["normal/code"]
29
- end
30
- super(params,
31
- template,
32
- {},
33
- "messages",
34
- "response",
35
- proc do |res|
24
+ case method
25
+ when "completions"
26
+ tjson = TEMPLATES["normal/code"]
27
+ tmarkdown = TEMPLATES["research/code"]
28
+ when "chat/completions"
29
+ tjson = TEMPLATES["normal/code"]
30
+ tmarkdown = nil
31
+ end
32
+ super(params: params,
33
+ tjson: tjson,
34
+ tmarkdown: tmarkdown,
35
+ placeholders: {},
36
+ prop_accumulator: "messages",
37
+ prop_newdata: "response",
38
+ update_proc: proc do
36
39
  case method
37
40
  when "completions"
38
- # obj = objectify
39
41
  ############################################################
40
- # Research mode recuder defined here #
41
- # obj: old Hash object (uncomment a line above before use) #
42
- # res: new response Hash object to be modified #
42
+ # Research mode reducer defined here #
43
+ # @messages: messages to this point #
44
+ # @metadata: currently available metadata sent from GPT #
43
45
  ############################################################
46
+
44
47
  conditions = [
45
- res["messages"].size > 1,
46
- res["tokens"].to_i > params["max_tokens"].to_i / 2
48
+ @messages.size > 1,
49
+ @metadata["tokens"].to_i > params["max_tokens"].to_i / 2
47
50
  ]
48
- if conditions.all?
49
- res["messages"].shift(1)
50
- res["turns"] = res["turns"].to_i - 1
51
- end
52
- res
51
+
52
+ @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
53
+
53
54
  when "chat/completions"
54
- # obj = objectify
55
55
  ############################################################
56
56
  # Normal mode reducer defined here #
57
- # obj: old Hash object (uncomment a line above before use) #
58
- # res: new response Hash object to be modified #
57
+ # @messages: messages to this point #
59
58
  ############################################################
59
+
60
60
  conditions = [
61
- res.size > @num_retained_turns * 2 + 1
61
+ @messages.size > @num_retained_turns * 2 + 1
62
62
  ]
63
+
63
64
  if conditions.all?
64
- res.each_with_index do |ele, i|
65
+ @messages.each_with_index do |ele, i|
65
66
  if ele["role"] != "system"
66
- res.delete_at i
67
+ @messages.delete_at i
67
68
  break
68
69
  end
69
70
  end
70
71
  end
71
- res
72
72
  end
73
73
  end
74
74
  )