monadic-chat 0.1.3 → 0.2.0

Sign up to get free protection for your applications and to get access to all the features.
data/apps/code/code.rb CHANGED
@@ -21,54 +21,54 @@ class Code < MonadicApp
21
21
  "stop" => nil
22
22
  }
23
23
  method = OpenAI.model_to_method(params["model"])
24
- template = case method
25
- when "completions"
26
- TEMPLATES["research/code"]
27
- when "chat/completions"
28
- TEMPLATES["normal/code"]
29
- end
30
- super(params,
31
- template,
32
- {},
33
- "messages",
34
- "response",
35
- proc do |res|
24
+ case method
25
+ when "completions"
26
+ tjson = TEMPLATES["normal/code"]
27
+ tmarkdown = TEMPLATES["research/code"]
28
+ when "chat/completions"
29
+ tjson = TEMPLATES["normal/code"]
30
+ tmarkdown = nil
31
+ end
32
+ super(params: params,
33
+ tjson: tjson,
34
+ tmarkdown: tmarkdown,
35
+ placeholders: {},
36
+ prop_accumulator: "messages",
37
+ prop_newdata: "response",
38
+ update_proc: proc do
36
39
  case method
37
40
  when "completions"
38
- # obj = objectify
39
41
  ############################################################
40
- # Research mode recuder defined here #
41
- # obj: old Hash object (uncomment a line above before use) #
42
- # res: new response Hash object to be modified #
42
+ # Research mode reducer defined here #
43
+ # @messages: messages to this point #
44
+ # @metadata: currently available metadata sent from GPT #
43
45
  ############################################################
46
+
44
47
  conditions = [
45
- res["messages"].size > 1,
46
- res["tokens"].to_i > params["max_tokens"].to_i / 2
48
+ @messages.size > 1,
49
+ @metadata["tokens"].to_i > params["max_tokens"].to_i / 2
47
50
  ]
48
- if conditions.all?
49
- res["messages"].shift(1)
50
- res["turns"] = res["turns"].to_i - 1
51
- end
52
- res
51
+
52
+ @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
53
+
53
54
  when "chat/completions"
54
- # obj = objectify
55
55
  ############################################################
56
56
  # Normal mode reducer defined here #
57
- # obj: old Hash object (uncomment a line above before use) #
58
- # res: new response Hash object to be modified #
57
+ # @messages: messages to this point #
59
58
  ############################################################
59
+
60
60
  conditions = [
61
- res.size > @num_retained_turns * 2 + 1
61
+ @messages.size > @num_retained_turns * 2 + 1
62
62
  ]
63
+
63
64
  if conditions.all?
64
- res.each_with_index do |ele, i|
65
+ @messages.each_with_index do |ele, i|
65
66
  if ele["role"] != "system"
66
- res.delete_at i
67
+ @messages.delete_at i
67
68
  break
68
69
  end
69
70
  end
70
71
  end
71
- res
72
72
  end
73
73
  end
74
74
  )
@@ -1,6 +1,6 @@
1
1
  {"messages": [
2
2
  {"role": "system",
3
- "content": "You are an English syntactic/semantic/pragmatic analyzer. Analyze the new prompt from the user below and execute a syntactic parsing. Give your response in a variation of the penn treebank format, but use brackets [ ] instead of parentheses ( ). Also, give your response in a markdown code span. The sentence must always be parsed if the user's input sentence is enclosed in double quotes. If pasing the sentence is extremely difficult, or the input is not enclosed in double quotes, let the user know."
3
+ "content": "You are an English syntactic/semantic/pragmatic analyzer. Analyze the new prompt from the user below and execute a syntactic parsing. Give your response in a variation of the penn treebank format, but use brackets [ ] instead of parentheses ( ). Also, give your response in a markdown code span. The sentence must always be parsed if the user's input sentence is enclosed in double quotes. Let the user know if parsing the sentence is difficult or the input must be enclosed in double quotes."
4
4
  },
5
5
  {"role": "user",
6
6
  "content": "\"We saw a beautiful sunset.\""
@@ -13,5 +13,11 @@
13
13
  },
14
14
  {"role": "assistant",
15
15
  "content": "`[S [NP We] [IP [I didn't] [VP [V take] [NP [Det a] [N picture] ] ] ] ] ]`"
16
+ },
17
+ {"role": "user",
18
+ "content": "\"We didn't have a camera.\""
19
+ },
20
+ {"role": "assistant",
21
+ "content": "`[S [NP We] [IP [I didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]`"
16
22
  }
17
23
  ]}
@@ -0,0 +1,46 @@
1
+ {{SYSTEM}}
2
+
3
+ All prompts by "user" in the "messages" property are continuous in content. If parsing the input sentence is extremely difficult, or the input is not enclosed in double quotes, let the user know.
4
+
5
+ Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. The preceding conversation is stored in "PAST MESSAGES". In "PAST MESSAGES", "assistant" refers to you.
6
+
7
+ NEW PROMPT: {{PROMPT}}
8
+
9
+ PAST MESSAGES:
10
+ {{MESSAGES}}
11
+
12
+ JSON:
13
+
14
+ ```json
15
+ {
16
+ "prompt": "\"We didn't have a camera.\"",
17
+ "response": "`[S [NP We] [VP [V didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]`\n\n###\n\n",
18
+ "mode": "linguistic",
19
+ "turns": 3,
20
+ "sentence_type": ["declarative"],
21
+ "sentiment": ["sad"],
22
+ "summary": "The user saw a beautiful sunset, but did not take a picture because the user did not have a camera.",
23
+ "tokens": 351
24
+ }
25
+ ```
26
+
27
+ Make sure the following content requirements are all fulfilled:
28
+
29
+ - keep the value of the "mode" property at "linguistic"
30
+ - set the new prompt to the "prompt" property
31
+ - create your response to the new prompt based on "PAST MESSAGES" and set it to "response"
32
+ - analyze the new prompt's sentence type and set a sentence type value such as "interrogative", "imperative", "exclamatory", or "declarative" to the "sentence_type" property
33
+ - analyze the new prompt's sentiment and set one or more sentiment types such as "happy", "excited", "troubled", "upset", or "sad" to the "sentiment" property
34
+ - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words, using discourse markers such as "because", "therefore", "but", and "so" to show the logical connection between the events
35
+ - update the value of "tokens" with the number of tokens of the resulting JSON object
36
+ - increment the value of "turns" by 1
37
+
38
+ Make sure the following formal requirements are all fulfilled:
39
+
40
+ - do not use invalid characters in the JSON object
41
+ - escape double quotes and other special characters in the text values in the resulting JSON object
42
+ - check the validity of the generated JSON object and correct any possible parsing problems before returning it
43
+
44
+ Add "\n\n###\n\n" at the end of the "response" value.
45
+
46
+ Wrap the JSON object with "<JSON>\n" and "\n</JSON>".
@@ -21,51 +21,54 @@ class Linguistic < MonadicApp
21
21
  "stop" => nil
22
22
  }
23
23
  method = OpenAI.model_to_method(params["model"])
24
- template = case method
25
- when "completions"
26
- TEMPLATES["research/linguistic"]
27
- when "chat/completions"
28
- TEMPLATES["normal/linguistic"]
29
- end
30
- super(params,
31
- template,
32
- {},
33
- "messages",
34
- "response",
35
- proc do |res|
24
+ case method
25
+ when "completions"
26
+ tjson = TEMPLATES["normal/linguistic"]
27
+ tmarkdown = TEMPLATES["research/linguistic"]
28
+ when "chat/completions"
29
+ tjson = TEMPLATES["normal/linguistic"]
30
+ tmarkdown = nil
31
+ end
32
+ super(params: params,
33
+ tjson: tjson,
34
+ tmarkdown: tmarkdown,
35
+ placeholders: {},
36
+ prop_accumulator: "messages",
37
+ prop_newdata: "response",
38
+ update_proc: proc do
36
39
  case method
37
40
  when "completions"
38
- # obj = objectify
39
41
  ############################################################
40
- # Research mode recuder defined here #
41
- # obj: old Hash object #
42
- # res: new response Hash object to be modified #
42
+ # Research mode reducer defined here #
43
+ # @messages: messages to this point #
44
+ # @metadata: currently available metadata sent from GPT #
43
45
  ############################################################
46
+
44
47
  conditions = [
45
- res["messages"].size > 1,
46
- res["tokens"].to_i > params["max_tokens"].to_i / 2
48
+ @messages.size > 1,
49
+ @metadata["tokens"].to_i > params["max_tokens"].to_i / 2
47
50
  ]
48
- if conditions.all?
49
- res["messages"].shift(1)
50
- res["turns"] = res["turns"].to_i - 1
51
- end
52
- res
51
+
52
+ @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
53
+
53
54
  when "chat/completions"
54
- # obj = objectify
55
55
  ############################################################
56
56
  # Normal mode reducer defined here #
57
- # obj: old Hash object (uncomment a line above before use) #
58
- # res: new response Hash object to be modified #
57
+ # @messages: messages to this point #
59
58
  ############################################################
60
- if res.size > @num_retained_turns * 2 + 1
61
- res.each_with_index do |ele, i|
59
+
60
+ conditions = [
61
+ @messages.size > @num_retained_turns * 2 + 1
62
+ ]
63
+
64
+ if conditions.all?
65
+ @messages.each_with_index do |ele, i|
62
66
  if ele["role"] != "system"
63
- res.delete_at i
67
+ @messages.delete_at i
64
68
  break
65
69
  end
66
70
  end
67
71
  end
68
- res
69
72
  end
70
73
  end
71
74
  )
@@ -1,4 +1,8 @@
1
1
  {"messages": [
2
2
  {"role": "system",
3
- "content": "You and I are collaboratively writing a novel. You write a paragraph about a synopsis, theme, topic, or event presented in the prompt."}
3
+ "content": "You and the user are collaboratively writing a novel. You write a paragraph elaborating on a synopsis, theme, topic, or event presented in the prompt."},
4
+ {"role": "user",
5
+ "content": "The preface to the novel is presented."},
6
+ {"role": "assistant",
7
+ "content": "What follows is a story that an AI assistant tells. It is guaranteed that this will be an incredibly realistic and exciting novel."}
4
8
  ]}
data/apps/novel/novel.md CHANGED
@@ -1,17 +1,21 @@
1
- You are a professional novel-writing AI assistant. You and the user are collaboratively writing a novel. You write a paragraph about a theme, topic, or event presented in the new prompt below. The preceding prompts and paragraphs are contained in the "messages" property.
1
+ {{SYSTEM}}
2
+
3
+ Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. The preceding conversation is stored in "PAST MESSAGES". In "PAST MESSAGES", "assistant" refers to you.
2
4
 
3
5
  NEW PROMPT: {{PROMPT}}
4
6
 
5
- Your response must be returned in the form of a JSON object having the structure shown below:
7
+ PAST MESSAGES:
8
+ {{MESSAGES}}
9
+
10
+ JSON:
6
11
 
7
12
  ```json
8
13
  {
9
- "prompt": "The prefice to the novel is presented",
10
- "response": "What follows is the story that an AI assistant tells. It is guaranteed that this will be an incredibly realistic and interesting novel.\n\n###\n\n",
14
+ "prompt": "The preface to the novel is presented",
15
+ "response": "What follows is a story that an AI assistant tells. It is guaranteed that this will be an incredibly realistic and interesting novel.\n\n###\n\n",
11
16
  "mode": "novel",
12
17
  "turns": 1,
13
- "tokens": 147,
14
- "messages": [{"user": "The prefice to the novel is presented", "assistant": "What follows is the story that an assistant tells. It is guaranteed that this will be an incredibly realistic and interesting novel.\n\n###\n\n"}]
18
+ "tokens": 147
15
19
  }
16
20
  ```
17
21
 
@@ -20,15 +24,15 @@ Make sure the following content requirements are all fulfilled:
20
24
  - keep the value of the "mode" property at "novel"
21
25
  - set the new prompt to the "prompt" property
22
26
  - create your new paragraph in response to the new prompt and set it to "response"
23
- - do not repeat in your response what is already told in the "messages"
24
- - insert both the new prompt and the response after all the existing items in the "messages"
27
+ - do not repeat in your response what is already told in "PAST MESSAGES"
25
28
  - update the value of "tokens" with the number of tokens of the resulting JSON object
29
+ - Make your response as detailed as possible within the maximum limit of 200 words
26
30
 
27
31
  Make sure the following formal requirements are all fulfilled:
28
32
 
29
33
  - do not use invalid characters in the JSON object
30
34
  - escape double quotes and other special characters in the text values in the resulting JSON object
31
- - increment the value of "turns" by 1 and update the property so that the value of "turns" equals the number of the items in the "messages" of the resulting JSON object
35
+ - increment the value of "turns" by 1
32
36
  - check the validity of the generated JSON object and correct any possible parsing problems before returning it
33
37
 
34
38
  Add "\n\n###\n\n" at the end of the "response" value.
data/apps/novel/novel.rb CHANGED
@@ -21,54 +21,54 @@ class Novel < MonadicApp
21
21
  "stop" => nil
22
22
  }
23
23
  method = OpenAI.model_to_method(params["model"])
24
- template = case method
25
- when "completions"
26
- TEMPLATES["research/novel"]
27
- when "chat/completions"
28
- TEMPLATES["normal/novel"]
29
- end
30
- super(params,
31
- template,
32
- {},
33
- "messages",
34
- "response",
35
- proc do |res|
24
+ case method
25
+ when "completions"
26
+ tjson = TEMPLATES["normal/novel"]
27
+ tmarkdown = TEMPLATES["research/novel"]
28
+ when "chat/completions"
29
+ tjson = TEMPLATES["normal/novel"]
30
+ tmarkdown = nil
31
+ end
32
+ super(params: params,
33
+ tjson: tjson,
34
+ tmarkdown: tmarkdown,
35
+ placeholders: {},
36
+ prop_accumulator: "messages",
37
+ prop_newdata: "response",
38
+ update_proc: proc do
36
39
  case method
37
40
  when "completions"
38
- # obj = objectify
39
41
  ############################################################
40
- # Research mode recuder defined here #
41
- # obj: old Hash object (uncomment a line above before use) #
42
- # res: new response Hash object to be modified #
42
+ # Research mode reducer defined here #
43
+ # @messages: messages to this point #
44
+ # @metadata: currently available metadata sent from GPT #
43
45
  ############################################################
46
+
44
47
  conditions = [
45
- res["messages"].size > 1,
46
- res["tokens"].to_i > params["max_tokens"].to_i / 2
48
+ @messages.size > 1,
49
+ @metadata["tokens"].to_i > params["max_tokens"].to_i / 2
47
50
  ]
48
- if conditions.all?
49
- res["messages"].shift(1)
50
- res["turns"] = res["turns"].to_i - 1
51
- end
52
- res
51
+
52
+ @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
53
+
53
54
  when "chat/completions"
54
- # obj = objectify
55
55
  ############################################################
56
56
  # Normal mode reducer defined here #
57
- # obj: old Hash object (uncomment a line above before use) #
58
- # res: new response Hash object to be modified #
57
+ # @messages: messages to this point #
59
58
  ############################################################
59
+
60
60
  conditions = [
61
- res.size > @num_retained_turns * 2 + 1
61
+ @messages.size > @num_retained_turns * 2 + 1
62
62
  ]
63
+
63
64
  if conditions.all?
64
- res.each_with_index do |ele, i|
65
+ @messages.each_with_index do |ele, i|
65
66
  if ele["role"] != "system"
66
- res.delete_at i
67
+ @messages.delete_at i
67
68
  break
68
69
  end
69
70
  end
70
71
  end
71
- res
72
72
  end
73
73
  end
74
74
  )
@@ -1,4 +1,4 @@
1
1
  {"messages": [
2
2
  {"role": "system",
3
- "content": "You are a multilingual translator capable of professionally translating many languages. Translate the given text to {{TARGET_LANG}} in a way that the new sentence sounds connected to the preceding text. If there is specific translation that should be used for a particular expression, the user present the translation in a pair parentheses right after the original expression, which is enclose by a pair of brackets. Check both current and preceding user messages and use those specific translations every time a corresponding expression appears in the user input."}
3
+ "content": "You are a multilingual translator capable of professionally translating many languages. Translate the given text to {{TARGET_LANG}}. If a specific translation should be used for a particular expression, the user presents the translation in a pair of parentheses right after the original expression. Check both current and preceding user messages and use those specific translations every time a corresponding expression appears in the user input."}
4
4
  ]}
@@ -1,18 +1,22 @@
1
- You are a multilingual translator AI assistant capable of professionally translating many languages. Translate the text from the user presented in the new prompt below to {{TARGET_LANG}} in a way that the new sentence sounds connected to the preceding text in the "messages".If there is specific translation that should be used for a particular expression, the user present the translation in a pair parentheses right after the original expression, which is enclose by a pair of brackets. Check both current and preceding user messages and use those specific translations every time a corresponding expression appears in the user input.
1
+ {{SYSTEM}}
2
+
3
+ Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. The preceding conversation is stored in "PAST MESSAGES". In "PAST MESSAGES", "assistant" refers to you. Make your response as detailed as possible.
2
4
 
3
5
  NEW PROMPT: {{PROMPT}}
4
6
 
5
- Your response must be returned in the form of a JSON object having the structure shown below:
7
+ PAST MESSAGES:
8
+ {{MESSAGES}}
9
+
10
+ JSON:
6
11
 
7
12
  ```json
8
13
  {
9
14
  "mode": "translate",
10
- "turns": 2,
11
- "prompt": "これは日本語の文です。",
15
+ "turns": 0,
16
+ "prompt": "これは日本語(Japanese)の文(sentence)です。",
12
17
  "response": "This is a sentence in Japanese.\n\n###\n\n",
13
18
  "target_lang": "English",
14
- "tokens": 194,
15
- "messages": [{"user": "Original and translated text follow(続きます).", "assistant": "原文と翻訳文が続きます。\n\n###\n\n"}, {"user": "これは日本語の文(sentence)です。", "assistant": "This is a sentence in Japanese.\n\n###\n\n"}]
19
+ "tokens": 194
16
20
  }
17
21
  ```
18
22
 
@@ -20,17 +24,16 @@ Make sure the following requirements are all fulfilled:
20
24
 
21
25
  - keep the value of the "mode" property at "translate"
22
26
  - set the text in the new prompt presented above to the "prompt" property
23
- - translate the new prompt text to the language specified in the "target_lang" and set the translation to the "response" property
24
- - insert the new prompt text and the newly created "response" after all the existing items in the "messages"
27
+ - translate the new prompt text to the language specified in the "target_lang"
28
+ and set the translation to the "response" property
25
29
  - update the value of "tokens" with the number of tokens of the resulting JSON object
26
30
 
27
31
  Make sure the following formal requirements are all fulfilled:
28
32
 
29
33
  - do not use invalid characters in the JSON object
30
34
  - escape double quotes and other special characters in the text values in the resulting JSON object
31
- - increment the value of "turns" by 1 and update the property so that the value of "turns" equals the number of the items in the "messages" of the resulting JSON object
35
+ - increment the value of "turns" by 1
32
36
  - check the validity of the generated JSON object and correct any possible parsing problems before returning it
33
- - wrap the JSON object with "<JSON>\n" and "\n</JSON>" (IMPORTANT)
34
37
 
35
38
  Add "\n\n###\n\n" at the end of the "response" value.
36
39
 
@@ -25,54 +25,54 @@ class Translate < MonadicApp
25
25
  "{{TARGET_LANG}}" => "Enter target language"
26
26
  }
27
27
  method = OpenAI.model_to_method(params["model"])
28
- template = case method
29
- when "completions"
30
- TEMPLATES["research/translate"]
31
- when "chat/completions"
32
- TEMPLATES["normal/translate"]
33
- end
34
- super(params,
35
- template,
36
- replacements,
37
- "messages",
38
- "response",
39
- proc do |res|
28
+ case method
29
+ when "completions"
30
+ tjson = TEMPLATES["normal/translate"]
31
+ tmarkdown = TEMPLATES["research/translate"]
32
+ when "chat/completions"
33
+ tjson = TEMPLATES["normal/translate"]
34
+ tmarkdown = nil
35
+ end
36
+ super(params: params,
37
+ tjson: tjson,
38
+ tmarkdown: tmarkdown,
39
+ placeholders: replacements,
40
+ prop_accumulator: "messages",
41
+ prop_newdata: "response",
42
+ update_proc: proc do
40
43
  case method
41
44
  when "completions"
42
- # obj = objectify
43
45
  ############################################################
44
- # Research mode recuder defined here #
45
- # obj: old Hash object (uncomment a line above before use) #
46
- # res: new response Hash object to be modified #
46
+ # Research mode reducer defined here #
47
+ # @messages: messages to this point #
48
+ # @metadata: currently available metadata sent from GPT #
47
49
  ############################################################
50
+
48
51
  conditions = [
49
- res["messages"].size > 1,
50
- res["tokens"].to_i > params["max_tokens"].to_i / 2
52
+ @messages.size > 1,
53
+ @metadata["tokens"].to_i > params["max_tokens"].to_i / 2
51
54
  ]
52
- if conditions.all?
53
- res["messages"].shift(1)
54
- res["turns"] = res["turns"].to_i - 1
55
- end
56
- res
55
+
56
+ @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
57
+
57
58
  when "chat/completions"
58
- # obj = objectify
59
59
  ############################################################
60
60
  # Normal mode reducer defined here #
61
- # obj: old Hash object (uncomment a line above before use) #
62
- # res: new response Hash object to be modified #
61
+ # @messages: messages to this point #
63
62
  ############################################################
63
+
64
64
  conditions = [
65
- res.size > @num_retained_turns * 2 + 1
65
+ @messages.size > @num_retained_turns * 2 + 1
66
66
  ]
67
+
67
68
  if conditions.all?
68
- res.each_with_index do |ele, i|
69
+ @messages.each_with_index do |ele, i|
69
70
  if ele["role"] != "system"
70
- res.delete_at i
71
+ @messages.delete_at i
71
72
  break
72
73
  end
73
74
  end
74
75
  end
75
- res
76
76
  end
77
77
  end
78
78
  )
data/bin/monadic-chat CHANGED
@@ -57,6 +57,7 @@ module MonadicMenu
57
57
  openai_completion ||= MonadicChat.authenticate
58
58
  exit unless openai_completion
59
59
 
60
+ max_app_name_width = APPS.reduce(8) { |accum, app| app.length > accum ? app.length : accum } + 2
60
61
  parameter = PROMPT_SYSTEM.select(" Current mode: #{print_mode.call(mode)}\n\nSelect item:",
61
62
  per_page: 10,
62
63
  cycle: true,
@@ -67,18 +68,18 @@ module MonadicMenu
67
68
  next unless TEMPLATES["#{mode}/#{app}"]
68
69
 
69
70
  desc = eval("#{app.capitalize}::DESC", binding, __FILE__, __LINE__)
70
- menu.choice "#{BULLET} #{PASTEL.bold(app.capitalize)} #{desc}", app
71
+ menu.choice "#{BULLET} #{PASTEL.bold(app.capitalize.ljust(max_app_name_width))} #{desc}", app
71
72
  end
72
73
 
73
74
  case mode
74
75
  when "research"
75
- menu.choice "#{BULLET} #{PASTEL.bold("Mode")} Switch from #{PASTEL.bold.red("Research")} (current) to #{PASTEL.bold.green("Normal")}", "mode"
76
+ menu.choice "#{BULLET} #{PASTEL.bold("Mode".ljust(max_app_name_width))} Switch from #{PASTEL.bold.red("Research")} (current) to #{PASTEL.bold.green("Normal")}", "mode"
76
77
  when "normal"
77
- menu.choice "#{BULLET} #{PASTEL.bold("Mode")} Switch from #{PASTEL.bold.green("Normal")} (current) to #{PASTEL.bold.red("Research")}", "mode"
78
+ menu.choice "#{BULLET} #{PASTEL.bold("Mode".ljust(max_app_name_width))} Switch from #{PASTEL.bold.green("Normal")} (current) to #{PASTEL.bold.red("Research")}", "mode"
78
79
  end
79
80
 
80
- menu.choice "#{BULLET} #{PASTEL.bold("Readme")} Open Readme/Documentation", "readme"
81
- menu.choice "#{BULLET} #{PASTEL.bold("Quit")} Quit/Exit/Bye", "exit"
81
+ menu.choice "#{BULLET} #{PASTEL.bold("Readme".ljust(max_app_name_width))} Open Readme/Documentation", "readme"
82
+ menu.choice "#{BULLET} #{PASTEL.bold("Quit".ljust(max_app_name_width))} Quit/Exit/Bye", "exit"
82
83
  end
83
84
 
84
85
  begin
Binary file
Binary file
data/lib/monadic_app.rb CHANGED
@@ -12,26 +12,34 @@ Thread.abort_on_exception = false
12
12
 
13
13
  class MonadicApp
14
14
  include MonadicChat
15
- attr_reader :template
15
+ attr_reader :template, :messages
16
16
 
17
- def initialize(params, template, placeholders, prop_accumulated, prop_newdata, update_proc)
17
+ def initialize(params:, tjson:, tmarkdown:, placeholders:, prop_accumulator:, prop_newdata:, update_proc:)
18
18
  @threads = Thread::Queue.new
19
19
  @responses = Thread::Queue.new
20
20
  @placeholders = placeholders
21
- @prop_accumulated = prop_accumulated
21
+ @prop_accumulator = prop_accumulator
22
22
  @prop_newdata = prop_newdata
23
23
  @completion = nil
24
24
  @update_proc = update_proc
25
- @params_original = params
26
- @params = @params_original.dup
27
- @template_original = File.read(template)
25
+ @params_initial = params
26
+ @params = @params_initial.dup
27
+ @html = false
28
+
28
29
  @method = OpenAI.model_to_method @params["model"]
29
30
 
31
+ @metadata = {}
32
+
33
+ @messages_initial = JSON.parse(File.read(tjson))["messages"]
34
+ @messages = @messages_initial.dup
35
+
30
36
  case @method
31
37
  when "completions"
32
- @template = @template_original.dup
38
+ @template_initial = File.read(tmarkdown)
39
+ @template = @template_initial.dup
33
40
  when "chat/completions"
34
- @template = JSON.parse @template_original
41
+ @template_initial = ""
42
+ @template = ""
35
43
  end
36
44
  end
37
45
 
@@ -54,7 +62,8 @@ class MonadicApp
54
62
  when /\A\s*(?:data|context)\s*\z/i
55
63
  show_data
56
64
  when /\A\s*(?:html)\s*\z/i
57
- set_html
65
+ @html = true
66
+ show_html
58
67
  when /\A\s*(?:save)\s*\z/i
59
68
  save_data
60
69
  when /\A\s*(?:load)\s*\z/i