monadic-chat 0.2.1 → 0.3.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +1 -1
- data/Gemfile.lock +4 -1
- data/README.md +91 -115
- data/apps/chat/chat.json +5 -1
- data/apps/chat/chat.md +4 -5
- data/apps/chat/chat.rb +13 -18
- data/apps/code/code.md +2 -4
- data/apps/code/code.rb +13 -18
- data/apps/linguistic/linguistic.json +1 -1
- data/apps/linguistic/linguistic.md +2 -4
- data/apps/linguistic/linguistic.rb +13 -18
- data/apps/novel/novel.md +2 -4
- data/apps/novel/novel.rb +13 -18
- data/apps/translate/translate.md +2 -4
- data/apps/translate/translate.rb +13 -18
- data/lib/monadic_app.rb +7 -22
- data/lib/monadic_chat/formatting.rb +1 -4
- data/lib/monadic_chat/interaction.rb +12 -12
- data/lib/monadic_chat/internals.rb +35 -181
- data/lib/monadic_chat/menu.rb +8 -8
- data/lib/monadic_chat/open_ai.rb +29 -18
- data/lib/monadic_chat/parameters.rb +12 -11
- data/lib/monadic_chat/version.rb +1 -1
- data/lib/monadic_chat.rb +43 -13
- data/monadic_chat.gemspec +1 -0
- metadata +16 -4
- data/doc/img/extra-template-json.png +0 -0
- data/doc/img/langacker-2001.svg +0 -41
data/apps/code/code.rb
CHANGED
@@ -8,36 +8,31 @@ class Code < MonadicApp

   attr_accessor :template, :config, :params, :completion

-  def initialize(openai_completion, research_mode: false, stream: true)
+  def initialize(openai_completion, research_mode: false, stream: true, params: {})
     @num_retained_turns = 10
     params = {
       "temperature" => 0.0,
       "top_p" => 1.0,
       "presence_penalty" => 0.0,
       "frequency_penalty" => 0.0,
-      "model" =>
+      "model" => openai_completion.model_name(research_mode: research_mode),
       "max_tokens" => 2000,
       "stream" => stream,
       "stop" => nil
-    }
-
-
-
-
-
-
-
-      tmarkdown = nil
-    end
-    super(params: params,
-          tjson: tjson,
-          tmarkdown: tmarkdown,
+    }.merge(params)
+    mode = research_mode ? :research : :normal
+    template_json = TEMPLATES["normal/code"]
+    template_md = TEMPLATES["research/code"]
+    super(mode: mode,
+          params: params,
+          template_json: template_json,
+          template_md: template_md,
           placeholders: {},
           prop_accumulator: "messages",
           prop_newdata: "response",
           update_proc: proc do
-            case
-            when
+            case mode
+            when :research
              ############################################################
              # Research mode reduder defined here #
              # @messages: messages to this point #
@@ -51,7 +46,7 @@ class Code < MonadicApp

       @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?

-            when
+            when :normal
              ############################################################
              # Normal mode recuder defined here #
              # @messages: messages to this point #
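The constructor change above replaces the old mode-specific template plumbing with a defaults hash that caller-supplied `params` can override via `Hash#merge`. A minimal sketch of that pattern; the override values are made up for illustration and are not taken from the gem:

```ruby
# Defaults the app supplies, mirroring the keys in the diff above.
defaults = {
  "temperature"       => 0.0,
  "top_p"             => 1.0,
  "presence_penalty"  => 0.0,
  "frequency_penalty" => 0.0,
  "max_tokens"        => 2000,
  "stream"            => true,
  "stop"              => nil
}

# Hypothetical caller overrides; Hash#merge lets these win over the defaults.
overrides = { "temperature" => 0.7, "max_tokens" => 1000 }
params = defaults.merge(overrides)

puts params["temperature"] # => 0.7 (override wins)
puts params["top_p"]       # => 1.0 (default kept)
```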
data/apps/linguistic/linguistic.json
CHANGED
@@ -1,6 +1,6 @@
 {"messages": [
   {"role": "system",
-   "content": "You are an English syntactic/semantic/pragmatic analyzer. Analyze the new prompt from the user
+   "content": "You are an English syntactic/semantic/pragmatic analyzer. Analyze the new prompt from the user and execute a syntactic parsing. Give your response in a variation of the penn treebank format, but use brackets [ ] instead of parentheses ( ). Also, give your response in a markdown code span. Let the user know if parsing the given sentence is impossible."
   },
   {"role": "user",
    "content": "\"We saw a beautiful sunset.\""
data/apps/linguistic/linguistic.md
CHANGED
@@ -14,7 +14,7 @@ JSON:
 ```json
 {
   "prompt": "\"We didn't have a camera.\"",
-  "response": "`[S [NP We] [VP [V didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]
+  "response": "`[S [NP We] [VP [V didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]`",
   "mode": "linguistic",
   "turns": 3,
   "sentence_type": ["declarative"],
@@ -41,6 +41,4 @@ Make sure the following formal requirements are all fulfilled:
 - escape double quotes and other special characters in the text values in the resulting JSON object
 - check the validity of the generated JSON object and correct any possible parsing problems before returning it

-
-
-Wrap the JSON object with "<JSON>\n" and "\n</JSON>".
+Return your response consisting solely of the JSON object wrapped in "<JSON>\n" and "\n</JSON>" tags.
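The reworded template instruction asks the model to return nothing but a JSON object wrapped in "<JSON>" and "</JSON>" tags. A sketch of how such a reply could be unwrapped and parsed on the receiving side; this helper is illustrative only and is not the gem's own parsing code:

```ruby
require "json"

# Illustrative helper (not from the gem): pull the JSON object out of a reply
# wrapped in <JSON> ... </JSON> tags, as the revised templates request.
def extract_json(reply)
  match = reply.match(%r{<JSON>\s*(\{.*\})\s*</JSON>}m)
  match && JSON.parse(match[1])
end

reply = <<~TEXT
  The model may still say something here.
  <JSON>
  {"mode": "linguistic", "turns": 4, "response": "`[S [NP We] [VP ...] ]`"}
  </JSON>
TEXT

p extract_json(reply) # => {"mode"=>"linguistic", "turns"=>4, "response"=>"`[S [NP We] [VP ...] ]`"}
```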
data/apps/linguistic/linguistic.rb
CHANGED
@@ -8,36 +8,31 @@ class Linguistic < MonadicApp

   attr_accessor :template, :config, :params, :completion

-  def initialize(openai_completion, research_mode: false, stream: true)
+  def initialize(openai_completion, research_mode: false, stream: true, params: {})
     @num_retained_turns = 10
     params = {
       "temperature" => 0.0,
       "top_p" => 1.0,
       "presence_penalty" => 0.0,
       "frequency_penalty" => 0.0,
-      "model" =>
+      "model" => openai_completion.model_name(research_mode: research_mode),
      "max_tokens" => 2000,
      "stream" => stream,
      "stop" => nil
-    }
-
-
-
-
-
-
-
-      tmarkdown = nil
-    end
-    super(params: params,
-          tjson: tjson,
-          tmarkdown: tmarkdown,
+    }.merge(params)
+    mode = research_mode ? :research : :normal
+    template_json = TEMPLATES["normal/linguistic"]
+    template_md = TEMPLATES["research/linguistic"]
+    super(mode: mode,
+          params: params,
+          template_json: template_json,
+          template_md: template_md,
           placeholders: {},
           prop_accumulator: "messages",
           prop_newdata: "response",
           update_proc: proc do
-            case
-            when
+            case mode
+            when :research
              ############################################################
              # Research mode reduder defined here #
              # @messages: messages to this point #
@@ -51,7 +46,7 @@ class Linguistic < MonadicApp

       @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?

-            when
+            when :normal
              ############################################################
              # Normal mode recuder defined here #
              # @messages: messages to this point #
data/apps/novel/novel.md
CHANGED
@@ -12,7 +12,7 @@ JSON:
 ```json
 {
   "prompt": "The preface to the novel is presented",
-  "response": "What follows is a story that an AI assistant tells. It is guaranteed that this will be an incredibly realistic and interesting novel
+  "response": "What follows is a story that an AI assistant tells. It is guaranteed that this will be an incredibly realistic and interesting novel.",
   "mode": "novel",
   "turns": 1,
   "tokens": 147
@@ -35,6 +35,4 @@ Make sure the following formal requirements are all fulfilled:
 - increment the value of "turns" by 1
 - check the validity of the generated JSON object and correct any possible parsing problems before returning it

-
-
-Wrap the JSON object with "<JSON>\n" and "\n</JSON>".
+Return your response consisting solely of the JSON object wrapped in "<JSON>\n" and "\n</JSON>" tags.
data/apps/novel/novel.rb
CHANGED
@@ -8,36 +8,31 @@ class Novel < MonadicApp

   attr_accessor :template, :config, :params, :completion

-  def initialize(openai_completion, research_mode: false, stream: true)
+  def initialize(openai_completion, research_mode: false, stream: true, params: {})
     @num_retained_turns = 10
     params = {
       "temperature" => 0.3,
       "top_p" => 1.0,
       "presence_penalty" => 0.1,
       "frequency_penalty" => 0.1,
-      "model" =>
+      "model" => openai_completion.model_name(research_mode: research_mode),
      "max_tokens" => 2000,
      "stream" => stream,
      "stop" => nil
-    }
-
-
-
-
-
-
-
-      tmarkdown = nil
-    end
-    super(params: params,
-          tjson: tjson,
-          tmarkdown: tmarkdown,
+    }.merge(params)
+    mode = research_mode ? :research : :normal
+    template_json = TEMPLATES["normal/novel"]
+    template_md = TEMPLATES["research/novel"]
+    super(mode: research_mode ? :research : :normal,
+          params: params,
+          template_json: template_json,
+          template_md: template_md,
           placeholders: {},
           prop_accumulator: "messages",
           prop_newdata: "response",
           update_proc: proc do
-            case
-            when
+            case mode
+            when :research
              ############################################################
              # Research mode reduder defined here #
              # @messages: messages to this point #
@@ -51,7 +46,7 @@ class Novel < MonadicApp

       @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?

-            when
+            when :normal
              ############################################################
              # Normal mode recuder defined here #
              # @messages: messages to this point #
data/apps/translate/translate.md
CHANGED
@@ -14,7 +14,7 @@ JSON:
   "mode": "translate",
   "turns": 0,
   "prompt": "これは日本語(Japanese)の文(sentence)です。",
-  "response": "This is a sentence in Japanese
+  "response": "This is a sentence in Japanese.",
   "target_lang": "English",
   "tokens": 194
 }
@@ -35,6 +35,4 @@ Make sure the following formal requirements are all fulfilled:
 - increment the value of "turns" by 1
 - check the validity of the generated JSON object and correct any possible parsing problems before returning it

-
-
-Wrap the JSON object with "<JSON>\n" and "\n</JSON>".
+Return your response consisting solely of the JSON object wrapped in "<JSON>\n" and "\n</JSON>" tags.
data/apps/translate/translate.rb
CHANGED
@@ -8,40 +8,35 @@ class Translate < MonadicApp

   attr_accessor :template, :config, :params, :completion

-  def initialize(openai_completion, replacements: nil, research_mode: false, stream: true)
+  def initialize(openai_completion, replacements: nil, research_mode: false, stream: true, params: {})
     @num_retained_turns = 10
     params = {
       "temperature" => 0.2,
       "top_p" => 1.0,
       "presence_penalty" => 0.0,
       "frequency_penalty" => 0.0,
-      "model" =>
+      "model" => openai_completion.model_name(research_mode: research_mode),
      "max_tokens" => 2000,
      "stream" => stream,
      "stop" => nil
-    }
+    }.merge(params)
     replacements ||= {
       "mode" => :interactive,
       "{{TARGET_LANG}}" => "Enter target language"
     }
-
-
-
-
-
-
-
-      tmarkdown = nil
-    end
-    super(params: params,
-          tjson: tjson,
-          tmarkdown: tmarkdown,
+    mode = research_mode ? :research : :normal
+    template_json = TEMPLATES["normal/translate"]
+    template_md = TEMPLATES["research/translate"]
+    super(mode: research_mode ? :research : :normal,
+          params: params,
+          template_json: template_json,
+          template_md: template_md,
           placeholders: replacements,
           prop_accumulator: "messages",
           prop_newdata: "response",
           update_proc: proc do
-            case
-            when
+            case mode
+            when :research
              ############################################################
              # Research mode reduder defined here #
              # @messages: messages to this point #
@@ -55,7 +50,7 @@ class Translate < MonadicApp

       @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?

-            when
+            when :normal
              ############################################################
              # Normal mode recuder defined here #
              # @messages: messages to this point #
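translate.rb still passes a `replacements` hash (for example `"{{TARGET_LANG}}" => "Enter target language"`) to `placeholders:`. How those placeholders are ultimately filled is handled inside the gem; the sketch below only illustrates the general substitution idea, with a hard-coded answer standing in for user input:

```ruby
# Illustrative only: one way a "{{TARGET_LANG}}" placeholder could be filled in.
# In the gem, the "mode" => :interactive entry drives an interactive prompt;
# here the user's answer is simply hard-coded.
template = "Translate the following text into {{TARGET_LANG}}."
answers  = { "{{TARGET_LANG}}" => "English" } # pretend the user typed this

filled = answers.reduce(template) { |text, (key, value)| text.gsub(key, value) }
puts filled # => "Translate the following text into English."
```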
data/lib/monadic_app.rb
CHANGED
@@ -8,15 +8,12 @@ require_relative "./monadic_chat/menu"
 require_relative "./monadic_chat/parameters"
 require_relative "./monadic_chat/internals"

-Thread.abort_on_exception = false
-
 class MonadicApp
   include MonadicChat
   attr_reader :template, :messages

-  def initialize(params:,
-    @
-    @responses = Thread::Queue.new
+  def initialize(mode:, params:, template_json:, template_md:, placeholders:, prop_accumulator:, prop_newdata:, update_proc:)
+    @mode = mode.to_sym
     @placeholders = placeholders
     @prop_accumulator = prop_accumulator
     @prop_newdata = prop_newdata
@@ -26,21 +23,14 @@ class MonadicApp
     @params = @params_initial.dup
     @html = false

-    @method = OpenAI.model_to_method
+    @method = OpenAI.model_to_method(@params["model"])

     @metadata = {}
-
-    @messages_initial = JSON.parse(File.read(tjson))["messages"]
+    @messages_initial = JSON.parse(File.read(template_json))["messages"]
     @messages = @messages_initial.dup

-
-
-      @template_initial = File.read(tmarkdown)
-      @template = @template_initial.dup
-    when NORMAL_MODE
-      @template_initial = ""
-      @template = ""
-    end
+    @template_initial = File.read(template_md)
+    @template = @template_initial.dup
   end

   ##################################################
@@ -75,12 +65,7 @@ class MonadicApp
       else
         if input && confirm_query(input)
           begin
-
-            when RESEARCH_MODE
-              bind_research_mode(input, num_retry: NUM_RETRY)
-            when NORMAL_MODE
-              bind_normal_mode(input, num_retry: NUM_RETRY)
-            end
+            bind(input, num_retry: NUM_RETRY)
           rescue StandardError => e
             input = ask_retrial(input, e.message)
             next
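`MonadicApp#initialize` now takes explicit keyword arguments instead of the old mode-dependent `case` block. The sketch below mirrors that shape in a stripped-down class; the temp-file templates and parameter values are placeholders, not the gem's actual data or code:

```ruby
require "json"
require "tempfile"

# Stripped-down sketch of the new keyword-based initializer (not the gem's code).
class MiniApp
  attr_reader :mode, :messages, :template

  def initialize(mode:, params:, template_json:, template_md:)
    @mode     = mode.to_sym                                      # :research or :normal
    @params   = params
    @messages = JSON.parse(File.read(template_json))["messages"] # seed conversation
    @template = File.read(template_md)                           # research-mode template text
  end
end

# Throwaway template files so the example is self-contained.
json_file = Tempfile.new(["mini", ".json"])
json_file.write('{"messages": [{"role": "system", "content": "You are a helpful assistant."}]}')
json_file.close

md_file = Tempfile.new(["mini", ".md"])
md_file.write("Return your response wrapped in <JSON> and </JSON> tags.\n")
md_file.close

app = MiniApp.new(mode: :research,
                  params: { "model" => "example-model" },        # placeholder value
                  template_json: json_file.path,
                  template_md: md_file.path)

puts app.mode                       # => research
puts app.messages.first["content"]  # => You are a helpful assistant.
```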
data/lib/monadic_chat/formatting.rb
CHANGED
@@ -17,7 +17,7 @@ class MonadicApp
     end

     @messages.each do |m|
-      accumulator << "#{m["role"].capitalize}: #{m["content"]}"
+      accumulator << "#{m["role"].capitalize}: #{m["content"]}"
     end

     h1 = "# Monadic :: Chat / #{self.class.name}"
@@ -32,8 +32,6 @@ class MonadicApp
   def show_data
     print PROMPT_SYSTEM.prefix

-    wait
-
     res = format_data
     print "\n#{TTY::Markdown.parse(res, indent: 0)}"
   end
@@ -50,7 +48,6 @@ class MonadicApp
   end

   def show_html
-    wait
     set_html
     print PROMPT_SYSTEM.prefix
     print "HTML is ready\n"
data/lib/monadic_chat/interaction.rb
CHANGED
@@ -6,25 +6,25 @@ class MonadicApp
   ##################################################

   def user_input(text = "")
-    if count_lines_below < 1
-
-
-    else
-
-
-
-    end
+    # if count_lines_below < 1
+    #   ask_clear
+    #   user_input
+    # else
+    res = PROMPT_USER.readline(text)
+    print TTY::Cursor.clear_line_after
+    res == "" ? nil : res
+    # end
   end

   def show_greet
-    current_mode = case @
-                   when
+    current_mode = case @mode
+                   when :research
                      PASTEL.red("Research")
-                   when
+                   when :normal
                      PASTEL.green("Normal")
                    end
     greet_md = <<~GREET
-      - You are currently in **#{current_mode}** mode
+      - You are currently in **#{current_mode}** mode (#{@params["model"]})
       - Type **help** or **menu** to see available commands
     GREET
     print PROMPT_SYSTEM.prefix