monadic-chat 0.2.2 → 0.3.1

Sign up to get free protection for your applications and to get access to all the features.
data/apps/code/code.rb CHANGED
@@ -8,36 +8,31 @@ class Code < MonadicApp
8
8
 
9
9
  attr_accessor :template, :config, :params, :completion
10
10
 
11
- def initialize(openai_completion, research_mode: false, stream: true)
11
+ def initialize(openai_completion, research_mode: false, stream: true, params: {})
12
12
  @num_retained_turns = 10
13
13
  params = {
14
14
  "temperature" => 0.0,
15
15
  "top_p" => 1.0,
16
16
  "presence_penalty" => 0.0,
17
17
  "frequency_penalty" => 0.0,
18
- "model" => OpenAI.model_name(research_mode: research_mode),
18
+ "model" => openai_completion.model_name(research_mode: research_mode),
19
19
  "max_tokens" => 2000,
20
20
  "stream" => stream,
21
21
  "stop" => nil
22
- }
23
- method = OpenAI.model_to_method(params["model"])
24
- case method
25
- when RESEARCH_MODE
26
- tjson = TEMPLATES["normal/code"]
27
- tmarkdown = TEMPLATES["research/code"]
28
- when NORMAL_MODE
29
- tjson = TEMPLATES["normal/code"]
30
- tmarkdown = nil
31
- end
32
- super(params: params,
33
- tjson: tjson,
34
- tmarkdown: tmarkdown,
22
+ }.merge(params)
23
+ mode = research_mode ? :research : :normal
24
+ template_json = TEMPLATES["normal/code"]
25
+ template_md = TEMPLATES["research/code"]
26
+ super(mode: mode,
27
+ params: params,
28
+ template_json: template_json,
29
+ template_md: template_md,
35
30
  placeholders: {},
36
31
  prop_accumulator: "messages",
37
32
  prop_newdata: "response",
38
33
  update_proc: proc do
39
- case method
40
- when RESEARCH_MODE
34
+ case mode
35
+ when :research
41
36
  ############################################################
42
37
  # Research mode reducer defined here #
43
38
  # @messages: messages to this point #
@@ -51,7 +46,7 @@ class Code < MonadicApp
51
46
 
52
47
  @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
53
48
 
54
- when NORMAL_MODE
49
+ when :normal
55
50
  ############################################################
56
51
  # Normal mode reducer defined here #
57
52
  # @messages: messages to this point #
@@ -1,6 +1,6 @@
1
1
  {"messages": [
2
2
  {"role": "system",
3
- "content": "You are an English syntactic/semantic/pragmatic analyzer. Analyze the new prompt from the user below and execute a syntactic parsing. Give your response in a variation of the penn treebank format, but use brackets [ ] instead of parentheses ( ). Also, give your response in a markdown code span. The sentence must always be parsed if the user's input sentence is enclosed in double quotes. Let the user know if parsing the sentence is difficult or the input must be enclosed in double quotes."
3
+ "content": "You are an English syntactic/semantic/pragmatic analyzer. Analyze the new prompt from the user and execute a syntactic parsing. Give your response in a variation of the penn treebank format, but use brackets [ ] instead of parentheses ( ). Also, give your response in a markdown code span. Let the user know if parsing the given sentence is impossible."
4
4
  },
5
5
  {"role": "user",
6
6
  "content": "\"We saw a beautiful sunset.\""
@@ -14,7 +14,7 @@ JSON:
14
14
  ```json
15
15
  {
16
16
  "prompt": "\"We didn't have a camera.\"",
17
- "response": "`[S [NP We] [VP [V didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]`\n\n###\n\n",
17
+ "response": "`[S [NP We] [VP [V didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]`",
18
18
  "mode": "linguistic",
19
19
  "turns": 3,
20
20
  "sentence_type": ["declarative"],
@@ -41,6 +41,4 @@ Make sure the following formal requirements are all fulfilled:
41
41
  - escape double quotes and other special characters in the text values in the resulting JSON object
42
42
  - check the validity of the generated JSON object and correct any possible parsing problems before returning it
43
43
 
44
- Add "\n\n###\n\n" at the end of the "response" value.
45
-
46
- Wrap the JSON object with "<JSON>\n" and "\n</JSON>".
44
+ Return your response consisting solely of the JSON object wrapped in "<JSON>\n" and "\n</JSON>" tags.
@@ -8,36 +8,31 @@ class Linguistic < MonadicApp
8
8
 
9
9
  attr_accessor :template, :config, :params, :completion
10
10
 
11
- def initialize(openai_completion, research_mode: false, stream: true)
11
+ def initialize(openai_completion, research_mode: false, stream: true, params: {})
12
12
  @num_retained_turns = 10
13
13
  params = {
14
14
  "temperature" => 0.0,
15
15
  "top_p" => 1.0,
16
16
  "presence_penalty" => 0.0,
17
17
  "frequency_penalty" => 0.0,
18
- "model" => OpenAI.model_name(research_mode: research_mode),
18
+ "model" => openai_completion.model_name(research_mode: research_mode),
19
19
  "max_tokens" => 2000,
20
20
  "stream" => stream,
21
21
  "stop" => nil
22
- }
23
- method = OpenAI.model_to_method(params["model"])
24
- case method
25
- when RESEARCH_MODE
26
- tjson = TEMPLATES["normal/linguistic"]
27
- tmarkdown = TEMPLATES["research/linguistic"]
28
- when NORMAL_MODE
29
- tjson = TEMPLATES["normal/linguistic"]
30
- tmarkdown = nil
31
- end
32
- super(params: params,
33
- tjson: tjson,
34
- tmarkdown: tmarkdown,
22
+ }.merge(params)
23
+ mode = research_mode ? :research : :normal
24
+ template_json = TEMPLATES["normal/linguistic"]
25
+ template_md = TEMPLATES["research/linguistic"]
26
+ super(mode: mode,
27
+ params: params,
28
+ template_json: template_json,
29
+ template_md: template_md,
35
30
  placeholders: {},
36
31
  prop_accumulator: "messages",
37
32
  prop_newdata: "response",
38
33
  update_proc: proc do
39
- case method
40
- when RESEARCH_MODE
34
+ case mode
35
+ when :research
41
36
  ############################################################
42
37
  # Research mode reducer defined here #
43
38
  # @messages: messages to this point #
@@ -51,7 +46,7 @@ class Linguistic < MonadicApp
51
46
 
52
47
  @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
53
48
 
54
- when NORMAL_MODE
49
+ when :normal
55
50
  ############################################################
56
51
  # Normal mode reducer defined here #
57
52
  # @messages: messages to this point #
data/apps/novel/novel.md CHANGED
@@ -12,7 +12,7 @@ JSON:
12
12
  ```json
13
13
  {
14
14
  "prompt": "The preface to the novel is presented",
15
- "response": "What follows is a story that an AI assistant tells. It is guaranteed that this will be an incredibly realistic and interesting novel.\n\n###\n\n",
15
+ "response": "What follows is a story that an AI assistant tells. It is guaranteed that this will be an incredibly realistic and interesting novel.",
16
16
  "mode": "novel",
17
17
  "turns": 1,
18
18
  "tokens": 147
@@ -35,6 +35,4 @@ Make sure the following formal requirements are all fulfilled:
35
35
  - increment the value of "turns" by 1
36
36
  - check the validity of the generated JSON object and correct any possible parsing problems before returning it
37
37
 
38
- Add "\n\n###\n\n" at the end of the "response" value.
39
-
40
- Wrap the JSON object with "<JSON>\n" and "\n</JSON>".
38
+ Return your response consisting solely of the JSON object wrapped in "<JSON>\n" and "\n</JSON>" tags.
data/apps/novel/novel.rb CHANGED
@@ -8,36 +8,31 @@ class Novel < MonadicApp
8
8
 
9
9
  attr_accessor :template, :config, :params, :completion
10
10
 
11
- def initialize(openai_completion, research_mode: false, stream: true)
11
+ def initialize(openai_completion, research_mode: false, stream: true, params: {})
12
12
  @num_retained_turns = 10
13
13
  params = {
14
14
  "temperature" => 0.3,
15
15
  "top_p" => 1.0,
16
16
  "presence_penalty" => 0.1,
17
17
  "frequency_penalty" => 0.1,
18
- "model" => OpenAI.model_name(research_mode: research_mode),
18
+ "model" => openai_completion.model_name(research_mode: research_mode),
19
19
  "max_tokens" => 2000,
20
20
  "stream" => stream,
21
21
  "stop" => nil
22
- }
23
- method = OpenAI.model_to_method(params["model"])
24
- case method
25
- when RESEARCH_MODE
26
- tjson = TEMPLATES["normal/novel"]
27
- tmarkdown = TEMPLATES["research/novel"]
28
- when NORMAL_MODE
29
- tjson = TEMPLATES["normal/novel"]
30
- tmarkdown = nil
31
- end
32
- super(params: params,
33
- tjson: tjson,
34
- tmarkdown: tmarkdown,
22
+ }.merge(params)
23
+ mode = research_mode ? :research : :normal
24
+ template_json = TEMPLATES["normal/novel"]
25
+ template_md = TEMPLATES["research/novel"]
26
+ super(mode: mode,
27
+ params: params,
28
+ template_json: template_json,
29
+ template_md: template_md,
35
30
  placeholders: {},
36
31
  prop_accumulator: "messages",
37
32
  prop_newdata: "response",
38
33
  update_proc: proc do
39
- case method
40
- when RESEARCH_MODE
34
+ case mode
35
+ when :research
41
36
  ############################################################
42
37
  # Research mode reducer defined here #
43
38
  # @messages: messages to this point #
@@ -51,7 +46,7 @@ class Novel < MonadicApp
51
46
 
52
47
  @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
53
48
 
54
- when NORMAL_MODE
49
+ when :normal
55
50
  ############################################################
56
51
  # Normal mode reducer defined here #
57
52
  # @messages: messages to this point #
@@ -14,7 +14,7 @@ JSON:
14
14
  "mode": "translate",
15
15
  "turns": 0,
16
16
  "prompt": "これは日本語(Japanese)の文(sentence)です。",
17
- "response": "This is a sentence in Japanese.\n\n###\n\n",
17
+ "response": "This is a sentence in Japanese.",
18
18
  "target_lang": "English",
19
19
  "tokens": 194
20
20
  }
@@ -35,6 +35,4 @@ Make sure the following formal requirements are all fulfilled:
35
35
  - increment the value of "turns" by 1
36
36
  - check the validity of the generated JSON object and correct any possible parsing problems before returning it
37
37
 
38
- Add "\n\n###\n\n" at the end of the "response" value.
39
-
40
- Wrap the JSON object with "<JSON>\n" and "\n</JSON>".
38
+ Return your response consisting solely of the JSON object wrapped in "<JSON>\n" and "\n</JSON>" tags.
@@ -8,40 +8,35 @@ class Translate < MonadicApp
8
8
 
9
9
  attr_accessor :template, :config, :params, :completion
10
10
 
11
- def initialize(openai_completion, replacements: nil, research_mode: false, stream: true)
11
+ def initialize(openai_completion, replacements: nil, research_mode: false, stream: true, params: {})
12
12
  @num_retained_turns = 10
13
13
  params = {
14
14
  "temperature" => 0.2,
15
15
  "top_p" => 1.0,
16
16
  "presence_penalty" => 0.0,
17
17
  "frequency_penalty" => 0.0,
18
- "model" => OpenAI.model_name(research_mode: research_mode),
18
+ "model" => openai_completion.model_name(research_mode: research_mode),
19
19
  "max_tokens" => 2000,
20
20
  "stream" => stream,
21
21
  "stop" => nil
22
- }
22
+ }.merge(params)
23
23
  replacements ||= {
24
24
  "mode" => :interactive,
25
25
  "{{TARGET_LANG}}" => "Enter target language"
26
26
  }
27
- method = OpenAI.model_to_method(params["model"])
28
- case method
29
- when RESEARCH_MODE
30
- tjson = TEMPLATES["normal/translate"]
31
- tmarkdown = TEMPLATES["research/translate"]
32
- when NORMAL_MODE
33
- tjson = TEMPLATES["normal/translate"]
34
- tmarkdown = nil
35
- end
36
- super(params: params,
37
- tjson: tjson,
38
- tmarkdown: tmarkdown,
27
+ mode = research_mode ? :research : :normal
28
+ template_json = TEMPLATES["normal/translate"]
29
+ template_md = TEMPLATES["research/translate"]
30
+ super(mode: mode,
31
+ params: params,
32
+ template_json: template_json,
33
+ template_md: template_md,
39
34
  placeholders: replacements,
40
35
  prop_accumulator: "messages",
41
36
  prop_newdata: "response",
42
37
  update_proc: proc do
43
- case method
44
- when RESEARCH_MODE
38
+ case mode
39
+ when :research
45
40
  ############################################################
46
41
  # Research mode reducer defined here #
47
42
  # @messages: messages to this point #
@@ -55,7 +50,7 @@ class Translate < MonadicApp
55
50
 
56
51
  @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
57
52
 
58
- when NORMAL_MODE
53
+ when :normal
59
54
  ############################################################
60
55
  # Normal mode reducer defined here #
61
56
  # @messages: messages to this point #
data/lib/monadic_app.rb CHANGED
@@ -8,15 +8,12 @@ require_relative "./monadic_chat/menu"
8
8
  require_relative "./monadic_chat/parameters"
9
9
  require_relative "./monadic_chat/internals"
10
10
 
11
- Thread.abort_on_exception = false
12
-
13
11
  class MonadicApp
14
12
  include MonadicChat
15
13
  attr_reader :template, :messages
16
14
 
17
- def initialize(params:, tjson:, tmarkdown:, placeholders:, prop_accumulator:, prop_newdata:, update_proc:)
18
- @threads = Thread::Queue.new
19
- @responses = Thread::Queue.new
15
+ def initialize(mode:, params:, template_json:, template_md:, placeholders:, prop_accumulator:, prop_newdata:, update_proc:)
16
+ @mode = mode.to_sym
20
17
  @placeholders = placeholders
21
18
  @prop_accumulator = prop_accumulator
22
19
  @prop_newdata = prop_newdata
@@ -26,21 +23,14 @@ class MonadicApp
26
23
  @params = @params_initial.dup
27
24
  @html = false
28
25
 
29
- @method = OpenAI.model_to_method @params["model"]
26
+ @method = OpenAI.model_to_method(@params["model"])
30
27
 
31
28
  @metadata = {}
32
-
33
- @messages_initial = JSON.parse(File.read(tjson))["messages"]
29
+ @messages_initial = JSON.parse(File.read(template_json))["messages"]
34
30
  @messages = @messages_initial.dup
35
31
 
36
- case @method
37
- when RESEARCH_MODE
38
- @template_initial = File.read(tmarkdown)
39
- @template = @template_initial.dup
40
- when NORMAL_MODE
41
- @template_initial = ""
42
- @template = ""
43
- end
32
+ @template_initial = File.read(template_md)
33
+ @template = @template_initial.dup
44
34
  end
45
35
 
46
36
  ##################################################
@@ -75,12 +65,7 @@ class MonadicApp
75
65
  else
76
66
  if input && confirm_query(input)
77
67
  begin
78
- case @method
79
- when RESEARCH_MODE
80
- bind_research_mode(input, num_retry: NUM_RETRY)
81
- when NORMAL_MODE
82
- bind_normal_mode(input, num_retry: NUM_RETRY)
83
- end
68
+ bind(input, num_retry: NUM_RETRY)
84
69
  rescue StandardError => e
85
70
  input = ask_retrial(input, e.message)
86
71
  next
@@ -17,7 +17,7 @@ class MonadicApp
17
17
  end
18
18
 
19
19
  @messages.each do |m|
20
- accumulator << "#{m["role"].capitalize}: #{m["content"]}".sub("\n\n###\n\n", "")
20
+ accumulator << "#{m["role"].capitalize}: #{m["content"]}"
21
21
  end
22
22
 
23
23
  h1 = "# Monadic :: Chat / #{self.class.name}"
@@ -32,8 +32,6 @@ class MonadicApp
32
32
  def show_data
33
33
  print PROMPT_SYSTEM.prefix
34
34
 
35
- wait
36
-
37
35
  res = format_data
38
36
  print "\n#{TTY::Markdown.parse(res, indent: 0)}"
39
37
  end
@@ -50,7 +48,6 @@ class MonadicApp
50
48
  end
51
49
 
52
50
  def show_html
53
- wait
54
51
  set_html
55
52
  print PROMPT_SYSTEM.prefix
56
53
  print "HTML is ready\n"
@@ -6,25 +6,25 @@ class MonadicApp
6
6
  ##################################################
7
7
 
8
8
  def user_input(text = "")
9
- if count_lines_below < 1
10
- ask_clear
11
- user_input
12
- else
13
- res = PROMPT_USER.readline(text)
14
- print TTY::Cursor.clear_line_after
15
- res == "" ? nil : res
16
- end
9
+ # if count_lines_below < 1
10
+ # ask_clear
11
+ # user_input
12
+ # else
13
+ res = PROMPT_USER.readline(text)
14
+ print TTY::Cursor.clear_line_after
15
+ res == "" ? nil : res
16
+ # end
17
17
  end
18
18
 
19
19
  def show_greet
20
- current_mode = case @method
21
- when RESEARCH_MODE
20
+ current_mode = case @mode
21
+ when :research
22
22
  PASTEL.red("Research")
23
- when NORMAL_MODE
23
+ when :normal
24
24
  PASTEL.green("Normal")
25
25
  end
26
26
  greet_md = <<~GREET
27
- - You are currently in **#{current_mode}** mode
27
+ - You are currently in **#{current_mode}** mode (#{@params["model"]})
28
28
  - Type **help** or **menu** to see available commands
29
29
  GREET
30
30
  print PROMPT_SYSTEM.prefix