monadic-chat 0.3.2 → 0.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/apps/novel/novel.rb CHANGED
@@ -15,8 +15,8 @@ class Novel < MonadicApp
15
15
  "top_p" => 1.0,
16
16
  "presence_penalty" => 0.1,
17
17
  "frequency_penalty" => 0.1,
18
- "model" => openai_completion.model_name(research_mode: research_mode),
19
- "max_tokens" => 2000,
18
+ "model" => research_mode ? SETTINGS["research_model"] : SETTINGS["normal_model"],
19
+ "max_tokens" => 1000,
20
20
  "stream" => stream,
21
21
  "stop" => nil
22
22
  }.merge(params)
@@ -38,14 +38,23 @@ class Novel < MonadicApp
38
38
  # @messages: messages to this point #
39
39
  # @metadata: currently available metadata sent from GPT #
40
40
  ############################################################
41
-
42
41
  conditions = [
43
42
  @messages.size > 1,
44
- @metadata["tokens"].to_i > params["max_tokens"].to_i / 2
43
+ @messages.size > @num_retained_turns * 2 + 1
45
44
  ]
46
45
 
47
- @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
48
-
46
+ if conditions.all?
47
+ to_delete = []
48
+ new_num_messages = @messages.size
49
+ @messages.each_with_index do |ele, i|
50
+ if ele["role"] != "system"
51
+ to_delete << i
52
+ new_num_messages -= 1
53
+ end
54
+ break if new_num_messages <= @num_retained_turns * 2 + 1
55
+ end
56
+ @messages.delete_if.with_index { |_, i| to_delete.include? i }
57
+ end
49
58
  when :normal
50
59
  ############################################################
51
60
  # Normal mode reducer defined here #
@@ -53,16 +62,21 @@ class Novel < MonadicApp
53
62
  ############################################################
54
63
 
55
64
  conditions = [
65
+ @messages.size > 1,
56
66
  @messages.size > @num_retained_turns * 2 + 1
57
67
  ]
58
68
 
59
69
  if conditions.all?
70
+ to_delete = []
71
+ new_num_messages = @messages.size
60
72
  @messages.each_with_index do |ele, i|
61
73
  if ele["role"] != "system"
62
- @messages.delete_at i
63
- break
74
+ to_delete << i
75
+ new_num_messages -= 1
64
76
  end
77
+ break if new_num_messages <= @num_retained_turns * 2 + 1
65
78
  end
79
+ @messages.delete_if.with_index { |_, i| to_delete.include? i }
66
80
  end
67
81
  end
68
82
  end
@@ -12,27 +12,23 @@ JSON:
12
12
  ```json
13
13
  {
14
14
  "mode": "translate",
15
- "turns": 0,
16
- "prompt": "これは日本語(Japanese)の文(sentence)です。",
17
15
  "response": "This is a sentence in Japanese.",
18
- "target_lang": "English",
19
- "tokens": 194
16
+ "dictionary": {"日本語": "Japanese", "文": "sentence"},
17
+ "target_lang": "English"
20
18
  }
21
19
  ```
22
20
 
23
21
  Make sure the following requirements are all fulfilled:
24
22
 
25
23
  - keep the value of the "mode" property at "translate"
26
- - set the text in the new prompt presented above to the "prompt" property
27
- - translate the new prompt text to the language specified in the "target_lang" set it to "response"
28
- and set the translation to the "response" property
29
- - update the value of "tokens" with the number of tokens of the resulting JSON object"
24
+ - translate the new prompt text to the language specified in the "target_lang" property and set the translation to the "response" property
25
+ - update the "dictionary" property with translation suggested by the user (using parentheses) for specific expressions
26
+ - add user-suggested translations (translations in parentheses) to the "dictionary" property
30
27
 
31
28
  Make sure the following formal requirements are all fulfilled:
32
29
 
33
30
  - do not use invalid characters in the JSON object
34
31
  - escape double quotes and other special characters in the text values in the resulting JSON object
35
- - increment the value of "turns" by 1
36
32
  - check the validity of the generated JSON object and correct any possible parsing problems before returning it
37
33
 
38
34
  Return your response consisting solely of the JSON object wrapped in "<JSON>\n" and "\n</JSON>" tags.
@@ -15,8 +15,8 @@ class Translate < MonadicApp
15
15
  "top_p" => 1.0,
16
16
  "presence_penalty" => 0.0,
17
17
  "frequency_penalty" => 0.0,
18
- "model" => openai_completion.model_name(research_mode: research_mode),
19
- "max_tokens" => 2000,
18
+ "model" => research_mode ? SETTINGS["research_model"] : SETTINGS["normal_model"],
19
+ "max_tokens" => 1000,
20
20
  "stream" => stream,
21
21
  "stop" => nil
22
22
  }.merge(params)
@@ -42,14 +42,23 @@ class Translate < MonadicApp
42
42
  # @messages: messages to this point #
43
43
  # @metadata: currently available metadata sent from GPT #
44
44
  ############################################################
45
-
46
45
  conditions = [
47
46
  @messages.size > 1,
48
- @metadata["tokens"].to_i > params["max_tokens"].to_i / 2
47
+ @messages.size > @num_retained_turns * 2 + 1
49
48
  ]
50
49
 
51
- @metadata["turns"] = @metadata["turns"].to_i - 1 if conditions.all?
52
-
50
+ if conditions.all?
51
+ to_delete = []
52
+ new_num_messages = @messages.size
53
+ @messages.each_with_index do |ele, i|
54
+ if ele["role"] != "system"
55
+ to_delete << i
56
+ new_num_messages -= 1
57
+ end
58
+ break if new_num_messages <= @num_retained_turns * 2 + 1
59
+ end
60
+ @messages.delete_if.with_index { |_, i| to_delete.include? i }
61
+ end
53
62
  when :normal
54
63
  ############################################################
55
64
  # Normal mode reducer defined here #
@@ -57,16 +66,21 @@ class Translate < MonadicApp
57
66
  ############################################################
58
67
 
59
68
  conditions = [
69
+ @messages.size > 1,
60
70
  @messages.size > @num_retained_turns * 2 + 1
61
71
  ]
62
72
 
63
73
  if conditions.all?
74
+ to_delete = []
75
+ new_num_messages = @messages.size
64
76
  @messages.each_with_index do |ele, i|
65
77
  if ele["role"] != "system"
66
- @messages.delete_at i
67
- break
78
+ to_delete << i
79
+ new_num_messages -= 1
68
80
  end
81
+ break if new_num_messages <= @num_retained_turns * 2 + 1
69
82
  end
83
+ @messages.delete_if.with_index { |_, i| to_delete.include? i }
70
84
  end
71
85
  end
72
86
  end
data/assets/gpt2.bin ADDED
Binary file
data/bin/monadic-chat CHANGED
@@ -54,11 +54,12 @@ module MonadicMenu
54
54
  clear_screen
55
55
  print "\n", banner.strip, "\n"
56
56
 
57
+ print TTY::Cursor.save
57
58
  openai_completion ||= MonadicChat.authenticate
58
59
  exit unless openai_completion
59
60
 
60
61
  max_app_name_width = APPS.reduce(8) { |accum, app| app.length > accum ? app.length : accum } + 2
61
- parameter = PROMPT_SYSTEM.select(" Current mode: #{print_mode.call(mode)}\n\nSelect item:",
62
+ parameter = PROMPT_SYSTEM.select("Current mode: #{print_mode.call(mode)}\n\nSelect item:",
62
63
  per_page: 10,
63
64
  cycle: true,
64
65
  filter: true,
@@ -120,5 +121,34 @@ module MonadicMenu
120
121
  end
121
122
  end
122
123
 
123
- MonadicMenu.clear_screen
124
- MonadicMenu.run
124
+ case ARGV.size
125
+ when 0
126
+ MonadicMenu.clear_screen
127
+ MonadicMenu.run
128
+ when 1
129
+ case ARGV[0]
130
+ when "readme", "-h"
131
+ MonadicChat.open_readme
132
+ when "version", "-v"
133
+ puts MonadicChat::VERSION
134
+ else
135
+ MonadicChat::APPS.each do |app|
136
+ next unless app == ARGV[0]
137
+
138
+ openai_completion ||= MonadicChat.authenticate(message: false)
139
+ eval(app.capitalize, binding, __FILE__, __LINE__).new(openai_completion, research_mode: false).run
140
+ exit
141
+ end
142
+ puts "Unknown command"
143
+ end
144
+ else
145
+ MonadicChat::APPS.each do |app|
146
+ next unless app == ARGV[0]
147
+
148
+ openai_completion ||= MonadicChat.authenticate(message: false)
149
+ app_obj = eval(app.capitalize, binding, __FILE__, __LINE__).new(openai_completion, research_mode: false, params: { "model" => "gpt-4" })
150
+ app_obj.bind(ARGV[1..].join(" "), num_retry: 2)
151
+ exit
152
+ end
153
+ puts "Unknown command"
154
+ end