ripperdoc 0.3.0-py3-none-any.whl → 0.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. ripperdoc/__init__.py +1 -1
  2. ripperdoc/cli/cli.py +9 -1
  3. ripperdoc/cli/commands/agents_cmd.py +93 -53
  4. ripperdoc/cli/commands/mcp_cmd.py +3 -0
  5. ripperdoc/cli/commands/models_cmd.py +768 -283
  6. ripperdoc/cli/commands/permissions_cmd.py +107 -52
  7. ripperdoc/cli/commands/resume_cmd.py +61 -51
  8. ripperdoc/cli/commands/themes_cmd.py +31 -1
  9. ripperdoc/cli/ui/agents_tui/__init__.py +3 -0
  10. ripperdoc/cli/ui/agents_tui/textual_app.py +1138 -0
  11. ripperdoc/cli/ui/choice.py +376 -0
  12. ripperdoc/cli/ui/interrupt_listener.py +233 -0
  13. ripperdoc/cli/ui/message_display.py +7 -0
  14. ripperdoc/cli/ui/models_tui/__init__.py +5 -0
  15. ripperdoc/cli/ui/models_tui/textual_app.py +698 -0
  16. ripperdoc/cli/ui/panels.py +19 -4
  17. ripperdoc/cli/ui/permissions_tui/__init__.py +3 -0
  18. ripperdoc/cli/ui/permissions_tui/textual_app.py +526 -0
  19. ripperdoc/cli/ui/provider_options.py +220 -80
  20. ripperdoc/cli/ui/rich_ui.py +91 -83
  21. ripperdoc/cli/ui/tips.py +89 -0
  22. ripperdoc/cli/ui/wizard.py +98 -45
  23. ripperdoc/core/config.py +3 -0
  24. ripperdoc/core/permissions.py +66 -104
  25. ripperdoc/core/providers/anthropic.py +11 -0
  26. ripperdoc/protocol/stdio.py +3 -1
  27. ripperdoc/tools/bash_tool.py +2 -0
  28. ripperdoc/tools/file_edit_tool.py +100 -181
  29. ripperdoc/tools/file_read_tool.py +101 -25
  30. ripperdoc/tools/multi_edit_tool.py +239 -91
  31. ripperdoc/tools/notebook_edit_tool.py +11 -29
  32. ripperdoc/utils/file_editing.py +164 -0
  33. ripperdoc/utils/permissions/tool_permission_utils.py +11 -0
  34. {ripperdoc-0.3.0.dist-info → ripperdoc-0.3.2.dist-info}/METADATA +3 -2
  35. {ripperdoc-0.3.0.dist-info → ripperdoc-0.3.2.dist-info}/RECORD +39 -30
  36. ripperdoc/cli/ui/interrupt_handler.py +0 -208
  37. {ripperdoc-0.3.0.dist-info → ripperdoc-0.3.2.dist-info}/WHEEL +0 -0
  38. {ripperdoc-0.3.0.dist-info → ripperdoc-0.3.2.dist-info}/entry_points.txt +0 -0
  39. {ripperdoc-0.3.0.dist-info → ripperdoc-0.3.2.dist-info}/licenses/LICENSE +0 -0
  40. {ripperdoc-0.3.0.dist-info → ripperdoc-0.3.2.dist-info}/top_level.txt +0 -0
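
Before the diff itself: the comparison on this page is between two published wheels, which are just zip archives. As a point of reference, here is a minimal, stdlib-only sketch of how one could reproduce a similar wheel-to-wheel diff locally. It is not how this page was generated; the wheel filenames are the canonical names for the two versions compared here, everything else is illustrative, and it only diffs the `.py` members (the dist-info entries listed above are skipped).

```python
# Stdlib-only sketch: diff the Python sources of two published wheels.
# Nothing here is part of ripperdoc itself.
import difflib
import zipfile

OLD_WHEEL = "ripperdoc-0.3.0-py3-none-any.whl"
NEW_WHEEL = "ripperdoc-0.3.2-py3-none-any.whl"


def python_sources(path: str) -> dict:
    """Map every .py member of a wheel (a zip archive) to its decoded lines."""
    with zipfile.ZipFile(path) as wheel:
        return {
            name: wheel.read(name).decode("utf-8", errors="replace").splitlines(keepends=True)
            for name in wheel.namelist()
            if name.endswith(".py")
        }


old, new = python_sources(OLD_WHEEL), python_sources(NEW_WHEEL)
for name in sorted(set(old) | set(new)):
    for line in difflib.unified_diff(
        old.get(name, []),
        new.get(name, []),
        fromfile=f"0.3.0/{name}",
        tofile=f"0.3.2/{name}",
    ):
        print(line, end="")
```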
@@ -81,39 +81,20 @@ def default_model_for_protocol(protocol: ProviderType) -> str:
 
 KNOWN_PROVIDERS = ProviderRegistry(
     providers=[
-        ProviderOption(
-            key="deepseek",
-            protocol=ProviderType.OPENAI_COMPATIBLE,
-            default_model="deepseek-chat",
-            model_suggestions=("deepseek-chat", "deepseek-reasoner"),
-            default_api_base="https://api.deepseek.com/v1",
-        ),
+        # === Major Cloud Providers ===
         ProviderOption(
             key="openai",
             protocol=ProviderType.OPENAI_COMPATIBLE,
             default_model="gpt-4o-mini",
             model_suggestions=(
-                "gpt-5.1",
-                "gpt-5.1-chat",
-                "gpt-5.1-codex",
                 "gpt-4o",
+                "gpt-4o-mini",
                 "gpt-4-turbo",
                 "o1-preview",
                 "o1-mini",
             ),
             default_api_base="https://api.openai.com/v1",
         ),
-        ProviderOption(
-            key="openrouter",
-            protocol=ProviderType.OPENAI_COMPATIBLE,
-            default_model="openai/gpt-4o-mini",
-            model_suggestions=(
-                "openai/gpt-4o-mini",
-                "meta-llama/llama-3.1-8b-instruct",
-                "google/gemini-flash-1.5",
-            ),
-            default_api_base="https://openrouter.ai/api/v1",
-        ),
         ProviderOption(
             key="anthropic",
             protocol=ProviderType.ANTHROPIC,
@@ -122,118 +103,277 @@ KNOWN_PROVIDERS = ProviderRegistry(
                 "claude-3-5-sonnet-20241022",
                 "claude-3-5-haiku-20241022",
                 "claude-3-opus-20240229",
-                "claude-3-sonnet-20240229",
-                "claude-3-haiku-20240307",
             ),
             default_api_base=None,
         ),
         ProviderOption(
-            key="openai_compatible",
+            key="google",
+            protocol=ProviderType.GEMINI,
+            default_model="gemini-1.5-pro",
+            model_suggestions=(
+                "gemini-2.0-flash-exp",
+                "gemini-1.5-pro",
+                "gemini-1.5-flash",
+            ),
+            default_api_base="https://generativelanguage.googleapis.com/v1beta",
+        ),
+        # === Aggregators & Open Router ===
+        ProviderOption(
+            key="openrouter",
             protocol=ProviderType.OPENAI_COMPATIBLE,
-            default_model="gpt-4o-mini",
+            default_model="openai/gpt-4o-mini",
             model_suggestions=(
-                "gpt-4o-mini",
-                "gpt-4o",
-                "gpt-3.5-turbo",
+                "openai/gpt-4o-mini",
+                "anthropic/claude-3.5-sonnet",
+                "google/gemini-flash-1.5",
+                "meta-llama/llama-3.1-70b-instruct",
             ),
-            default_api_base=None,
+            default_api_base="https://openrouter.ai/api/v1",
         ),
         ProviderOption(
-            key="mistralai",
+            key="poe",
             protocol=ProviderType.OPENAI_COMPATIBLE,
-            default_model="mistral-small-creative",
+            default_model="gpt-4o",
             model_suggestions=(
-                "mistral-small-creative",
-                "mistral-large-latest",
-                "mistral-small-latest",
-                "devstral-2512",
-                "ministral-14b-2512",
-                "ministral-8b-2512",
-                "codestral-latest",
-                "pixtral-large-latest",
+                "gpt-4o",
+                "claude-3.5-sonnet",
+                "gemini-1.5-pro",
+                "mistral-large",
             ),
-            default_api_base="https://api.mistral.ai/v1",
+            default_api_base="https://api.poe.com/v1",
         ),
+        # === Chinese Providers ===
         ProviderOption(
-            key="google",
-            protocol=ProviderType.GEMINI,
-            default_model="gemini-1.5-pro",
+            key="deepseek",
+            protocol=ProviderType.ANTHROPIC,
+            default_model="deepseek-chat",
             model_suggestions=(
-                "gemini-2.5-pro",
-                "gemini-2.5-flash-lite",
-                "gemini-2.5-flash",
-                "gemini-3-pro-preview",
-                "gemini-3-flash-preview",
+                "deepseek-chat",
+                "deepseek-reasoner",
             ),
-            default_api_base="https://generativelanguage.googleapis.com/v1beta",
+            default_api_base="https://api.deepseek.com/v1",
+        ),
+        ProviderOption(
+            key="zhipu",
+            protocol=ProviderType.ANTHROPIC,
+            default_model="glm-4-flash",
+            model_suggestions=(
+                "glm-4-plus",
+                "glm-4-flash",
+                "glm-4.7",
+                "glm-4.6",
+                "glm-4.5",
+                "glm-4-air",
+            ),
+            default_api_base="https://open.bigmodel.cn/api/anthropic",
         ),
         ProviderOption(
             key="moonshot",
             protocol=ProviderType.OPENAI_COMPATIBLE,
-            default_model="kimi-k2-turbo-preview",
+            default_model="moonshot-v1-auto",
             model_suggestions=(
-                "kimi-k2-0905-preview",
+                "moonshot-v1-auto",
                 "kimi-k2-0711-preview",
                 "kimi-k2-turbo-preview",
                 "kimi-k2-thinking",
-                "kimi-k2-thinking-turbo",
+                "kimi-k2-0905-preview",
             ),
             default_api_base="https://api.moonshot.cn/v1",
         ),
         ProviderOption(
-            key="qwen",
+            key="volcengine",
             protocol=ProviderType.OPENAI_COMPATIBLE,
-            default_model="qwen-turbo",
+            default_model="doubao-pro-32k",
+            model_suggestions=(
+                # Doubao Pro series
+                "doubao-pro-32k",
+                "doubao-pro-256k",
+                "doubao-pro-32k-functioncall-241028",
+                "doubao-pro-32k-character-241215",
+                # Doubao 1.5 series
+                "Doubao-1.5-pro",
+                "doubao-1.5-pro-32k",
+                "doubao-1.5-pro-32k-character",
+                "Doubao-1.5-pro-256k",
+                "Doubao-1.5-vision-pro",
+                "doubao-1.5-vision-pro",
+                "Doubao-1.5-lite-32k",
+                # Doubao Lite series
+                "Doubao-lite-32k",
+                "Doubao-lite-128k",
+                "Doubao-lite-4k-character-240828",
+                "Doubao-lite-32k-character-241015",
+                # DeepSeek series
+                "DeepSeek-V3",
+                "DeepSeek-R1",
+                "DeepSeek-R1-Distill-Qwen-32B",
+                "DeepSeek-R1-Distill-Qwen-7B",
+                # Vision series
+                "Doubao-vision-lite-32k",
+            ),
+            default_api_base="https://ark.cn-beijing.volces.com/api/v3",
+        ),
+        ProviderOption(
+            key="aliyun",
+            protocol=ProviderType.OPENAI_COMPATIBLE,
+            default_model="qwen-plus",
             model_suggestions=(
-                "qwen-turbo",
                 "qwen-plus",
+                "qwen-turbo",
                 "qwen-max",
-                "qwen2.5-32b",
-                "qwen2.5-coder-32b",
+                "qwen-coder-plus",
             ),
             default_api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
         ),
         ProviderOption(
-            key="zhipu",
+            key="minimax",
+            protocol=ProviderType.OPENAI_COMPATIBLE,
+            default_model="abab6.5s",
+            model_suggestions=(
+                # abab series
+                "abab6.5s",
+                "abab6.5g",
+                "abab6.5t",
+                "abab6",
+                "abab5.5s",
+                "abab5",
+                # 01 series
+                "minimax-01",
+                # M2 series
+                "MiniMax-M2",
+                "MiniMax-M2-Stable",
+            ),
+            default_api_base="https://api.minimax.chat/v1",
+        ),
+        ProviderOption(
+            key="z.ai",
             protocol=ProviderType.OPENAI_COMPATIBLE,
             default_model="glm-4-flash",
             model_suggestions=(
+                "glm-4-flash",
                 "glm-4-plus",
-                "glm-4-air-250414",
-                "glm-4-airx",
-                "glm-4-long",
-                "glm-4-flashx",
-                "glm-4-flash-250414",
                 "glm-4.6",
-                "glm-4.5",
-                "glm-4.5-air",
-                "glm-4.5-airx",
-                "glm-4.5-x",
-                "glm-4.5-flash",
             ),
-            default_api_base="https://open.bigmodel.cn/api/paas/v4",
+            default_api_base="https://api.z.ai/api/paas/v4",
         ),
+        # === Western AI Companies ===
         ProviderOption(
-            key="minimax",
+            key="mistralai",
             protocol=ProviderType.OPENAI_COMPATIBLE,
-            default_model="MiniMax-M2",
-            model_suggestions=("MiniMax-M2",),
-            default_api_base="https://api.minimax.chat/v1",
+            default_model="mistral-large-latest",
+            model_suggestions=(
+                # Mistral Chat series
+                "mistral-large-latest",
+                "mistral-small-latest",
+                "mistral-nemo",
+                "mistral-mini",
+                # Free models
+                "mistral-7b",
+                "mistral-8b",
+                # Mistral Code series
+                "codestral-latest",
+                # Multimodal
+                "pixtral-large-latest",
+            ),
+            default_api_base="https://api.mistral.ai/v1",
+        ),
+        ProviderOption(
+            key="groq",
+            protocol=ProviderType.OPENAI_COMPATIBLE,
+            default_model="llama-3.3-70b-versatile",
+            model_suggestions=(
+                # Llama series
+                "llama-3.3-70b-versatile",
+                "llama-3.1-8b-instant",
+                "llama3-70b-8192",
+                "llama3-8b-8192",
+                # Gemma series
+                "gemma2-9b-it",
+                "gemma-7b-it",
+                # Mistral series
+                "mistral-saba-24b",
+                "mixtral-8x7b-32768",
+            ),
+            default_api_base="https://api.groq.com/openai/v1",
+        ),
+        ProviderOption(
+            key="grok",
+            protocol=ProviderType.OPENAI_COMPATIBLE,
+            default_model="grok-3",
+            model_suggestions=(
+                "grok-4",
+                "grok-3",
+                "grok-3-fast",
+                "grok-3-mini",
+                "grok-3-mini-fast",
+            ),
+            default_api_base="https://api.x.ai/v1",
+        ),
+        ProviderOption(
+            key="cohere",
+            protocol=ProviderType.OPENAI_COMPATIBLE,
+            default_model="command-r-plus-08-2024",
+            model_suggestions=(
+                "command-r-plus-08-2024",
+                "command-r-08-2024",
+                "command-r7b-12-2024",
+            ),
+            default_api_base="https://api.cohere.ai/v1",
+        ),
+        ProviderOption(
+            key="together",
+            protocol=ProviderType.OPENAI_COMPATIBLE,
+            default_model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+            model_suggestions=(
+                "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+                "Qwen/Qwen2.5-72B-Instruct-Turbo",
+                "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            ),
+            default_api_base="https://api.together.xyz/v1",
+        ),
+        ProviderOption(
+            key="perplexity",
+            protocol=ProviderType.OPENAI_COMPATIBLE,
+            default_model="llama-3.1-sonar-small-128k-online",
+            model_suggestions=(
+                "llama-3.1-sonar-small-128k-online",
+                "llama-3.1-sonar-large-128k-online",
+            ),
+            default_api_base="https://api.perplexity.ai",
         ),
         ProviderOption(
             key="siliconflow",
             protocol=ProviderType.OPENAI_COMPATIBLE,
-            default_model="deepseek-ai/DeepSeek-V3.2",
+            default_model="Qwen/Qwen2.5-72B-Instruct",
             model_suggestions=(
-                "deepseek-ai/DeepSeek-V3.2",
-                "Qwen/Qwen2.5-32B-Instruct",
-                "Qwen/Qwen3-Coder-480B-A35B-Instruct",
-                "zai-org/GLM-4.6",
-                "moonshotai/Kimi-K2-Thinking",
-                "MiniMaxAI/MiniMax-M2",
+                "Qwen/Qwen2.5-72B-Instruct",
+                "deepseek-ai/DeepSeek-V3",
+                "01-ai/Yi-1.5-34B-Chat",
             ),
             default_api_base="https://api.siliconflow.cn/v1",
         ),
+        # === Generic / Custom ===
+        ProviderOption(
+            key="openai_compatible",
+            protocol=ProviderType.OPENAI_COMPATIBLE,
+            default_model="gpt-4o-mini",
+            model_suggestions=(
+                "gpt-4o-mini",
+                "gpt-4o",
+                "llama-3.1-70b",
+            ),
+            default_api_base=None,
+        ),
+        ProviderOption(
+            key="anthropic_compatible",
+            protocol=ProviderType.ANTHROPIC,
+            default_model="claude-3-5-sonnet-20241022",
+            model_suggestions=(
+                "claude-3-5-sonnet-20241022",
+                "claude-3-5-haiku-20241022",
+            ),
+            default_api_base=None,
+        ),
     ],
     default_key="deepseek",
 )
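
For readers without the package source open, here is a minimal sketch of the shapes these registry entries appear to have, inferred only from the fields used in the diff above (`key`, `protocol`, `default_model`, `model_suggestions`, `default_api_base`, plus the registry-level `providers` and `default_key`). The enum values, dataclass layout, and lookup helpers are assumptions; the real definitions in `ripperdoc/cli/ui/provider_options.py` may differ.

```python
# Hypothetical reconstruction for illustration only: field names come from the
# diff above; everything else (enum values, frozen dataclasses, helpers) is assumed.
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Tuple


class ProviderType(Enum):
    OPENAI_COMPATIBLE = "openai_compatible"
    ANTHROPIC = "anthropic"
    GEMINI = "gemini"


@dataclass(frozen=True)
class ProviderOption:
    key: str                                 # e.g. "openrouter", "zhipu"
    protocol: ProviderType                   # wire protocol the provider speaks
    default_model: str                       # model preselected for this provider
    model_suggestions: Tuple[str, ...] = ()  # completion hints, not an exhaustive list
    default_api_base: Optional[str] = None   # None: the user must supply a base URL


@dataclass(frozen=True)
class ProviderRegistry:
    providers: List[ProviderOption]
    default_key: str

    def get(self, key: str) -> Optional[ProviderOption]:
        """Return the provider registered under `key`, if any."""
        return next((p for p in self.providers if p.key == key), None)

    def default(self) -> ProviderOption:
        option = self.get(self.default_key)
        assert option is not None, f"default_key {self.default_key!r} not registered"
        return option
```

Read against the hunk above, a lookup like `KNOWN_PROVIDERS.get("deepseek")` in 0.3.2 would resolve to an entry whose `protocol` is `ProviderType.ANTHROPIC` with `deepseek-chat`/`deepseek-reasoner` suggestions, whereas in 0.3.0 the same key used the OpenAI-compatible protocol; `default_key` remains `"deepseek"` in both versions.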