wcgw 5.0.1__py3-none-any.whl → 5.1.0__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.


This version of wcgw might be problematic.

Files changed (39)
  1. wcgw/client/bash_state/bash_state.py +2 -2
  2. wcgw/client/diff-instructions.txt +2 -2
  3. wcgw/client/file_ops/diff_edit.py +14 -2
  4. wcgw/client/file_ops/extensions.py +137 -0
  5. wcgw/client/file_ops/search_replace.py +1 -2
  6. wcgw/client/mcp_server/server.py +10 -18
  7. wcgw/client/memory.py +4 -1
  8. wcgw/client/tool_prompts.py +16 -15
  9. wcgw/client/tools.py +99 -39
  10. {wcgw-5.0.1.dist-info → wcgw-5.1.0.dist-info}/METADATA +2 -1
  11. wcgw-5.1.0.dist-info/RECORD +37 -0
  12. wcgw_cli/anthropic_client.py +8 -4
  13. wcgw_cli/openai_client.py +7 -3
  14. mcp_wcgw/__init__.py +0 -114
  15. mcp_wcgw/client/__init__.py +0 -0
  16. mcp_wcgw/client/__main__.py +0 -79
  17. mcp_wcgw/client/session.py +0 -234
  18. mcp_wcgw/client/sse.py +0 -142
  19. mcp_wcgw/client/stdio.py +0 -128
  20. mcp_wcgw/py.typed +0 -0
  21. mcp_wcgw/server/__init__.py +0 -514
  22. mcp_wcgw/server/__main__.py +0 -50
  23. mcp_wcgw/server/models.py +0 -16
  24. mcp_wcgw/server/session.py +0 -288
  25. mcp_wcgw/server/sse.py +0 -178
  26. mcp_wcgw/server/stdio.py +0 -83
  27. mcp_wcgw/server/websocket.py +0 -61
  28. mcp_wcgw/shared/__init__.py +0 -0
  29. mcp_wcgw/shared/context.py +0 -14
  30. mcp_wcgw/shared/exceptions.py +0 -9
  31. mcp_wcgw/shared/memory.py +0 -87
  32. mcp_wcgw/shared/progress.py +0 -40
  33. mcp_wcgw/shared/session.py +0 -288
  34. mcp_wcgw/shared/version.py +0 -3
  35. mcp_wcgw/types.py +0 -1060
  36. wcgw-5.0.1.dist-info/RECORD +0 -58
  37. {wcgw-5.0.1.dist-info → wcgw-5.1.0.dist-info}/WHEEL +0 -0
  38. {wcgw-5.0.1.dist-info → wcgw-5.1.0.dist-info}/entry_points.txt +0 -0
  39. {wcgw-5.0.1.dist-info → wcgw-5.1.0.dist-info}/licenses/LICENSE +0 -0
wcgw/client/tools.py CHANGED
@@ -55,6 +55,7 @@ from ..types_ import (
  WriteIfEmpty,
  )
  from .encoder import EncoderDecoder, get_default_encoder
+ from .file_ops.extensions import select_max_tokens
  from .file_ops.search_replace import (
  DIVIDER_MARKER,
  REPLACE_MARKER,
@@ -99,7 +100,8 @@ def initialize(
  any_workspace_path: str,
  read_files_: list[str],
  task_id_to_resume: str,
- max_tokens: Optional[int],
+ coding_max_tokens: Optional[int],
+ noncoding_max_tokens: Optional[int],
  mode: ModesConfig,
  chat_id: str,
  ) -> tuple[str, Context, dict[str, list[tuple[int, int]]]]:
@@ -115,7 +117,7 @@ def initialize(
  # Try to load state from the chat ID
  if not context.bash_state.load_state_from_chat_id(chat_id):
  return (
- f"Error: No saved bash state found for chat ID {chat_id}",
+ f"Error: No saved bash state found for chat ID {chat_id}. Please re-initialize to get a new id or use correct id.",
  context,
  {},
  )
@@ -128,7 +130,8 @@ def initialize(
  try:
  project_root_path, task_mem, loaded_state = load_memory(
  task_id_to_resume,
- max_tokens,
+ coding_max_tokens,
+ noncoding_max_tokens,
  lambda x: default_enc.encoder(x),
  lambda x: default_enc.decoder(x),
  )
@@ -248,7 +251,7 @@ def initialize(
  for f in read_files_
  ]
  initial_files, initial_paths_with_ranges, _ = read_files(
- read_files_, max_tokens, context
+ read_files_, coding_max_tokens, noncoding_max_tokens, context
  )
  initial_files_context = f"---\n# Requested files\n{initial_files}\n---\n"

@@ -316,7 +319,7 @@ def reset_wcgw(
  if chat_id != context.bash_state.current_chat_id:
  # Try to load state from the chat ID
  if not context.bash_state.load_state_from_chat_id(chat_id):
- return f"Error: No saved bash state found for chat ID {chat_id}"
+ return f"Error: No saved bash state found for chat ID {chat_id}. Please re-initialize to get a new id or use correct id."
  if mode_name:
  # update modes if they're relative
  if isinstance(change_mode, CodeWriterMode):
@@ -455,7 +458,11 @@ def read_image_from_shell(file_path: str, context: Context) -> ImageData:


  def get_context_for_errors(
- errors: list[tuple[int, int]], file_content: str, max_tokens: Optional[int]
+ errors: list[tuple[int, int]],
+ file_content: str,
+ filename: str,
+ coding_max_tokens: Optional[int],
+ noncoding_max_tokens: Optional[int],
  ) -> str:
  file_lines = file_content.split("\n")
  min_line_num = max(0, min([error[0] for error in errors]) - 10)
@@ -463,6 +470,7 @@ def get_context_for_errors(
  context_lines = file_lines[min_line_num:max_line_num]
  context = "\n".join(context_lines)

+ max_tokens = select_max_tokens(filename, coding_max_tokens, noncoding_max_tokens)
  if max_tokens is not None and max_tokens > 0:
  ntokens = len(default_enc.encoder(context))
  if ntokens > max_tokens:
@@ -473,7 +481,8 @@ def write_file(
  def write_file(
  writefile: WriteIfEmpty,
  error_on_exist: bool,
- max_tokens: Optional[int],
+ coding_max_tokens: Optional[int],
+ noncoding_max_tokens: Optional[int],
  context: Context,
  ) -> tuple[
  str, dict[str, list[tuple[int, int]]]
@@ -536,7 +545,7 @@ def write_file(
  msg = f"Error: you need to read existing file {path_} at least once before it can be overwritten.\n\n"
  # Read the entire file
  file_content_str, truncated, _, _, line_range = read_file(
- path_, max_tokens, context, False
+ path_, coding_max_tokens, noncoding_max_tokens, context, False
  )
  file_ranges = [line_range]

@@ -558,7 +567,7 @@ def write_file(
  msg = "Error: the file has changed since last read.\n\n"
  # Read the entire file again
  file_content_str, truncated, _, _, line_range = read_file(
- path_, max_tokens, context, False
+ path_, coding_max_tokens, noncoding_max_tokens, context, False
  )
  file_ranges = [line_range]

@@ -591,7 +600,8 @@ def write_file(
  )
  readfiles, file_ranges_dict, truncated = read_files(
  paths_readfiles.file_paths,
- max_tokens,
+ coding_max_tokens,
+ noncoding_max_tokens,
  context,
  show_line_numbers=False,
  start_line_nums=paths_readfiles.start_line_nums,
@@ -631,7 +641,11 @@ def write_file(
  syntax_errors += "\nNote: Ignore if 'tagged template literals' are used, they may raise false positive errors in tree-sitter."

  context_for_errors = get_context_for_errors(
- check.errors, writefile.file_content, max_tokens
+ check.errors,
+ writefile.file_content,
+ path_,
+ coding_max_tokens,
+ noncoding_max_tokens,
  )
  context.console.print(f"W: Syntax errors encountered: {syntax_errors}")
  warnings.append(f"""
@@ -656,10 +670,13 @@ Syntax errors:


  def do_diff_edit(
- fedit: FileEdit, max_tokens: Optional[int], context: Context
+ fedit: FileEdit,
+ coding_max_tokens: Optional[int],
+ noncoding_max_tokens: Optional[int],
+ context: Context,
  ) -> tuple[str, dict[str, list[tuple[int, int]]]]:
  try:
- return _do_diff_edit(fedit, max_tokens, context)
+ return _do_diff_edit(fedit, coding_max_tokens, noncoding_max_tokens, context)
  except Exception as e:
  # Try replacing \"
  try:
@@ -669,14 +686,19 @@ def do_diff_edit(
  '\\"', '"'
  ),
  )
- return _do_diff_edit(fedit, max_tokens, context)
+ return _do_diff_edit(
+ fedit, coding_max_tokens, noncoding_max_tokens, context
+ )
  except Exception:
  pass
  raise e


  def _do_diff_edit(
- fedit: FileEdit, max_tokens: Optional[int], context: Context
+ fedit: FileEdit,
+ coding_max_tokens: Optional[int],
+ noncoding_max_tokens: Optional[int],
+ context: Context,
  ) -> tuple[str, dict[str, list[tuple[int, int]]]]:
  context.console.log(f"Editing file: {fedit.file_path}")

@@ -736,7 +758,11 @@ def _do_diff_edit(
  syntax_errors = check.description
  if syntax_errors:
  context_for_errors = get_context_for_errors(
- check.errors, apply_diff_to, max_tokens
+ check.errors,
+ apply_diff_to,
+ path_,
+ coding_max_tokens,
+ noncoding_max_tokens,
  )
  if extension in {"tsx", "ts"}:
  syntax_errors += "\nNote: Ignore if 'tagged template literals' are used, they may raise false positive errors in tree-sitter."
@@ -782,7 +808,8 @@ def _is_edit(content: str, percentage: int) -> bool:

  def file_writing(
  file_writing_args: FileWriteOrEdit,
- max_tokens: Optional[int],
+ coding_max_tokens: Optional[int],
+ noncoding_max_tokens: Optional[int],
  context: Context,
  ) -> tuple[
  str, dict[str, list[tuple[int, int]]]
@@ -797,7 +824,7 @@ def file_writing(
  # Try to load state from the chat ID
  if not context.bash_state.load_state_from_chat_id(file_writing_args.chat_id):
  return (
- f"Error: No saved bash state found for chat ID {file_writing_args.chat_id}. Please initialize first with this ID.",
+ f"Error: No saved bash state found for chat ID {file_writing_args.chat_id}. Please re-initialize to get a new id or use correct id.",
  {},
  )

@@ -820,7 +847,8 @@ def file_writing(
  file_content=file_writing_args.file_content_or_search_replace_blocks,
  ),
  True,
- max_tokens,
+ coding_max_tokens,
+ noncoding_max_tokens,
  context,
  )
  return result, paths
@@ -831,7 +859,8 @@ def file_writing(
  file_path=path_,
  file_edit_using_search_replace_blocks=file_writing_args.file_content_or_search_replace_blocks,
  ),
- max_tokens,
+ coding_max_tokens,
+ noncoding_max_tokens,
  context,
  )
  return result, paths
@@ -888,7 +917,8 @@ def get_tool_output(
  enc: EncoderDecoder[int],
  limit: float,
  loop_call: Callable[[str, float], tuple[str, float]],
- max_tokens: Optional[int],
+ coding_max_tokens: Optional[int],
+ noncoding_max_tokens: Optional[int],
  ) -> tuple[list[str | ImageData], float]:
  global TOOL_CALLS
  if isinstance(args, dict):
@@ -906,13 +936,15 @@ def get_tool_output(
  context.console.print("Calling execute bash tool")

  output_str, cost = execute_bash(
- context.bash_state, enc, arg, max_tokens, arg.wait_for_seconds
+ context.bash_state, enc, arg, noncoding_max_tokens, arg.wait_for_seconds
  )
  output = output_str, cost
  elif isinstance(arg, WriteIfEmpty):
  context.console.print("Calling write file tool")

- result, write_paths = write_file(arg, True, max_tokens, context)
+ result, write_paths = write_file(
+ arg, True, coding_max_tokens, noncoding_max_tokens, context
+ )
  output = result, 0
  # Add write paths with their ranges to our tracking dictionary
  for path, ranges in write_paths.items():
@@ -923,7 +955,9 @@ def get_tool_output(
  elif isinstance(arg, FileEdit):
  context.console.print("Calling full file edit tool")

- result, edit_paths = do_diff_edit(arg, max_tokens, context)
+ result, edit_paths = do_diff_edit(
+ arg, coding_max_tokens, noncoding_max_tokens, context
+ )
  output = result, 0.0
  # Add edit paths with their ranges to our tracking dictionary
  for path, ranges in edit_paths.items():
@@ -934,7 +968,9 @@ def get_tool_output(
  elif isinstance(arg, FileWriteOrEdit):
  context.console.print("Calling file writing tool")

- result, write_edit_paths = file_writing(arg, max_tokens, context)
+ result, write_edit_paths = file_writing(
+ arg, coding_max_tokens, noncoding_max_tokens, context
+ )
  output = result, 0.0
  # Add write/edit paths with their ranges to our tracking dictionary
  for path, ranges in write_edit_paths.items():
@@ -951,7 +987,8 @@ def get_tool_output(
  # Access line numbers through properties
  result, file_ranges_dict, _ = read_files(
  arg.file_paths,
- max_tokens,
+ coding_max_tokens,
+ noncoding_max_tokens,
  context,
  bool(arg.show_line_numbers_reason),
  arg.start_line_nums,
@@ -995,7 +1032,8 @@ def get_tool_output(
  arg.any_workspace_path,
  arg.initial_files_to_read,
  arg.task_id_to_resume,
- max_tokens,
+ coding_max_tokens,
+ noncoding_max_tokens,
  arg.mode,
  arg.chat_id,
  )
@@ -1024,7 +1062,9 @@ def get_tool_output(
  relevant_files.extend(globs[:1000])
  if not globs:
  warnings += f"Warning: No files found for the glob: {fglob}\n"
- relevant_files_data, _, _ = read_files(relevant_files[:10_000], None, context)
+ relevant_files_data, _, _ = read_files(
+ relevant_files[:10_000], None, None, context
+ )
  save_path = save_memory(
  arg, relevant_files_data, context.bash_state.serialize()
  )
@@ -1069,7 +1109,8 @@ def range_format(start_line_num: Optional[int], end_line_num: Optional[int]) ->

  def read_files(
  file_paths: list[str],
- max_tokens: Optional[int],
+ coding_max_tokens: Optional[int],
+ noncoding_max_tokens: Optional[int],
  context: Context,
  show_line_numbers: bool = False,
  start_line_nums: Optional[list[Optional[int]]] = None,
@@ -1105,7 +1146,8 @@ def read_files(
  # if they weren't provided as parameters
  content, truncated, tokens, path, line_range = read_file(
  file,
- max_tokens,
+ coding_max_tokens,
+ noncoding_max_tokens,
  context,
  show_line_numbers,
  start_line_num,
@@ -1121,13 +1163,20 @@ def read_files(
  message += f"\n{file}: {str(e)}\n"
  continue

- if max_tokens:
- max_tokens = max_tokens - tokens
+ if coding_max_tokens:
+ coding_max_tokens = coding_max_tokens - tokens
+ if noncoding_max_tokens:
+ noncoding_max_tokens = noncoding_max_tokens - tokens

  range_formatted = range_format(start_line_num, end_line_num)
  message += f"\n{file}{range_formatted}\n```\n{content}\n"

- if truncated or (max_tokens and max_tokens <= 0):
+ # Check if we've hit either token limit
+ if (
+ truncated
+ or (coding_max_tokens is not None and coding_max_tokens <= 0)
+ or (noncoding_max_tokens is not None and noncoding_max_tokens <= 0)
+ ):
  not_reading = file_paths[i + 1 :]
  if not_reading:
  message += f"\nNot reading the rest of the files: {', '.join(not_reading)} due to token limit, please call again"
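The hunk above is the multi-file accounting at the heart of this change: each file's token count is charged against both budgets, and reading stops as soon as either budget is exhausted (or a single file was already truncated). A standalone sketch of that bookkeeping follows; the function name and the precomputed token counts are illustrative stand-ins for wcgw's own loop, which counts tokens with default_enc:

```python
from typing import Optional

def plan_reads(
    files_with_tokens: list[tuple[str, int]],  # (path, token count); counting is stubbed for the sketch
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
) -> tuple[list[str], list[str]]:
    """Charge both budgets per file and stop when either runs out, as read_files now does."""
    read: list[str] = []
    for i, (path, tokens) in enumerate(files_with_tokens):
        read.append(path)
        if coding_max_tokens is not None:
            coding_max_tokens -= tokens
        if noncoding_max_tokens is not None:
            noncoding_max_tokens -= tokens
        if (coding_max_tokens is not None and coding_max_tokens <= 0) or (
            noncoding_max_tokens is not None and noncoding_max_tokens <= 0
        ):
            # Remaining files are reported back as "not read due to token limit".
            return read, [p for p, _ in files_with_tokens[i + 1 :]]
    return read, []

# Example: a 9000-token file exhausts the 8000 non-coding budget, so the next file is deferred.
assert plan_reads([("a.py", 9000), ("b.md", 100)], 24000, 8000) == (["a.py"], ["b.md"])
```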
@@ -1139,7 +1188,8 @@ def read_files(

  def read_file(
  file_path: str,
- max_tokens: Optional[int],
+ coding_max_tokens: Optional[int],
+ noncoding_max_tokens: Optional[int],
  context: Context,
  show_line_numbers: bool = False,
  start_line_num: Optional[int] = None,
@@ -1201,6 +1251,9 @@ def read_file(
  truncated = False
  tokens_counts = 0

+ # Select the appropriate max_tokens based on file type
+ max_tokens = select_max_tokens(file_path, coding_max_tokens, noncoding_max_tokens)
+
  # Handle token limit if specified
  if max_tokens is not None:
  tokens = default_enc.encoder(content)
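select_max_tokens itself lives in the new wcgw/client/file_ops/extensions.py (+137 lines), which this diff lists but does not show. Below is a hypothetical sketch of such a selector, keying the choice off the file extension; the extension set and the function body are assumptions, not the packaged implementation:

```python
from typing import Optional

# Hypothetical extension list; the real module ships its own, larger table.
CODING_EXTENSIONS = {"py", "ts", "tsx", "js", "jsx", "go", "rs", "java", "c", "cpp", "h"}

def select_max_tokens_sketch(
    file_path: str,
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
) -> Optional[int]:
    """Return the coding budget for recognized source files, the non-coding budget otherwise."""
    extension = file_path.rsplit(".", 1)[-1].lower() if "." in file_path else ""
    return coding_max_tokens if extension in CODING_EXTENSIONS else noncoding_max_tokens

# With the defaults the CLI clients now pass (24000 coding / 8000 non-coding):
assert select_max_tokens_sketch("src/tools.py", 24000, 8000) == 24000
assert select_max_tokens_sketch("notes/README.md", 24000, 8000) == 8000
```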
@@ -1220,7 +1273,10 @@ def read_file(
  content = truncated_content
  # Add informative message about truncation with total line count
  total_lines = len(all_lines)
- content += f"\n(...truncated) Only showing till line number {last_line_shown} of {total_lines} total lines due to the token limit, please continue reading from {last_line_shown + 1} if required"
+ content += (
+ f"\n(...truncated) Only showing till line number {last_line_shown} of {total_lines} total lines due to the token limit, please continue reading from {last_line_shown + 1} if required"
+ f" using syntax {file_path}:{last_line_shown + 1}-{total_lines}"
+ )
  truncated = True

  # Update effective_end if truncated
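The expanded truncation notice now spells out a {file_path}:{start}-{end} continuation syntax for the follow-up read. How wcgw parses that form is not part of this diff; an illustrative parser for the same shape might look like this:

```python
from typing import Optional

def split_ranged_path(spec: str) -> tuple[str, Optional[int], Optional[int]]:
    """Split 'src/app.py:801-1200' into ('src/app.py', 801, 1200); plain paths pass through."""
    path, sep, rng = spec.rpartition(":")
    if sep:
        start_s, dash, end_s = rng.partition("-")
        if dash and start_s.isdigit() and end_s.isdigit():
            return path, int(start_s), int(end_s)
    return spec, None, None

assert split_ranged_path("src/app.py:801-1200") == ("src/app.py", 801, 1200)
assert split_ranged_path("src/app.py") == ("src/app.py", None, None)
```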
@@ -1262,7 +1318,8 @@ if __name__ == "__main__":
  default_enc,
  0,
  lambda x, y: ("", 0),
- None,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  )
  )
  print(
@@ -1275,7 +1332,8 @@ if __name__ == "__main__":
  default_enc,
  0,
  lambda x, y: ("", 0),
- None,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  )
  )

@@ -1289,7 +1347,8 @@ if __name__ == "__main__":
  default_enc,
  0,
  lambda x, y: ("", 0),
- 15000,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  )[0][0]
  )

@@ -1305,6 +1364,7 @@ if __name__ == "__main__":
  default_enc,
  0,
  lambda x, y: ("", 0),
- 800,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  )[0][0]
  )
{wcgw-5.0.1.dist-info → wcgw-5.1.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: wcgw
- Version: 5.0.1
+ Version: 5.1.0
  Summary: Shell and coding agent for Claude and other mcp clients
  Project-URL: Homepage, https://github.com/rusiaaman/wcgw
  Author-email: Aman Rusia <gapypi@arcfu.com>
@@ -8,6 +8,7 @@ License-File: LICENSE
  Requires-Python: >=3.11
  Requires-Dist: anthropic>=0.39.0
  Requires-Dist: fastapi>=0.115.0
+ Requires-Dist: mcp>=1.7.0
  Requires-Dist: openai>=1.46.0
  Requires-Dist: petname>=2.6
  Requires-Dist: pexpect>=4.9.0
wcgw-5.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,37 @@
+ wcgw/__init__.py,sha256=JgAY25VsA208v8E7QTIU0E50nsk-TCJ4FWTEHmnssYU,127
+ wcgw/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ wcgw/types_.py,sha256=y60Lv_uUA1_sGIfADLUKy7rFPTax8jxor5GGCDKBfZ0,7533
+ wcgw/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ wcgw/client/common.py,sha256=OCH7Tx64jojz3M3iONUrGMadE07W21DiZs5sOxWX1Qc,1456
+ wcgw/client/diff-instructions.txt,sha256=eKRFA86yXWIGwNxIDaegTgTzIrFIBDWWiN1yP8Hf3i4,1685
+ wcgw/client/memory.py,sha256=U2Nw2si3Zg7n_RhNAuaYcmrrDtZ_Mooi-kfAOKflT-I,3079
+ wcgw/client/modes.py,sha256=roH6SPBokJMr5IzAlccdI-vJyvyS5vqSMMyth7TE86A,10315
+ wcgw/client/tool_prompts.py,sha256=Qgj8aqEFeKilF9otDu6ZQkV0Ig4l6JaeM99uWu4ecDQ,4802
+ wcgw/client/tools.py,sha256=lKGvkHBEBTrm80o6sFTNUv9VtMHEs-y4cT_mdkYjJNM,48276
+ wcgw/client/bash_state/bash_state.py,sha256=GXBh9zcCqSgUCKn9ZtVLosfqOkJRwrzO-qzyETNHTeE,41744
+ wcgw/client/bash_state/parser/__init__.py,sha256=AnlNSmoQTSoqqlLOLX4P1uXfzc5VGeCGJsGgtisq2zE,207
+ wcgw/client/bash_state/parser/bash_statement_parser.py,sha256=9a8vPO1r3_tXmaAcubTQ5UY-NseWlalgm8LZA17LXuY,6058
+ wcgw/client/encoder/__init__.py,sha256=Y-8f43I6gMssUCWpX5rLYiAFv3D-JPRs4uNEejPlke8,1514
+ wcgw/client/file_ops/diff_edit.py,sha256=AwLq6-pY7czv1y-JA5O2Q4rgbvn82YmSL9jD8XB3Vo4,19019
+ wcgw/client/file_ops/extensions.py,sha256=CmfD7ON6SY24Prh2tRZdV9KbhuOrWqqk8qL1VtshzB8,3608
+ wcgw/client/file_ops/search_replace.py,sha256=bB1S-xkDdOX4h7UHHxfCHaQiS9KyYkIdU6UGvIrgwQM,6820
+ wcgw/client/mcp_server/Readme.md,sha256=2Z88jj1mf9daYGW1CWaldcJ0moy8owDumhR2glBY3A8,109
+ wcgw/client/mcp_server/__init__.py,sha256=mm7xhBIPwJpRT3u-Qsj4cKVMpVyucJoKRlbMP_gRRB0,343
+ wcgw/client/mcp_server/server.py,sha256=jjwrmZZ8X0tXD0rsPZ9fKjEpdXpXCfdhEsN3Ho_tC8I,4989
+ wcgw/client/repo_ops/display_tree.py,sha256=uOGX2IbXTKXwtXT2wdDszuH4ODmSYsHm0toU55e1vYI,4021
+ wcgw/client/repo_ops/file_stats.py,sha256=AUA0Br7zFRpylWFYZPGMeGPJy3nWp9e2haKi34JptHE,4887
+ wcgw/client/repo_ops/path_prob.py,sha256=SWf0CDn37rtlsYRQ51ufSxay-heaQoVIhr1alB9tZ4M,2144
+ wcgw/client/repo_ops/paths_model.vocab,sha256=M1pXycYDQehMXtpp-qAgU7rtzeBbCOiJo4qcYFY0kqk,315087
+ wcgw/client/repo_ops/paths_tokens.model,sha256=jiwwE4ae8ADKuTZISutXuM5Wfyc_FBmN5rxTjoNnCos,1569052
+ wcgw/client/repo_ops/repo_context.py,sha256=e_w-1VfxWQiZT3r66N13nlmPt6AGm0uvG3A7aYSgaCI,9632
+ wcgw_cli/__init__.py,sha256=TNxXsTPgb52OhakIda9wTRh91cqoBqgQRx5TxjzQQFU,21
+ wcgw_cli/__main__.py,sha256=wcCrL4PjG51r5wVKqJhcoJPTLfHW0wNbD31DrUN0MWI,28
+ wcgw_cli/anthropic_client.py,sha256=8bjDY59-aioyTJgpB-NBHZNhZaq6rqcTJcOf81kzCyA,19886
+ wcgw_cli/cli.py,sha256=-7FBe_lahKyUOhf65iurTA1M1gXXXAiT0OVKQVcZKKo,948
+ wcgw_cli/openai_client.py,sha256=GOqoSFazTV-cFjpdZGPM0DIwec8Up2TEcKUbsN40AGY,15990
+ wcgw_cli/openai_utils.py,sha256=xGOb3W5ALrIozV7oszfGYztpj0FnXdD7jAxm5lEIVKY,2439
+ wcgw-5.1.0.dist-info/METADATA,sha256=VUNkqbng5Y1O7WtFaJkjuCkTtUSSZBpCjv6aHGZcdq0,14866
+ wcgw-5.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ wcgw-5.1.0.dist-info/entry_points.txt,sha256=UnjK-MAH4Qssh0tGJDMeij1oi-oRKokItkknP_BwShE,94
+ wcgw-5.1.0.dist-info/licenses/LICENSE,sha256=BvY8xqjOfc3X2qZpGpX3MZEmF-4Dp0LqgKBbT6L_8oI,11142
+ wcgw-5.1.0.dist-info/RECORD,,
wcgw_cli/anthropic_client.py CHANGED
@@ -130,7 +130,8 @@ def loop(
  try:
  _, memory, _ = load_memory(
  resume,
- 8000,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  lambda x: default_enc.encoder(x),
  lambda x: default_enc.decoder(x),
  )
@@ -197,7 +198,7 @@ def loop(
  tools = [
  ToolParam(
  name=tool.name,
- description=tool.description,
+ description=tool.description or "", # Ensure it's not None
  input_schema=tool.inputSchema,
  )
  for tool in TOOL_PROMPTS
@@ -223,8 +224,10 @@ def loop(
  os.getcwd(),
  [],
  resume if (memory and resume) else "",
- max_tokens=8000,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  mode="wcgw",
+ chat_id="",
  )

  if history:
@@ -412,7 +415,8 @@ def loop(
  default_enc,
  limit - cost,
  loop,
- max_tokens=8000,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  )
  except Exception as e:
  output_or_dones = [
wcgw_cli/openai_client.py CHANGED
@@ -120,7 +120,8 @@ def loop(
  try:
  _, memory, _ = load_memory(
  resume,
- 8000,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  lambda x: default_enc.encoder(x),
  lambda x: default_enc.decoder(x),
  )
@@ -187,8 +188,10 @@ def loop(
  os.getcwd(),
  [],
  resume if (memory and resume) else "",
- max_tokens=8000,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  mode="wcgw",
+ chat_id="",
  )

  if not history:
@@ -283,7 +286,8 @@ def loop(
  enc,
  limit - cost,
  loop,
- max_tokens=8000,
+ 24000, # coding_max_tokens
+ 8000, # noncoding_max_tokens
  )
  output_or_done = output_or_dones[0]
  except Exception as e:
mcp_wcgw/__init__.py DELETED
@@ -1,114 +0,0 @@
- from .client.session import ClientSession
- from .client.stdio import StdioServerParameters, stdio_client
- from .server.session import ServerSession
- from .server.stdio import stdio_server
- from .shared.exceptions import McpError
- from .types import (
- CallToolRequest,
- ClientCapabilities,
- ClientNotification,
- ClientRequest,
- ClientResult,
- CompleteRequest,
- CreateMessageRequest,
- CreateMessageResult,
- ErrorData,
- GetPromptRequest,
- GetPromptResult,
- Implementation,
- IncludeContext,
- InitializedNotification,
- InitializeRequest,
- InitializeResult,
- JSONRPCError,
- JSONRPCRequest,
- JSONRPCResponse,
- ListPromptsRequest,
- ListPromptsResult,
- ListResourcesRequest,
- ListResourcesResult,
- ListToolsResult,
- LoggingLevel,
- LoggingMessageNotification,
- Notification,
- PingRequest,
- ProgressNotification,
- PromptsCapability,
- ReadResourceRequest,
- ReadResourceResult,
- Resource,
- ResourcesCapability,
- ResourceUpdatedNotification,
- RootsCapability,
- SamplingMessage,
- ServerCapabilities,
- ServerNotification,
- ServerRequest,
- ServerResult,
- SetLevelRequest,
- StopReason,
- SubscribeRequest,
- Tool,
- ToolsCapability,
- UnsubscribeRequest,
- )
- from .types import (
- Role as SamplingRole,
- )
-
- __all__ = [
- "CallToolRequest",
- "ClientCapabilities",
- "ClientNotification",
- "ClientRequest",
- "ClientResult",
- "ClientSession",
- "CreateMessageRequest",
- "CreateMessageResult",
- "ErrorData",
- "GetPromptRequest",
- "GetPromptResult",
- "Implementation",
- "IncludeContext",
- "InitializeRequest",
- "InitializeResult",
- "InitializedNotification",
- "JSONRPCError",
- "JSONRPCRequest",
- "ListPromptsRequest",
- "ListPromptsResult",
- "ListResourcesRequest",
- "ListResourcesResult",
- "ListToolsResult",
- "LoggingLevel",
- "LoggingMessageNotification",
- "McpError",
- "Notification",
- "PingRequest",
- "ProgressNotification",
- "PromptsCapability",
- "ReadResourceRequest",
- "ReadResourceResult",
- "ResourcesCapability",
- "ResourceUpdatedNotification",
- "Resource",
- "RootsCapability",
- "SamplingMessage",
- "SamplingRole",
- "ServerCapabilities",
- "ServerNotification",
- "ServerRequest",
- "ServerResult",
- "ServerSession",
- "SetLevelRequest",
- "StdioServerParameters",
- "StopReason",
- "SubscribeRequest",
- "Tool",
- "ToolsCapability",
- "UnsubscribeRequest",
- "stdio_client",
- "stdio_server",
- "CompleteRequest",
- "JSONRPCResponse",
- ]