zrb 1.13.1__py3-none-any.whl → 1.21.33__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. zrb/__init__.py +2 -6
  2. zrb/attr/type.py +10 -7
  3. zrb/builtin/__init__.py +2 -0
  4. zrb/builtin/git.py +12 -1
  5. zrb/builtin/group.py +31 -15
  6. zrb/builtin/http.py +7 -8
  7. zrb/builtin/llm/attachment.py +40 -0
  8. zrb/builtin/llm/chat_completion.py +287 -0
  9. zrb/builtin/llm/chat_session.py +130 -144
  10. zrb/builtin/llm/chat_session_cmd.py +288 -0
  11. zrb/builtin/llm/chat_trigger.py +78 -0
  12. zrb/builtin/llm/history.py +4 -4
  13. zrb/builtin/llm/llm_ask.py +218 -110
  14. zrb/builtin/llm/tool/api.py +74 -62
  15. zrb/builtin/llm/tool/cli.py +56 -21
  16. zrb/builtin/llm/tool/code.py +57 -47
  17. zrb/builtin/llm/tool/file.py +292 -255
  18. zrb/builtin/llm/tool/note.py +84 -0
  19. zrb/builtin/llm/tool/rag.py +25 -18
  20. zrb/builtin/llm/tool/search/__init__.py +1 -0
  21. zrb/builtin/llm/tool/search/brave.py +66 -0
  22. zrb/builtin/llm/tool/search/searxng.py +61 -0
  23. zrb/builtin/llm/tool/search/serpapi.py +61 -0
  24. zrb/builtin/llm/tool/sub_agent.py +53 -26
  25. zrb/builtin/llm/tool/web.py +94 -157
  26. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
  27. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
  28. zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
  29. zrb/builtin/searxng/config/settings.yml +5671 -0
  30. zrb/builtin/searxng/start.py +21 -0
  31. zrb/builtin/setup/latex/ubuntu.py +1 -0
  32. zrb/builtin/setup/ubuntu.py +1 -1
  33. zrb/builtin/shell/autocomplete/bash.py +4 -3
  34. zrb/builtin/shell/autocomplete/zsh.py +4 -3
  35. zrb/config/config.py +297 -79
  36. zrb/config/default_prompt/file_extractor_system_prompt.md +109 -9
  37. zrb/config/default_prompt/interactive_system_prompt.md +25 -28
  38. zrb/config/default_prompt/persona.md +1 -1
  39. zrb/config/default_prompt/repo_extractor_system_prompt.md +31 -31
  40. zrb/config/default_prompt/repo_summarizer_system_prompt.md +27 -8
  41. zrb/config/default_prompt/summarization_prompt.md +57 -16
  42. zrb/config/default_prompt/system_prompt.md +29 -25
  43. zrb/config/llm_config.py +129 -24
  44. zrb/config/llm_context/config.py +127 -90
  45. zrb/config/llm_context/config_parser.py +1 -7
  46. zrb/config/llm_context/workflow.py +81 -0
  47. zrb/config/llm_rate_limitter.py +100 -47
  48. zrb/context/any_shared_context.py +7 -1
  49. zrb/context/context.py +8 -2
  50. zrb/context/shared_context.py +6 -8
  51. zrb/group/any_group.py +12 -5
  52. zrb/group/group.py +67 -3
  53. zrb/input/any_input.py +5 -1
  54. zrb/input/base_input.py +18 -6
  55. zrb/input/option_input.py +13 -1
  56. zrb/input/text_input.py +7 -24
  57. zrb/runner/cli.py +21 -20
  58. zrb/runner/common_util.py +24 -19
  59. zrb/runner/web_route/task_input_api_route.py +5 -5
  60. zrb/runner/web_route/task_session_api_route.py +1 -4
  61. zrb/runner/web_util/user.py +7 -3
  62. zrb/session/any_session.py +12 -6
  63. zrb/session/session.py +39 -18
  64. zrb/task/any_task.py +24 -3
  65. zrb/task/base/context.py +17 -9
  66. zrb/task/base/execution.py +15 -8
  67. zrb/task/base/lifecycle.py +8 -4
  68. zrb/task/base/monitoring.py +12 -7
  69. zrb/task/base_task.py +69 -5
  70. zrb/task/base_trigger.py +12 -5
  71. zrb/task/llm/agent.py +130 -145
  72. zrb/task/llm/agent_runner.py +152 -0
  73. zrb/task/llm/config.py +45 -13
  74. zrb/task/llm/conversation_history.py +110 -29
  75. zrb/task/llm/conversation_history_model.py +4 -179
  76. zrb/task/llm/default_workflow/coding/workflow.md +41 -0
  77. zrb/task/llm/default_workflow/copywriting/workflow.md +68 -0
  78. zrb/task/llm/default_workflow/git/workflow.md +118 -0
  79. zrb/task/llm/default_workflow/golang/workflow.md +128 -0
  80. zrb/task/llm/default_workflow/html-css/workflow.md +135 -0
  81. zrb/task/llm/default_workflow/java/workflow.md +146 -0
  82. zrb/task/llm/default_workflow/javascript/workflow.md +158 -0
  83. zrb/task/llm/default_workflow/python/workflow.md +160 -0
  84. zrb/task/llm/default_workflow/researching/workflow.md +153 -0
  85. zrb/task/llm/default_workflow/rust/workflow.md +162 -0
  86. zrb/task/llm/default_workflow/shell/workflow.md +299 -0
  87. zrb/task/llm/file_replacement.py +206 -0
  88. zrb/task/llm/file_tool_model.py +57 -0
  89. zrb/task/llm/history_processor.py +206 -0
  90. zrb/task/llm/history_summarization.py +2 -192
  91. zrb/task/llm/print_node.py +192 -64
  92. zrb/task/llm/prompt.py +198 -153
  93. zrb/task/llm/subagent_conversation_history.py +41 -0
  94. zrb/task/llm/tool_confirmation_completer.py +41 -0
  95. zrb/task/llm/tool_wrapper.py +216 -55
  96. zrb/task/llm/workflow.py +76 -0
  97. zrb/task/llm_task.py +122 -70
  98. zrb/task/make_task.py +2 -3
  99. zrb/task/rsync_task.py +25 -10
  100. zrb/task/scheduler.py +4 -4
  101. zrb/util/attr.py +54 -39
  102. zrb/util/cli/markdown.py +12 -0
  103. zrb/util/cli/text.py +30 -0
  104. zrb/util/file.py +27 -11
  105. zrb/util/git.py +2 -2
  106. zrb/util/{llm/prompt.py → markdown.py} +2 -3
  107. zrb/util/string/conversion.py +1 -1
  108. zrb/util/truncate.py +23 -0
  109. zrb/util/yaml.py +204 -0
  110. zrb/xcom/xcom.py +10 -0
  111. {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/METADATA +40 -20
  112. {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/RECORD +114 -83
  113. {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/WHEEL +1 -1
  114. zrb/task/llm/default_workflow/coding.md +0 -24
  115. zrb/task/llm/default_workflow/copywriting.md +0 -17
  116. zrb/task/llm/default_workflow/researching.md +0 -18
  117. {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/entry_points.txt +0 -0
zrb/task/llm/config.py CHANGED
@@ -1,20 +1,43 @@
-from typing import TYPE_CHECKING, Any, Callable
+from typing import TYPE_CHECKING, Callable
 
 if TYPE_CHECKING:
     from pydantic_ai.models import Model
     from pydantic_ai.settings import ModelSettings
 
-from zrb.attr.type import StrAttr, fstring
+from zrb.attr.type import BoolAttr, StrAttr, StrListAttr
 from zrb.config.llm_config import LLMConfig, llm_config
 from zrb.context.any_context import AnyContext
-from zrb.context.any_shared_context import AnySharedContext
-from zrb.util.attr import get_attr
+from zrb.util.attr import get_attr, get_bool_attr, get_str_list_attr
+
+
+def get_yolo_mode(
+    ctx: AnyContext,
+    yolo_mode_attr: (
+        Callable[[AnyContext], list[str] | bool | None] | StrListAttr | BoolAttr | None
+    ) = None,
+    render_yolo_mode: bool = True,
+) -> bool | list[str]:
+    if yolo_mode_attr is None:
+        return llm_config.default_yolo_mode
+    try:
+        return get_bool_attr(
+            ctx,
+            yolo_mode_attr,
+            False,
+            auto_render=render_yolo_mode,
+        )
+    except Exception:
+        return get_str_list_attr(
+            ctx,
+            yolo_mode_attr,
+            auto_render=render_yolo_mode,
+        )
 
 
 def get_model_settings(
     ctx: AnyContext,
     model_settings_attr: (
-        "ModelSettings | Callable[[AnySharedContext], ModelSettings] | None"
+        "ModelSettings | Callable[[AnyContext], ModelSettings] | None"
     ) = None,
 ) -> "ModelSettings | None":
     """Gets the model settings, resolving callables if necessary."""
@@ -47,7 +70,7 @@ def get_model_api_key(
 ) -> str | None:
     """Gets the model API key, rendering if configured."""
     api_key = get_attr(ctx, model_api_key_attr, None, auto_render=render_model_api_key)
-    if api_key is None and llm_config.default_api_key is not None:
+    if api_key is None and llm_config.default_model_api_key is not None:
         return llm_config.default_model_api_key
     if isinstance(api_key, str) or api_key is None:
         return api_key
@@ -56,18 +79,21 @@ def get_model_api_key(
 
 def get_model(
     ctx: AnyContext,
-    model_attr: "Callable[[AnySharedContext], Model | str | fstring] | Model | None",
+    model_attr: "Callable[[AnyContext], Model | str | None] | Model | str | None",
     render_model: bool,
-    model_base_url_attr: StrAttr | None = None,
+    model_base_url_attr: "Callable[[AnyContext], Model | str | None] | Model | str | None",
     render_model_base_url: bool = True,
-    model_api_key_attr: StrAttr | None = None,
+    model_api_key_attr: "Callable[[AnyContext], Model | str | None] | Model | str | None" = None,
     render_model_api_key: bool = True,
-) -> "str | Model | None":
+    is_small_model: bool = False,
+) -> "str | Model":
     """Gets the model instance or name, handling defaults and configuration."""
     from pydantic_ai.models import Model
 
     model = get_attr(ctx, model_attr, None, auto_render=render_model)
     if model is None:
+        if is_small_model:
+            return llm_config.default_small_model
         return llm_config.default_model
     if isinstance(model, str):
         model_base_url = get_model_base_url(
@@ -76,11 +102,11 @@ def get_model(
         model_api_key = get_model_api_key(ctx, model_api_key_attr, render_model_api_key)
         new_llm_config = LLMConfig(
             default_model_name=model,
-            default_base_url=model_base_url,
-            default_api_key=model_api_key,
+            default_model_base_url=model_base_url,
+            default_model_api_key=model_api_key,
         )
         if model_base_url is None and model_api_key is None:
-            default_model_provider = llm_config.default_model_provider
+            default_model_provider = _get_default_model_provider(is_small_model)
             if default_model_provider is not None:
                 new_llm_config.set_default_model_provider(default_model_provider)
         return new_llm_config.default_model
@@ -88,3 +114,9 @@ def get_model(
     if isinstance(model, Model):
         return model
     raise ValueError(f"Invalid model type resolved: {type(model)}, value: {model}")
+
+
+def _get_default_model_provider(is_small_model: bool = False):
+    if is_small_model:
+        return llm_config.default_small_model_provider
+    return llm_config.default_model_provider
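
Reviewer's note on the new `get_yolo_mode` helper above: YOLO mode may now be either a plain boolean or a list of tool names, and the helper tries boolean resolution first, falling back to list resolution when that fails. A minimal standalone sketch of that bool-or-list fallback pattern (plain Python; `to_boolean` and `resolve_yolo_mode` are illustrative stand-ins, not zrb's actual `get_bool_attr`/`get_str_list_attr` API):

```python
# Sketch of the bool-or-list resolution used by get_yolo_mode.
# to_boolean is a stand-in for zrb's boolean-conversion helper.
def to_boolean(value) -> bool:
    if isinstance(value, bool):
        return value
    if str(value).lower() in ("true", "1", "yes"):
        return True
    if str(value).lower() in ("false", "0", "no"):
        return False
    raise ValueError(f"Not a boolean: {value}")


def resolve_yolo_mode(value, default: bool = False) -> bool | list[str]:
    # None -> fall back to the configured default.
    if value is None:
        return default
    try:
        # First try to interpret the attribute as a boolean flag.
        return to_boolean(value)
    except ValueError:
        # Otherwise treat it as a list of tool names (comma-separated string or list).
        if isinstance(value, str):
            return [name.strip() for name in value.split(",") if name.strip()]
        return [str(item) for item in value]


print(resolve_yolo_mode("true"))                          # True
print(resolve_yolo_mode("read_file, run_shell_command"))  # ['read_file', 'run_shell_command']
print(resolve_yolo_mode(["analyze_repo"]))                # ['analyze_repo']
```

The try/except ordering mirrors the hunk above: anything that cannot be read as a boolean is treated as a list of tool names, presumably the tools allowed to run without confirmation.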
zrb/task/llm/conversation_history.py CHANGED
@@ -1,16 +1,73 @@
 import json
+import os
 from collections.abc import Callable
-from copy import deepcopy
 from typing import Any
 
 from zrb.attr.type import StrAttr
+from zrb.config.llm_context.config import llm_context_config
 from zrb.context.any_context import AnyContext
-from zrb.context.any_shared_context import AnySharedContext
 from zrb.task.llm.conversation_history_model import ConversationHistory
 from zrb.task.llm.typing import ListOfDict
 from zrb.util.attr import get_str_attr
-from zrb.util.file import write_file
+from zrb.util.file import read_file, write_file
+from zrb.util.markdown import make_markdown_section
 from zrb.util.run import run_async
+from zrb.xcom.xcom import Xcom
+
+
+def _get_global_subagent_messages_xcom(ctx: AnyContext) -> Xcom:
+    if "_global_subagents" not in ctx.xcom:
+        ctx.xcom["_global_subagents"] = Xcom([{}])
+    if not isinstance(ctx.xcom["_global_subagents"], Xcom):
+        raise ValueError("ctx.xcom._global_subagents must be an Xcom")
+    return ctx.xcom["_global_subagents"]
+
+
+def inject_subagent_history_into_ctx(
+    ctx: AnyContext, conversation_history: ConversationHistory
+):
+    subagent_messages_xcom = _get_global_subagent_messages_xcom(ctx)
+    existing_subagent_history = subagent_messages_xcom.get({})
+    subagent_messages_xcom.set(
+        {**existing_subagent_history, **conversation_history.subagent_history}
+    )
+
+
+def set_ctx_subagent_history(ctx: AnyContext, subagent_name: str, messages: ListOfDict):
+    subagent_messages_xcom = _get_global_subagent_messages_xcom(ctx)
+    subagent_history = subagent_messages_xcom.get({})
+    subagent_history[subagent_name] = messages
+    subagent_messages_xcom.set(subagent_history)
+
+
+def get_subagent_histories_from_ctx(ctx: AnyContext) -> dict[str, ListOfDict]:
+    subagent_messsages_xcom = _get_global_subagent_messages_xcom(ctx)
+    return subagent_messsages_xcom.get({})
+
+
+def inject_conversation_history_notes(conversation_history: ConversationHistory):
+    conversation_history.long_term_note = _fetch_long_term_note(
+        conversation_history.project_path
+    )
+    conversation_history.contextual_note = _fetch_contextual_note(
+        conversation_history.project_path
+    )
+
+
+def _fetch_long_term_note(project_path: str) -> str:
+    contexts = llm_context_config.get_notes(cwd=project_path)
+    return contexts.get("/", "")
+
+
+def _fetch_contextual_note(project_path: str) -> str:
+    contexts = llm_context_config.get_notes(cwd=project_path)
+    return "\n".join(
+        [
+            make_markdown_section(header, content)
+            for header, content in contexts.items()
+            if header != "/"
+        ]
+    )
 
 
 def get_history_file(
@@ -27,16 +84,62 @@ def get_history_file(
     )
 
 
+async def _read_from_source(
+    ctx: AnyContext,
+    reader: (
+        Callable[[AnyContext], ConversationHistory | dict[str, Any] | list | None]
+        | None
+    ),
+    file_path: str | None,
+) -> "ConversationHistory | None":
+    # Priority 1: Reader function
+    if reader:
+        try:
+            raw_data = await run_async(reader(ctx))
+            if raw_data:
+                instance = ConversationHistory.parse_and_validate(
+                    ctx, raw_data, "reader"
+                )
+                if instance:
+                    return instance
+        except Exception as e:
+            ctx.log_warning(
+                f"Error executing conversation history reader: {e}. Ignoring."
+            )
+    # Priority 2: History file
+    if file_path and os.path.isfile(file_path):
+        try:
+            content = read_file(file_path)
+            raw_data = json.loads(content)
+            instance = ConversationHistory.parse_and_validate(
+                ctx, raw_data, f"file '{file_path}'"
+            )
+            if instance:
+                return instance
+        except json.JSONDecodeError:
+            ctx.log_warning(
+                f"Could not decode JSON from history file '{file_path}'. "
+                "Ignoring file content."
+            )
+        except Exception as e:
+            ctx.log_warning(
+                f"Error reading history file '{file_path}': {e}. "
+                "Ignoring file content."
+            )
+    # Fallback: Return default value
+    return None
+
+
 async def read_conversation_history(
     ctx: AnyContext,
     conversation_history_reader: (
-        Callable[[AnySharedContext], ConversationHistory | dict | list | None] | None
+        Callable[[AnyContext], ConversationHistory | dict | list | None] | None
     ),
     conversation_history_file_attr: StrAttr | None,
     render_history_file: bool,
     conversation_history_attr: (
         ConversationHistory
-        | Callable[[AnySharedContext], ConversationHistory | dict | list]
+        | Callable[[AnyContext], ConversationHistory | dict | list]
         | dict
         | list
     ),
@@ -46,7 +149,7 @@ async def read_conversation_history(
         ctx, conversation_history_file_attr, render_history_file
     )
     # Use the class method defined above
-    history_data = await ConversationHistory.read_from_source(
+    history_data = await _read_from_source(
         ctx=ctx,
         reader=conversation_history_reader,
         file_path=history_file,
@@ -80,7 +183,7 @@ async def write_conversation_history(
     ctx: AnyContext,
     history_data: ConversationHistory,
     conversation_history_writer: (
-        Callable[[AnySharedContext, ConversationHistory], None] | None
+        Callable[[AnyContext, ConversationHistory], None] | None
     ),
     conversation_history_file_attr: StrAttr | None,
     render_history_file: bool,
@@ -95,28 +198,6 @@ async def write_conversation_history(
     write_file(history_file, json.dumps(history_data.to_dict(), indent=2))
 
 
-def replace_system_prompt_in_history(
-    history_list: ListOfDict, replacement: str = "<main LLM system prompt>"
-) -> ListOfDict:
-    """
-    Returns a new history list where any part with part_kind 'system-prompt'
-    has its 'content' replaced with the given replacement string.
-    Args:
-        history: List of history items (each item is a dict with a 'parts' list).
-        replacement: The string to use in place of system-prompt content.
-
-    Returns:
-        A deep-copied list of history items with system-prompt content replaced.
-    """
-    new_history = deepcopy(history_list)
-    for item in new_history:
-        parts = item.get("parts", [])
-        for part in parts:
-            if part.get("part_kind") == "system-prompt":
-                part["content"] = replacement
-    return new_history
-
-
 def count_part_in_history_list(history_list: ListOfDict) -> int:
     """Calculates the total number of 'parts' in a history list."""
     history_part_len = 0
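
Reviewer's note on the new subagent-history helpers above: per-subagent transcripts are kept in a single shared Xcom entry (`_global_subagents`) keyed by subagent name; histories loaded from disk are merged in, and each run overwrites only its own key. A rough standalone sketch of that store-and-merge pattern, with a plain dict standing in for zrb's `Xcom` object (illustrative only, not zrb's actual API):

```python
# Sketch of the shared subagent-history store; a module-level dict stands in
# for ctx.xcom["_global_subagents"] used by the helpers above.
from typing import Any

ListOfDict = list[dict[str, Any]]
_store: dict[str, ListOfDict] = {}


def inject_subagent_history(saved: dict[str, ListOfDict]) -> None:
    # Merge history loaded from a saved conversation into the in-memory store.
    _store.update(saved)


def set_subagent_history(subagent_name: str, messages: ListOfDict) -> None:
    # Record the latest transcript for one subagent, leaving the others intact.
    _store[subagent_name] = messages


def get_subagent_histories() -> dict[str, ListOfDict]:
    return dict(_store)


inject_subagent_history({"researcher": [{"role": "user", "content": "hi"}]})
set_subagent_history("coder", [{"role": "assistant", "content": "done"}])
print(sorted(get_subagent_histories()))  # ['coder', 'researcher']
```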
zrb/task/llm/conversation_history_model.py CHANGED
@@ -1,90 +1,38 @@
 import json
 import os
-from collections.abc import Callable
 from typing import Any
 
-from zrb.config.llm_context.config import llm_context_config
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.typing import ListOfDict
-from zrb.util.file import read_file
-from zrb.util.llm.prompt import make_prompt_section
-from zrb.util.run import run_async
 
 
 class ConversationHistory:
 
     def __init__(
         self,
-        past_conversation_summary: str = "",
-        past_conversation_transcript: str = "",
        history: ListOfDict | None = None,
        contextual_note: str | None = None,
        long_term_note: str | None = None,
        project_path: str | None = None,
+        subagent_history: dict[str, ListOfDict] | None = None,
     ):
-        self.past_conversation_transcript = past_conversation_transcript
-        self.past_conversation_summary = past_conversation_summary
         self.history = history if history is not None else []
         self.contextual_note = contextual_note if contextual_note is not None else ""
         self.long_term_note = long_term_note if long_term_note is not None else ""
         self.project_path = project_path if project_path is not None else os.getcwd()
+        self.subagent_history = subagent_history if subagent_history is not None else {}
 
     def to_dict(self) -> dict[str, Any]:
         return {
-            "past_conversation_summary": self.past_conversation_summary,
-            "past_conversation_transcript": self.past_conversation_transcript,
             "history": self.history,
             "contextual_note": self.contextual_note,
             "long_term_note": self.long_term_note,
+            "subagent_history": self.subagent_history,
         }
 
     def model_dump_json(self, indent: int = 2) -> str:
         return json.dumps(self.to_dict(), indent=indent)
 
-    @classmethod
-    async def read_from_source(
-        cls,
-        ctx: AnyContext,
-        reader: Callable[[AnyContext], dict[str, Any] | list | None] | None,
-        file_path: str | None,
-    ) -> "ConversationHistory | None":
-        # Priority 1: Reader function
-        if reader:
-            try:
-                raw_data = await run_async(reader(ctx))
-                if raw_data:
-                    instance = cls.parse_and_validate(ctx, raw_data, "reader")
-                    if instance:
-                        return instance
-            except Exception as e:
-                ctx.log_warning(
-                    f"Error executing conversation history reader: {e}. Ignoring."
-                )
-        # Priority 2: History file
-        if file_path and os.path.isfile(file_path):
-            try:
-                content = read_file(file_path)
-                raw_data = json.loads(content)
-                instance = cls.parse_and_validate(ctx, raw_data, f"file '{file_path}'")
-                if instance:
-                    return instance
-            except json.JSONDecodeError:
-                ctx.log_warning(
-                    f"Could not decode JSON from history file '{file_path}'. "
-                    "Ignoring file content."
-                )
-            except Exception as e:
-                ctx.log_warning(
-                    f"Error reading history file '{file_path}': {e}. "
-                    "Ignoring file content."
-                )
-        # Fallback: Return default value
-        return None
-
-    def fetch_newest_notes(self):
-        self._fetch_long_term_note()
-        self._fetch_contextual_note()
-
     @classmethod
     def parse_and_validate(
@@ -93,15 +41,11 @@ class ConversationHistory:
         if isinstance(data, cls):
             return data  # Already a valid instance
         if isinstance(data, dict):
-            # This handles both the new format and the old {'context': ..., 'history': ...}
             return cls(
-                past_conversation_summary=data.get("past_conversation_summary", ""),
-                past_conversation_transcript=data.get(
-                    "past_conversation_transcript", ""
-                ),
                 history=data.get("history", data.get("messages", [])),
                 contextual_note=data.get("contextual_note", ""),
                 long_term_note=data.get("long_term_note", ""),
+                subagent_history=data.get("subagent_history", {}),
             )
         elif isinstance(data, list):
             # Handle very old format (just a list) - wrap it
@@ -121,122 +65,3 @@ class ConversationHistory:
                     f"Error validating/parsing history data from {source}: {e}. Ignoring."
                 )
         return cls()
-
-    def write_past_conversation_summary(self, past_conversation_summary: str):
-        """
-        Write or update the past conversation summary.
-
-        Use this tool to store or update a summary of previous conversations for
-        future reference. This is useful for providing context to LLMs or other tools
-        that need a concise history.
-
-        Args:
-            past_conversation_summary (str): The summary text to store.
-
-        Returns:
-            str: A JSON object indicating the success or failure of the operation.
-
-        Raises:
-            Exception: If the summary cannot be written.
-        """
-        self.past_conversation_summary = past_conversation_summary
-        return json.dumps({"success": True})
-
-    def write_past_conversation_transcript(self, past_conversation_transcript: str):
-        """
-        Write or update the past conversation transcript.
-
-        Use this tool to store or update the full transcript of previous conversations.
-        This is useful for providing detailed context to LLMs or for record-keeping.
-
-        Args:
-            past_conversation_transcript (str): The transcript text to store.
-
-        Returns:
-            str: A JSON object indicating the success or failure of the operation.
-
-        Raises:
-            Exception: If the transcript cannot be written.
-        """
-        self.past_conversation_transcript = past_conversation_transcript
-        return json.dumps({"success": True})
-
-    def read_long_term_note(self) -> str:
-        """
-        Read the content of the long-term references.
-
-        This tool helps you retrieve knowledge or notes stored for long-term reference.
-        If the note does not exist, you may want to create it using the write tool.
-
-        Returns:
-            str: JSON with content of the notes.
-
-        Raises:
-            Exception: If the note cannot be read.
-        """
-        return json.dumps({"content": self._fetch_long_term_note()})
-
-    def write_long_term_note(self, content: str) -> str:
-        """
-        Write the entire content of the long-term references.
-        This will overwrite any existing long-term notes.
-
-        Args:
-            content (str): The full content of the long-term notes.
-
-        Returns:
-            str: JSON indicating success.
-        """
-        llm_context_config.write_context(content, context_path="/")
-        return json.dumps({"success": True})
-
-    def read_contextual_note(self) -> str:
-        """
-        Read the content of the contextual references for the current project.
-
-        This tool helps you retrieve knowledge or notes stored for contextual reference.
-        If the note does not exist, you may want to create it using the write tool.
-
-        Returns:
-            str: JSON with content of the notes.
-
-        Raises:
-            Exception: If the note cannot be read.
-        """
-        return json.dumps({"content": self._fetch_contextual_note()})
-
-    def write_contextual_note(
-        self, content: str, context_path: str | None = None
-    ) -> str:
-        """
-        Write the entire content of the contextual references for a specific path.
-        This will overwrite any existing contextual notes for that path.
-
-        Args:
-            content (str): The full content of the contextual notes.
-            context_path (str, optional): The directory path for the context.
-                Defaults to the current project path.
-
-        Returns:
-            str: JSON indicating success.
-        """
-        if context_path is None:
-            context_path = self.project_path
-        llm_context_config.write_context(content, context_path=context_path)
-        return json.dumps({"success": True})
-
-    def _fetch_long_term_note(self):
-        contexts = llm_context_config.get_contexts(cwd=self.project_path)
-        self.long_term_note = contexts.get("/", "")
-        return self.long_term_note
-
-    def _fetch_contextual_note(self):
-        contexts = llm_context_config.get_contexts(cwd=self.project_path)
-        self.contextual_note = "\n".join(
-            [
-                make_prompt_section(header, content)
-                for header, content in contexts.items()
-                if header != "/"
-            ]
-        )
-        return self.contextual_note
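
Reviewer's note on `parse_and_validate` above: it keeps older on-disk payloads loadable, treating a bare list as the message history and defaulting any missing dict field, including the new `subagent_history`. A hedged standalone mirror of that normalization logic (field names follow the diff; the function itself is illustrative, not part of zrb):

```python
# Standalone mirror of the dict/list normalization in parse_and_validate.
from typing import Any


def normalize_history_payload(data: Any) -> dict[str, Any]:
    if isinstance(data, dict):
        # New and old dict formats: fall back to empty defaults per field.
        return {
            "history": data.get("history", data.get("messages", [])),
            "contextual_note": data.get("contextual_note", ""),
            "long_term_note": data.get("long_term_note", ""),
            "subagent_history": data.get("subagent_history", {}),
        }
    if isinstance(data, list):
        # Very old format: a bare list of messages.
        return {
            "history": data,
            "contextual_note": "",
            "long_term_note": "",
            "subagent_history": {},
        }
    raise ValueError(f"Unsupported history payload: {type(data)}")


print(normalize_history_payload([{"role": "user"}])["history"])        # [{'role': 'user'}]
print(normalize_history_payload({"messages": []})["subagent_history"])  # {}
```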
zrb/task/llm/default_workflow/coding/workflow.md ADDED
@@ -0,0 +1,41 @@
+---
+description: "A comprehensive workflow for software engineering tasks, including writing, modifying, and debugging code, as well as creating new applications. ALWAYS activate this workflow whenever you need to deal with software engineering tasks."
+---
+
+This workflow provides a structured approach to software engineering tasks. Adhere to these guidelines to deliver high-quality, idiomatic code that respects the project's existing patterns and conventions.
+
+# Workflow Loading Strategy
+
+This is a general-purpose coding workflow. For tasks involving specific languages or tools, you **MUST** load the relevant specialized workflows.
+
+- **If the task involves Python:** Load the `python` workflow.
+- **If the task involves Git:** Load the `git` workflow.
+- **If the task involves shell scripting:** Load the `shell` workflow.
+- **If the task involves Go:** Load the `golang` workflow.
+- **If the task involves Java:** Load the `java` workflow.
+- **If the task involves Javascript/Typescript:** Load the `javascript` workflow.
+- **If the task involves HTML/CSS:** Load the `html-css` workflow.
+- **If the task involves Rust:** Load the `rust` workflow.
+
+Always consider if a more specific workflow is available and appropriate for the task at hand.
+
+# Core Mandates
+
+- **Conventions:** Rigorously adhere to existing project conventions when reading or modifying code. Analyze surrounding code, tests, and configuration first.
+- **Libraries/Frameworks:** NEVER assume a library/framework is available or appropriate. Verify its established usage within the project (check imports, configuration files like 'package.json', 'Cargo.toml', 'requirements.txt', 'build.gradle', etc., or observe neighboring files) before employing it.
+- **Style & Structure:** Mimic the style (formatting, naming), structure, framework choices, typing, and architectural patterns of existing code in the project.
+- **Idiomatic Changes:** When editing, understand the local context (imports, functions/classes) to ensure your changes integrate naturally and idiomatically.
+- **Comments:** Add code comments sparingly. Focus on *why* something is done, especially for complex logic, rather than *what* is done. Only add high-value comments if necessary for clarity or if requested by the user. Do not edit comments that are separate from the code you are changing. *NEVER* talk to the user or describe your changes through comments.
+- **Proactiveness:** Fulfill the user's request thoroughly. When adding features or fixing bugs, this includes adding tests to ensure quality. Consider all created files, especially tests, to be permanent artifacts unless the user says otherwise.
+- **Confirm Ambiguity/Expansion:** Do not take significant actions beyond the clear scope of the request without confirming with the user. If asked *how* to do something, explain first, don't just do it.
+- **Explaining Changes:** After completing a code modification or file operation *do not* provide summaries unless asked.
+- **Do Not revert changes:** Do not revert changes to the codebase unless asked to do so by the user. Only revert changes made by you if they have resulted in an error or if the user has explicitly asked you to revert the changes.
+
+# Software Engineering Tasks
+When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this sequence:
+1. **Understand & Strategize:** Think about the user's request and the relevant codebase context. When the task involves **complex refactoring, codebase exploration or system-wide analysis**, your **first and primary tool** must be 'codebase_investigator'. Use it to build a comprehensive understanding of the code, its structure, and dependencies. For **simple, targeted searches** (like finding a specific function name, file path, or variable declaration), you should use 'search_file_content' or 'glob' directly.
+2. **Plan:** Build a coherent and grounded (based on the understanding in step 1) plan for how you intend to resolve the user's task. Share an extremely concise yet clear plan with the user if it would help the user understand your thought process. As part of the plan, you should use an iterative development process that includes writing unit tests to verify your changes. Use output logs or debug statements as part of this process to arrive at a solution.
+3. **Implement:** Use the available tools (e.g., 'replace_in_file', 'write_to_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
+4. **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
+5. **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
+6. **Finalize:** After all verification passes, consider the task complete. Do not remove or revert any changes or created files (like tests). Await the user's next instruction.
zrb/task/llm/default_workflow/copywriting/workflow.md ADDED
@@ -0,0 +1,68 @@
+---
+description: "A workflow for creating, refining, and organizing textual content."
+---
+Follow this workflow to produce content that is not just correct, but compelling, clear, and perfectly suited to its purpose.
+
+# Core Mandates
+
+- **Audience-First:** Always understand who you're writing for and what they need to know
+- **Purpose-Driven:** Every piece of content must serve a clear objective
+- **Quality Standards:** Deliver polished, professional content that meets the highest standards
+- **Iterative Refinement:** Content improves through multiple rounds of review and editing
+
+# Tool Usage Guideline
+- Use `read_from_file` to analyze existing content and style guides
+- Use `write_to_file` for creating new content drafts
+- Use `replace_in_file` for targeted edits and refinements
+
+# Step 1: Understand Intent and Audience
+
+1. **Define the Goal:** What is this text supposed to achieve? (e.g., persuade, inform, entertain, sell). If the user is vague, ask for clarification.
+2. **Identify the Audience:** Who are you writing for? (e.g., experts, beginners, customers). This dictates your tone, vocabulary, and level of detail.
+3. **Determine the Tone:** Choose a voice that serves the goal and resonates with the audience (e.g., formal, witty, technical, urgent).
+4. **Analyze Existing Content:** Review any provided examples, style guides, or reference materials to understand established patterns.
+
+# Step 2: Plan and Outline
+
+1. **Create Logical Structure:** Develop an outline that flows naturally from introduction to conclusion
+2. **Key Sections:** Identify main talking points and supporting arguments
+3. **Call-to-Action:** Define what you want the reader to do or think after reading
+4. **Get Approval:** Present the outline to the user for confirmation before proceeding
+
+# Step 3: Draft with Purpose
+
+1. **Hook the Reader:** Start with a strong opening that grabs attention
+2. **Use Active Voice:** Make your writing direct and energetic
+3. **Show, Don't Just Tell:** Use examples, stories, and data to illustrate your points
+4. **Maintain Consistency:** Stick to the established tone and style throughout
+
+# Step 4: Refine and Polish
+
+1. **Read Aloud:** Catch awkward phrasing and grammatical errors
+2. **Cut Mercilessly:** Remove anything that doesn't serve the goal
+3. **Enhance Readability:** Use short paragraphs, headings, bullet points, and bold text
+4. **Verify Accuracy:** Ensure all facts, figures, and claims are correct
+
+# Step 5: Task-Specific Execution
+
+## Summarization
+- Distill the essence while preserving key information
+- Be objective and ruthless in cutting fluff
+- Maintain the original meaning and context
+
+## Proofreading
+- Correct grammar, spelling, and punctuation
+- Improve sentence flow and clarity
+- Preserve the original meaning and voice
+
+## Refining/Editing
+- Sharpen the author's message
+- Strengthen arguments and improve clarity
+- Ensure consistent tone while respecting the original voice
+
+# Step 6: Finalize and Deliver
+
+- Present the final content to the user
+- Be prepared to make additional refinements based on feedback
+- Ensure the content meets all stated objectives
+- Confirm the content is ready for its intended use