olca 0.2.0__tar.gz → 0.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: olca
3
- Version: 0.2.0
3
+ Version: 0.2.1
4
4
  Summary: A Python package for experimental usage of Langchain and Human-in-the-Loop
5
5
  Home-page: https://github.com/jgwill/olca
6
6
  Author: Jean GUillaume ISabelle
@@ -0,0 +1 @@
1
+ # left blank on purpose
@@ -0,0 +1,296 @@
1
#%%
# Module bootstrap: stdlib + third-party imports and LangSmith client setup.
import os
from click import prompt  # NOTE(review): 'prompt' appears unused in this module — confirm before removing.
import dotenv
from langchain import hub
import argparse
import yaml

import langsmith
# Initialize LangSmith client
# NOTE(review): this runs at import time, BEFORE dotenv.load_dotenv() is
# called further down the module — a LANGSMITH_API_KEY that lives only in
# a .env file will not be visible here and the process exits. Confirm
# whether the key is expected to come from the shell environment only.
LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY")
if not LANGSMITH_API_KEY:
    print("Error: LANGSMITH_API_KEY environment variable is not set.")
    exit(1)
client = langsmith.Client(api_key=LANGSMITH_API_KEY)
16
+
17
+
18
+ #jgwill/olca1
19
+ #olca1_prompt = hub.pull("jgwill/olca1") #Future use
20
+ #https://smith.langchain.com/hub/jgwill/olca1
21
#jgwill/olca1
#olca1_prompt = hub.pull("jgwill/olca1") #Future use
#https://smith.langchain.com/hub/jgwill/olca1
# Operational instructions appended to the user-supplied system prompt
# (skipped when the -D/--disable-system-append flag is given).
SYSTEM_PROMPT_APPEND="""
You do what is above and consider the following when doing the task:
---
* Create any non-existent directories. (./reports, ./log, ./.olca)
* create a subfolder .olca in the current directory if dont exist and :
* Write or append instructions to ./.olca/instructions.txt on how to perform tasks (in terms of reflection and action steps. example: cli syntax to execute something). You read that file to learn how to do your job if it exists. You update it with what you learned and keep it up to date. You dont erase the content of the file. If what you find in it is not related to your task, you ignore it. Dont output all these details you do internally to the standard output. You read it back from time to time to remember what you did and how to make it and what the user wants.
* Produce a report in ./reports/report-olca-[TIMESTAMP].md
* Stop and report to the user if repeated errors are encountered.
* Output the iteration number and plan to the standard output during each loop.
* If you are given an issue_id, you commit the work to that issue and push your results (dont output the git push to the standard output).
* Do not comment on your final results; just output them but keep logging your action steps you do internally (all reflection and action steps).
* Dont modify the file: olca.yml
* If github issue related, dont checkout a branch except if asked otherwise.
* You dont checkout branches, I repeat, you dont checkout branches.
* Generate your TIMESTAMP with the following command: date +'%Y%m%d%H%M%S' only once at the beginning of your script.
* Make sure if you Switched to branch, you switch back to main before the end of your script.
* Try to observe that you keep doing the same thing over and over again and stop right away if you see that (dont do that if you are developping a story)
* Be quiet with trivial output in the terminal.
* Write your plan in ./.olca/plan.md
* You watch out for basic syntax errors with your args when executing echo commands. (example: Syntax error: Unterminated quoted string)
----
REMEMBER: Dont introduce nor conclude, just output results. No comments. you present in a coherent format without preambles or fluff. Never use the word "determination".
"""

# Extra prompt appended when human-in-the-loop mode is active; shows the
# agent the expected interaction format for the 'human' tool.
HUMAN_APPEND_PROMPT = """
* Utilize the 'human' tool for interactions as directed.
* Communicate clearly and simply, avoiding exaggeration.
Example Interaction:
<example>
'==============================================
{ PURPOSE_OF_THE_MESSAGE_SHORT }
==============================================
{ CURRENT_STATUS_OR_MESSAGE_CONTENT }
==============================================
{ PROMPT_FOR_USER_INPUT_SHORT } :
</example>
"""
58
def get_input() -> str:
    """Read multi-line input from stdin until EOF or a lone "q" line.

    Prints a visual separator first, then accumulates lines. The
    terminating "q" line (if typed) is excluded from the result.

    Returns:
        The collected lines joined with newline characters.
    """
    print("----------------------")
    collected: list[str] = []
    while True:
        try:
            entry = input()
        except EOFError:
            break
        if entry == "q":
            break
        collected.append(entry)
    return "\n".join(collected)
70
+
71
+
72
# Allow overriding the default system-prompt suffix with the
# OLCA_SYSTEM_PROMPT_APPEND environment variable (e.g. from a .env file
# loaded earlier by the caller's shell). The original wrapped this in a
# try/bare-except, but os.getenv never raises, so no handler is needed.
OLCA_SYSTEM_PROMPT_APPEND = os.getenv("OLCA_SYSTEM_PROMPT_APPEND")
if OLCA_SYSTEM_PROMPT_APPEND is not None:
    SYSTEM_PROMPT_APPEND = OLCA_SYSTEM_PROMPT_APPEND
80
+
81
def load_config(config_file):
    """Parse a YAML configuration file and return its contents.

    Args:
        config_file: Path to the YAML file to read.

    Returns:
        The deserialized YAML document (typically a dict).
    """
    with open(config_file, 'r') as handle:
        return yaml.safe_load(handle)
85
+
86
+
87
#%%

# Load environment variables from a .env file in the current directory
# (no-op if the file is absent).
dotenv.load_dotenv()

#%%
#from dotenv in ./.env , load key

#%%

# First we initialize the model we want to use.
from json import load  # NOTE(review): 'load' appears unused here — confirm before removing.
from langchain_openai import ChatOpenAI,OpenAI
from langchain.agents import AgentExecutor, create_react_agent

from langchain_community.agent_toolkits.load_tools import load_tools

import warnings
#

# Suppress the specific UserWarning
warnings.filterwarnings("ignore", category=UserWarning, message="The shell tool has no safeguards by default. Use at your own risk.")

from langchain_community.tools.shell import ShellTool

from typing import Literal

from langchain_core.tools import tool

# NOTE(review): this import shadows langchain.agents.create_react_agent
# imported above — main() therefore uses the langgraph variant. Confirm
# the earlier import is intentional before relying on it.
from langgraph.prebuilt import create_react_agent
from langgraph.errors import GraphRecursionError
117
+
118
@tool
def get_weather(city: Literal["nyc", "sf"]):
    """Use this to get weather information."""
    # Canned responses for the two supported cities; anything else is a
    # programming error surfaced as an AssertionError, as before.
    forecasts = {
        "nyc": "It might be cloudy in nyc",
        "sf": "It's always sunny in sf",
    }
    if city not in forecasts:
        raise AssertionError("Unknown city")
    return forecasts[city]
127
+
128
+
129
def print_stream(stream):
    """Print each streamed agent state's newest message to stdout.

    Plain (role, content) tuples are printed directly; any other message
    object is asked to render itself via its pretty_print() method.
    """
    for state in stream:
        latest = state["messages"][-1]
        if isinstance(latest, tuple):
            print(latest)
            continue
        latest.pretty_print()
136
+
137
def prepare_input(user_input, system_instructions, append_prompt=True, human=False):
    """Assemble the messages payload handed to the agent graph.

    Args:
        user_input: Task text supplied by the user/config.
        system_instructions: Base system prompt from the config file.
        append_prompt: When True, append SYSTEM_PROMPT_APPEND to the
            system instructions.
        human: When True, also append HUMAN_APPEND_PROMPT.

    Returns:
        A dict with a "messages" list of (role, content) tuples.
    """
    system_text = system_instructions
    if append_prompt:
        system_text = system_text + SYSTEM_PROMPT_APPEND
    if human:
        system_text = system_text + HUMAN_APPEND_PROMPT
    return {
        "messages": [
            ("system", system_text),
            ("user", user_input),
        ]
    }
148
+
149
+ OLCA_DESCRIPTION = "OlCA (Orpheus Langchain CLI Assistant) (very Experimental and dangerous)"
150
+ OLCA_EPILOG = "For more information: https://github.com/jgwill/orpheuspypractice/wiki/olca"
151
+ OLCA_USAGE="olca [-D] [-H] [-M] [-T] [init] [-y]"
152
+ def _parse_args():
153
+ parser = argparse.ArgumentParser(description=OLCA_DESCRIPTION, epilog=OLCA_EPILOG,usage=OLCA_USAGE)
154
+ parser.add_argument("-D", "--disable-system-append", action="store_true", help="Disable prompt appended to system instructions")
155
+ parser.add_argument("-H", "--human", action="store_true", help="Human in the loop mode")
156
+ parser.add_argument("-M", "--math", action="store_true", help="Enable math tool")
157
+ parser.add_argument("-T", "--tracing", action="store_true", help="Enable tracing")
158
+ parser.add_argument("init", nargs='?', help="Initialize olca interactive mode")
159
+ parser.add_argument("-y", "--yes", action="store_true", help="Accept the new file olca.yml")
160
+ return parser.parse_args()
161
+
162
def main():
    """CLI entry point: resolve config, set up the model/tools, run the agent.

    Flow: handle 'init', load olca.yml (or deprecated olca_config.yaml),
    configure tracing and the OpenAI API key, build the langgraph react
    agent with the selected tools, then stream its output to stdout.
    """
    args = _parse_args()
    olca_config_file = 'olca_config.yaml'
    olca_new_config_file = 'olca.yml'

    # 'olca init' bootstraps a config file; refuses to clobber an existing one.
    if args.init:
        if os.path.exists(olca_new_config_file) or os.path.exists(olca_config_file):
            print("Error: Configuration file already exists. Cannot run 'olca init'.")
            return
        if args.yes:
            olca_config_file = olca_new_config_file
        else:
            generate_config_example()
        return

    # Prefer the new config filename; fall back to the deprecated one.
    if os.path.exists(olca_new_config_file):
        olca_config_file = olca_new_config_file
    elif os.path.exists(olca_config_file):
        print("Warning: 'olca_config.yaml' is deprecated. Please use 'olca.yml' instead.")
    else:
        generate_config_example()
        return

    config = load_config(olca_config_file)

    # Check for tracing flag in config and CLI
    tracing_enabled = config.get('tracing', False) or args.tracing
    if tracing_enabled:
        os.environ["LANGCHAIN_TRACING_V2"] = "true"
        if not os.getenv("LANGCHAIN_API_KEY"):
            print("Error: LANGCHAIN_API_KEY environment variable is required for tracing. Please set it up at : https://smith.langchain.com/settings")
            exit(1)

    # Map the configured key name (default OPENAI_API_KEY_olca) onto
    # OPENAI_API_KEY for the langchain clients. If the env var is absent,
    # os.environ assignment raises (None value) and we fall back to .env.
    try:

        api_key_variable = "OPENAI_API_KEY"
        api_keyname=config.get('api_keyname',"OPENAI_API_KEY_olca")

        api_key_lcpractices2409 = os.getenv(api_keyname)
        #print(api_key_lcpractices2409)
        os.environ[api_key_variable] = api_key_lcpractices2409
    except :
        #load .env file in current dir or HOME and find OPENAI_API_KEY
        try:
            dotenv.load_dotenv()
        except:
            #load in HOME
            try:
                dotenv.load_dotenv(dotenv.find_dotenv(usecwd=False))
            except:
                print("Error: Could not load .env file")
                exit(1)

    system_instructions = config.get('system_instructions', '')
    user_input = config.get('user_input', '')
    model_name=config.get('model_name', "gpt-4o-mini")
    recursion_limit=config.get('recursion_limit', 15)
    # NOTE(review): _parse_args() is invoked a second time here;
    # args.disable_system_append would avoid re-parsing sys.argv.
    disable_system_append = _parse_args().disable_system_append

    # Use the system_instructions and user_input in your CLI logic
    print("System Instructions:", system_instructions)
    print("User Input:", user_input)
    print("Model Name:", model_name)
    print("Recursion Limit:", recursion_limit)
    print("Trace:", tracing_enabled)

    model = ChatOpenAI(model=model_name, temperature=0)
    selected_tools = [ "terminal"]

    # CLI -H flag enables human-in-the-loop; a 'human' key in the config
    # overrides the flag when present.
    human_switch = args.human
    #look in olca_config.yaml for human: true
    if "human" in config:
        human_switch = config["human"]

    if human_switch:
        selected_tools.append("human")

    # llm-math requires an LLM instance; the human tool needs an input
    # callback — hence the four load_tools variants.
    if args.math:
        math_llm=OpenAI()
        selected_tools.append("llm-math")
        if human_switch:
            tools = load_tools( selected_tools, llm=math_llm, allow_dangerous_tools=True, input_func=get_input)
        else:
            tools = load_tools( selected_tools, llm=math_llm, allow_dangerous_tools=True)
    else:
        if human_switch:
            tools = load_tools( selected_tools, allow_dangerous_tools=True, input_func=get_input)
        else:
            tools = load_tools( selected_tools, allow_dangerous_tools=True)

    # Define the graph
    graph = create_react_agent(model, tools=tools)

    if graph.config is None:
        graph.config = {}
    graph.config["recursion_limit"] = recursion_limit

    inputs = prepare_input(user_input, system_instructions, not disable_system_append, human_switch)

    # Ensure the agent's working directory exists, then stream its run.
    try:
        os.makedirs('.olca', exist_ok=True)
        print_stream(graph.stream(inputs, stream_mode="values"))
    except GraphRecursionError as e:
        #print(f"Error: {e}")
        print("Recursion limit reached. Please increase the 'recursion_limit' in the olca_config.yaml file.")
        print("For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT")
279
+
280
def generate_config_example():
    """Interactively prompt for settings and write a starter 'olca.yml'.

    Each prompt shows its default in brackets; pressing Enter accepts the
    default. Fixes a bug in the boolean prompts: the original expression
    `input(...).lower() in [...] or True` always evaluated to True, so
    'human' and 'tracing' could never be set to False.
    """
    def _prompt_bool(prompt, default=True):
        # Empty answer keeps the default; otherwise common truthy
        # spellings mean True and anything else means False.
        raw = input(prompt).strip().lower()
        if raw == "":
            return default
        return raw in ("true", "yes", "y", "1")

    config = {
        "api_keyname": input("api_keyname [OPENAI_API_KEY_olca]: ") or "OPENAI_API_KEY_olca",
        "model_name": input("model_name [gpt-4o-mini]: ") or "gpt-4o-mini",
        "recursion_limit": int(input("recursion_limit [12]: ") or 12),
        "temperature": float(input("temperature [0]: ") or 0),
        "human": _prompt_bool("human [true]: "),
        "tracing": _prompt_bool("tracing [true]: "),
        "system_instructions": input("system_instructions [Hello, I am a chatbot. How can I help you today?]: ") or "Hello, I am a chatbot. How can I help you today?",
        "user_input": input("user_input [What is the weather in NYC?]: ") or "What is the weather in NYC?"
    }
    with open('olca.yml', 'w') as file:
        yaml.dump(config, file)
    print("Configuration file 'olca.yml' created successfully.")
294
+
295
# Standard script entry point guard.
if __name__ == "__main__":
    main()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: olca
3
- Version: 0.2.0
3
+ Version: 0.2.1
4
4
  Summary: A Python package for experimental usage of Langchain and Human-in-the-Loop
5
5
  Home-page: https://github.com/jgwill/olca
6
6
  Author: Jean GUillaume ISabelle
@@ -2,6 +2,8 @@ LICENSE
2
2
  README.md
3
3
  pyproject.toml
4
4
  setup.py
5
+ olca/__init__.py
6
+ olca/olcacli.py
5
7
  olca.egg-info/PKG-INFO
6
8
  olca.egg-info/SOURCES.txt
7
9
  olca.egg-info/dependency_links.txt
@@ -0,0 +1 @@
1
+ olca
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"
7
7
 
8
8
  [project]
9
9
  name = "olca"
10
- version = "0.2.0"
10
+ version = "0.2.1"
11
11
  description = "A Python package for experimental usage of Langchain and Human-in-the-Loop"
12
12
  readme = "README.md"
13
13
  requires-python = ">=3.6"
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
2
2
 
3
3
  setup(
4
4
  name='olca',
5
- version = "0.2.0",
5
+ version = "0.2.1",
6
6
  author='Jean GUillaume ISabelle',
7
7
  author_email='jgi@jgwill.com',
8
8
  description='A Python package for experimenting with Langchain agent and interactivity in Terminal modalities.',
@@ -10,7 +10,7 @@ setup(
10
10
  long_description_content_type='text/markdown',
11
11
  url='https://github.com/jgwill/olca',
12
12
  packages=find_packages(
13
- include=["olca2", "test-*.py"], exclude=["test*log", "*test*csv", "*test*png"]
13
+ include=["olca", "test-*.py"], exclude=["test*log", "*test*csv", "*test*png"]
14
14
  ),
15
15
  #package_dir={'': 'coaiapy'},
16
16
  install_requires=[
@@ -1 +0,0 @@
1
-
File without changes
File without changes
File without changes
File without changes