olca 0.2.56__tar.gz → 0.2.58__tar.gz

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: olca
- Version: 0.2.56
+ Version: 0.2.58
  Summary: A Python package for experimental usage of Langchain and Human-in-the-Loop
  Home-page: https://github.com/jgwill/olca
  Author: Jean GUillaume ISabelle
@@ -365,6 +365,7 @@ Requires-Dist: langchain-experimental
  Requires-Dist: click
  Requires-Dist: langgraph
  Requires-Dist: langfuse
+ Requires-Dist: pytz

  # oLCa

@@ -96,6 +96,16 @@ def main():
  parser_list_scores = subparsers.add_parser('list_scores', help='List all scores', aliases=['ls'])
  parser_list_scores.add_argument('-o', '--output', type=str, help='Output JSON file path')

+ # search_traces command
+ parser_search = subparsers.add_parser('search_traces', help='Search and filter traces with advanced options', aliases=['st'])
+ parser_search.add_argument('--start_date', type=str, help='Start date in ISO format (e.g., 2024-01-01)')
+ parser_search.add_argument('--end_date', type=str, help='End date in ISO format (e.g., 2024-12-31)')
+ parser_search.add_argument('--keywords', nargs='*', help='Keywords to search in input or output')
+ parser_search.add_argument('--tags', nargs='*', help='Tags to filter traces')
+ parser_search.add_argument('--metadata', nargs='*', help='Metadata filters in key=value format')
+ parser_search.add_argument('-L', '--limit', type=int, default=100, help='Number of traces to fetch')
+ parser_search.add_argument('-o', '--output', type=str, help='Output JSON file path')
+
  args = parser.parse_args()

  if args.command == 'list_traces' or args.command == 'lt':
@@ -177,6 +187,38 @@ def main():
  print(json.dumps(scores, indent=2))
  else:
  print("No scores found.")
+ elif args.command == 'search_traces' or args.command == 'st':
+ metadata_filters = {}
+ if args.metadata:
+ for item in args.metadata:
+ if '=' in item:
+ key, value = item.split('=', 1)
+ metadata_filters[key] = value
+ else:
+ print(f"Ignoring invalid metadata filter: {item}")
+
+ traces = fu.search_traces(
+ start_date=args.start_date,
+ end_date=args.end_date,
+ keywords=args.keywords,
+ tags=args.tags,
+ metadata_filters=metadata_filters,
+ limit=args.limit
+ )
+
+ if traces:
+ if args.output:
+ try:
+ with open(args.output, 'w') as f:
+ json.dump([trace.__dict__ for trace in traces], f, indent=2)
+ print(f"Traces written to {os.path.realpath(args.output)}")
+ except Exception as e:
+ print(f"Error writing to file {args.output}: {e}")
+ else:
+ for trace in traces:
+ fu.print_trace(trace)
+ else:
+ print("No traces found matching the criteria.")
  else:
  parser.print_help()
  exit(1)
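
The two hunks above add a search_traces (alias st) subcommand to the CLI and pass its flags straight through to the search_traces helper defined in the utilities diff below. A minimal sketch of the equivalent programmatic call; the import path and the example values are assumptions for illustration, not taken from the diff:

    import json
    from olca import fusewill_utils as fu  # assumed import path, matching olca/fusewill_utils.py in SOURCES.txt

    # Same parameters the CLI builds from --start_date, --end_date, --keywords, --tags, --metadata key=value, -L
    traces = fu.search_traces(
        start_date="2024-01-01",             # ISO date, made timezone-aware (UTC) inside the helper
        end_date="2024-12-31",
        keywords=["error", "timeout"],       # matched case-insensitively against trace input/output
        tags=["production"],
        metadata_filters={"user": "alice"},  # what the CLI parses from --metadata user=alice
        limit=50,
    )

    # Same serialization the -o/--output branch uses
    with open("traces.json", "w") as f:
        json.dump([trace.__dict__ for trace in traces], f, indent=2)
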
@@ -4,6 +4,8 @@ import json
  import dotenv
  import webbrowser
  import requests # Add this import
+ import datetime # Add this import
+ import pytz # Add this import

  # Load .env from the current working directory
  dotenv.load_dotenv(dotenv_path=os.path.join(os.getcwd(), ".env"))
@@ -43,10 +45,6 @@ langfuse = Langfuse(
  secret_key=os.environ.get("LANGFUSE_SECRET_KEY"),
  host=os.environ.get("LANGFUSE_HOST")
  )
- def dummy():
- o=langfuse.get_observations()
- g=langfuse.get_generations()
- prompts_list=langfuse.get_dataset()

  def open_trace_in_browser(trace_id):
  base_url = os.environ.get("LANGFUSE_HOST")
@@ -186,4 +184,61 @@ def delete_dataset(name):
  dataset.delete()

  def get_trace_by_id(trace_id):
- return langfuse.get_trace(trace_id)
+ return langfuse.get_trace(trace_id)
+
+ def search_traces(
+ start_date=None,
+ end_date=None,
+ keywords=None,
+ tags=None,
+ metadata_filters=None,
+ limit=100
+ ):
+ """
+ Search and filter traces based on date range, keywords, tags, and metadata.
+
+ Parameters:
+ start_date (str): ISO format date string for the start of the date range.
+ end_date (str): ISO format date string for the end of the date range.
+ keywords (list): List of keywords to search in input or output.
+ tags (list): List of tags to filter traces.
+ metadata_filters (dict): Dictionary of metadata key-value pairs for filtering.
+ limit (int): Number of traces to fetch.
+
+ Returns:
+ list: Filtered list of traces.
+ """
+ try:
+ params = {}
+ if start_date:
+ from_timestamp = datetime.datetime.fromisoformat(start_date)
+ from_timestamp = from_timestamp.replace(tzinfo=pytz.UTC)
+ params['from_timestamp'] = from_timestamp
+ if end_date:
+ to_timestamp = datetime.datetime.fromisoformat(end_date)
+ to_timestamp = to_timestamp.replace(tzinfo=pytz.UTC)
+ params['to_timestamp'] = to_timestamp
+ if tags:
+ params['tags'] = tags
+ if metadata_filters:
+ for key, value in metadata_filters.items():
+ params[f'metadata.{key}'] = value
+
+ traces = langfuse.get_traces(limit=limit, **params)
+ if not traces:
+ return []
+
+ filtered_traces = traces.data
+
+ if keywords:
+ keyword_set = set(keyword.lower() for keyword in keywords)
+ filtered_traces = [
+ trace for trace in filtered_traces
+ if any(keyword in trace.input.lower() for keyword in keyword_set) or
+ any(keyword in trace.output.lower() for keyword in keyword_set)
+ ]
+
+ return filtered_traces
+ except Exception as e:
+ print(f"Error searching traces: {e}")
+ return []
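
The new pytz dependency exists for the date handling in search_traces: ISO date strings from the CLI are parsed with datetime.fromisoformat and stamped as UTC before being passed to the Langfuse client as from_timestamp/to_timestamp. A standalone sketch of that conversion, using the same calls as the diff:

    import datetime
    import pytz

    # "2024-01-01" parses to a naive datetime; replace() attaches the UTC timezone.
    start = datetime.datetime.fromisoformat("2024-01-01").replace(tzinfo=pytz.UTC)
    print(start.isoformat())  # 2024-01-01T00:00:00+00:00
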
@@ -8,46 +8,12 @@ import yaml
  from olca.utils import load_environment, initialize_langfuse
  from olca.tracing import TracingManager
  from olca.olcahelper import setup_required_directories, initialize_config_file
+ from prompts import SYSTEM_PROMPT_APPEND, HUMAN_APPEND_PROMPT

  #jgwill/olca1
  #olca1_prompt = hub.pull("jgwill/olca1") #Future use
  #https://smith.langchain.com/hub/jgwill/olca1
- SYSTEM_PROMPT_APPEND="""
- You do what is above and consider the following when doing the task:
- ---
- * Write or append instructions to ./.olca/instructions.txt on how to perform tasks (in terms of reflection and action steps. example: cli syntax to execute something). You read that file to learn how to do your job if it exists (example: when you starts, you read this to know more on what you learned to do in previous iterations (as it might not be the first time you ran, this file will intruct you where we are. You can also deduce this from the user message)). You update it with what you learned and keep it up to date. You dont erase the content of the file but if what you find in it is not related to your task or is semantically the same, you clean the whole think so its clear. Dont output all these details you do this internally, no output to the stardard output. You read it back from time to time to remember what you did, learned and how to make it and what the user wants.
- * Produce a report in ./reports/report-olca-[TIMESTAMP].md
- * Report to the user if repeated errors are encountered if human tool is accessible (you interact, stating the errors and asking for instructions from the user (you never quit without the user explicitly stating to end the flow)).
- * Output the iteration number and plan to the standard output during each loop.
- * If you are given an issue_id, you commit the work to that issue and push your results (dont output the git push to the standard output).
- * Do not comment on your final results; just output them but keep logging your action steps you do internally (all reflection and action steps).
- * Dont modify the file: olca.yml
- * If github issue related, dont checkout a branch except if asked otherwise.
- * You dont checkout branches, I repeat, you dont checkout branches.
- * Generate your TIMESTAMP with the following command: date +'%Y%m%d%H%M%S' only once at the beginning of your script.
- * Make sure if you Switched to branch, you switch back to main before the end of your script.
- * Try to observe that you keep doing the same thing over and over again and stop right away if you see that (dont do that if you are developping a story)
- * Be quiet with trivial output in the terminal.
- * Write and update your plan in ./.olca/plan.md
- * You watch out for basic syntax errors with your args when executing echo commands. (example: Syntax error: Unterminated quoted string, make sure to escape your single and double quotes)
- ----
- REMEMBER: Dont introduce nor conclude, just output results. No comments. you present in a coherent format without preambles or fluff. Never use the word "determination" and we never brainstorm (we conceptualize the result we want in the germination phase then transform it into vision by choice and work as assimilating the vision to until the last phase which is completing our work).
- """

- HUMAN_APPEND_PROMPT = """
- * Utilize the 'human' tool for interactions as directed.
- * Communicate clearly and simply, avoiding exaggeration.
- Example Interaction:
- <example>
- '==============================================
- { PURPOSE_OF_THE_MESSAGE_SHORT }
- ==============================================
- { CURRENT_STATUS_OR_MESSAGE_CONTENT }
- ==============================================
- { PROMPT_FOR_USER_INPUT_SHORT } :
- </example>
- REMEMBER: Never ask to brainstorm (NEVER USE THAT WORD)
- """
  def get_input() -> str:
  print("----------------------")
  contents = []
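
With the inline constants removed, the CLI module now imports SYSTEM_PROMPT_APPEND and HUMAN_APPEND_PROMPT from the new prompts module (created in full further down). The diff does not show how those constants are consumed, so the sketch below is illustrative only; the helper names and the package-qualified import path are assumptions:

    # Illustrative composition only -- not shown in this diff.
    from olca.prompts import SYSTEM_PROMPT_APPEND, HUMAN_APPEND_PROMPT  # assumes the module resolves inside the olca package

    def build_system_prompt(base_instructions: str) -> str:
        # Hypothetical helper: append the shared suffix to task-specific instructions.
        return base_instructions + SYSTEM_PROMPT_APPEND

    def build_human_prompt(user_message: str) -> str:
        # Hypothetical helper: append the human-interaction guidance to the user message.
        return user_message + HUMAN_APPEND_PROMPT
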
@@ -0,0 +1,38 @@
+ # Create a new file "prompts.py" to store the prompt-related constants.
+
+ SYSTEM_PROMPT_APPEND = """
+ You do what is above and consider the following when doing the task:
+ ---
+ * Write or append instructions to ./.olca/instructions.txt on how to perform tasks (in terms of reflection and action steps. example: cli syntax to execute something). You read that file to learn how to do your job if it exists (example: when you starts, you read this to know more on what you learned to do in previous iterations (as it might not be the first time you ran, this file will intruct you where we are. You can also deduce this from the user message)). You update it with what you learned and keep it up to date. You dont erase the content of the file but if what you find in it is not related to your task or is semantically the same, you clean the whole think so its clear. Dont output all these details you do this internally, no output to the stardard output. You read it back from time to time to remember what you did, learned and how to make it and what the user wants.
+ * Produce a report in ./reports/report-olca-[TIMESTAMP].md
+ * Report to the user if repeated errors are encountered if human tool is accessible (you interact, stating the errors and asking for instructions from the user (you never quit without the user explicitly stating to end the flow)).
+ * Output the iteration number and plan to the standard output during each loop.
+ * If you are given an issue_id, you commit the work to that issue and push your results (dont output the git push to the standard output).
+ * Do not comment on your final results; just output them but keep logging your action steps you do internally (all reflection and action steps).
+ * Dont modify the file: olca.yml
+ * If github issue related, dont checkout a branch except if asked otherwise.
+ * You dont checkout branches, I repeat, you dont checkout branches.
+ * Generate your TIMESTAMP with the following command: date +'%Y%m%d%H%M%S' only once at the beginning of your script.
+ * Make sure if you Switched to branch, you switch back to main before the end of your script.
+ * Try to observe that you keep doing the same thing over and over again and stop right away if you see that (dont do that if you are developping a story)
+ * Be quiet with trivial output in the terminal.
+ * Write and update your plan in ./.olca/plan.md
+ * You watch out for basic syntax errors with your args when executing echo commands. (example: Syntax error: Unterminated quoted string, make sure to escape your single and double quotes)
+ ----
+ REMEMBER: Dont introduce nor conclude, just output results. No comments. you present in a coherent format without preambles or fluff. Never use the word "determination" and we never brainstorm (we conceptualize the result we want in the germination phase then transform it into vision by choice and work as assimilating the vision to until the last phase which is completing our work).
+ """
+
+ HUMAN_APPEND_PROMPT = """
+ * Utilize the 'human' tool for interactions as directed.
+ * Communicate clearly and simply, avoiding exaggeration.
+ Example Interaction:
+ <example>
+ '==============================================
+ { PURPOSE_OF_THE_MESSAGE_SHORT }
+ ==============================================
+ { CURRENT_STATUS_OR_MESSAGE_CONTENT }
+ ==============================================
+ { PROMPT_FOR_USER_INPUT_SHORT } :
+ </example>
+ REMEMBER: Never ask to brainstorm (NEVER USE THAT WORD)
+ """
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: olca
- Version: 0.2.56
+ Version: 0.2.58
  Summary: A Python package for experimental usage of Langchain and Human-in-the-Loop
  Home-page: https://github.com/jgwill/olca
  Author: Jean GUillaume ISabelle
@@ -365,6 +365,7 @@ Requires-Dist: langchain-experimental
  Requires-Dist: click
  Requires-Dist: langgraph
  Requires-Dist: langfuse
+ Requires-Dist: pytz

  # oLCa

@@ -7,6 +7,7 @@ olca/fusewill_cli.py
  olca/fusewill_utils.py
  olca/olcacli.py
  olca/olcahelper.py
+ olca/prompts.py
  olca/tracing.py
  olca/utils.py
  olca.egg-info/PKG-INFO
@@ -12,3 +12,4 @@ langchain-experimental
  click
  langgraph
  langfuse
+ pytz
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "olca"
- version = "0.2.56"
+ version = "0.2.58"

  description = "A Python package for experimental usage of Langchain and Human-in-the-Loop"
  readme = "README.md"
@@ -30,7 +30,8 @@ dependencies = [
  "langchain-experimental",
  "click",
  "langgraph",
- "langfuse"
+ "langfuse",
+ "pytz",
  ]
  classifiers = [
  "Programming Language :: Python :: 3",
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

  setup(
  name='olca',
- version = "0.2.56",
+ version = "0.2.58",
  author='Jean GUillaume ISabelle',
  author_email='jgi@jgwill.com',
  description='A Python package for experimenting with Langchain agent and interactivity in Terminal modalities.',