olca 0.2.9__py3-none-any.whl → 0.2.11__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
olca/fusewill_cli.py ADDED
@@ -0,0 +1,81 @@
1
+ import os
2
+ import sys
3
+ sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
4
+ import argparse
5
+ from fusewill_utils import (
6
+ list_traces,
7
+ create_dataset,
8
+ create_prompt,
9
+ update_prompt,
10
+ delete_dataset,
11
+ get_trace_by_id
12
+ )
13
+ import dotenv
14
+ import json
15
+ dotenv.load_dotenv()
16
+
17
def main():
    """Entry point for the ``fusewill`` CLI, a thin wrapper around Langfuse.

    Builds an argparse command tree (list_traces, create_dataset,
    create_prompt, update_prompt, delete_dataset, get_trace_by_id),
    dispatches to the matching helper in ``fusewill_utils``, and prints
    help when no command is given.
    """
    parser = argparse.ArgumentParser(description="Langfuse CLI Wrapper")
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # list_traces command
    parser_list = subparsers.add_parser('list_traces', help='List traces')
    parser_list.add_argument('--limit', type=int, default=100, help='Number of traces to fetch')
    parser_list.add_argument('--output_dir', type=str, default='../output/traces', help='Directory to save traces')

    # create_dataset command
    parser_create_dataset = subparsers.add_parser('create_dataset', help='Create a new dataset')
    parser_create_dataset.add_argument('name', help='Name of the dataset')
    parser_create_dataset.add_argument('--description', default='', help='Description of the dataset')
    parser_create_dataset.add_argument('--metadata', type=str, default='{}', help='Metadata in JSON format')

    # create_prompt command
    parser_create_prompt = subparsers.add_parser('create_prompt', help='Create a new prompt')
    parser_create_prompt.add_argument('name', help='Name of the prompt')
    parser_create_prompt.add_argument('prompt_text', help='Prompt text')
    parser_create_prompt.add_argument('--model_name', default='gpt-4o-mini', help='Model name')
    parser_create_prompt.add_argument('--temperature', type=float, default=0.7, help='Temperature')
    parser_create_prompt.add_argument('--labels', nargs='*', default=[], help='Labels for the prompt')
    parser_create_prompt.add_argument('--supported_languages', nargs='*', default=[], help='Supported languages')

    # update_prompt command
    parser_update_prompt = subparsers.add_parser('update_prompt', help='Update an existing prompt')
    parser_update_prompt.add_argument('name', help='Name of the prompt')
    parser_update_prompt.add_argument('new_prompt_text', help='New prompt text')

    # delete_dataset command
    parser_delete_dataset = subparsers.add_parser('delete_dataset', help='Delete a dataset')
    parser_delete_dataset.add_argument('name', help='Name of the dataset')

    # get_trace_by_id command
    parser_get_trace = subparsers.add_parser('get_trace_by_id', help='Get a trace by ID')
    parser_get_trace.add_argument('trace_id', help='Trace ID')

    args = parser.parse_args()

    if args.command == 'list_traces':
        list_traces(limit=args.limit, output_dir=args.output_dir)
    elif args.command == 'create_dataset':
        # Fail with a readable usage error (exit code 2) instead of an
        # unhandled traceback when --metadata is not valid JSON.
        try:
            metadata = json.loads(args.metadata)
        except json.JSONDecodeError as e:
            parser.error(f"--metadata is not valid JSON: {e}")
        create_dataset(name=args.name, description=args.description, metadata=metadata)
    elif args.command == 'create_prompt':
        create_prompt(
            name=args.name,
            prompt_text=args.prompt_text,
            model_name=args.model_name,
            temperature=args.temperature,
            labels=args.labels,
            supported_languages=args.supported_languages
        )
    elif args.command == 'update_prompt':
        update_prompt(name=args.name, new_prompt_text=args.new_prompt_text)
    elif args.command == 'delete_dataset':
        delete_dataset(name=args.name)
    elif args.command == 'get_trace_by_id':
        trace = get_trace_by_id(trace_id=args.trace_id)
        print(trace)
    else:
        # No subcommand supplied: show usage rather than doing nothing.
        parser.print_help()

if __name__ == '__main__':
    main()
olca/fusewill_utils.py ADDED
@@ -0,0 +1,66 @@
1
+
2
+ from langfuse import Langfuse
3
+ import os
4
+ import sys
5
+ sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
6
+ import json
7
+
8
+ import dotenv
9
+ dotenv.load_dotenv()
10
+
11
+ langfuse = Langfuse()
12
+
13
def list_traces(limit=100, output_dir="../output/traces"):
    """Fetch up to *limit* traces from Langfuse and print a short summary of each.

    Prints each trace's ID, name, output, and metadata to stdout, then
    returns the raw response object so callers can inspect ``.data``.
    """
    traces = langfuse.get_traces(limit=limit)
    # NOTE(review): this directory is created but nothing is ever written into
    # it -- output_dir is otherwise unused in this function. The CLI help calls
    # it "Directory to save traces"; confirm whether traces were meant to be
    # persisted here.
    os.makedirs(output_dir, exist_ok=True)
    for trace in traces.data:
        print(f"-----Trace ID: {trace.id}--Name: {trace.name}----------")
        print(f"<output>{trace.output}</output>")
        print(f"<Metadata>{trace.metadata}</Metadata>")
        print("---")
    return traces
22
+
23
def add_score_to_a_trace(trace_id, generation_id, name, value, data_type="NUMERIC", comment=""):
    """Attach a score to a trace, scoped to one observation/generation."""
    score_fields = {
        "trace_id": trace_id,
        "observation_id": generation_id,
        "name": name,
        "value": value,
        "data_type": data_type,
        "comment": comment,
    }
    langfuse.score(**score_fields)
32
+
33
def create_dataset(name, description="", metadata=None):
    """Create a Langfuse dataset; a falsy *metadata* becomes an empty dict."""
    effective_metadata = metadata or {}
    langfuse.create_dataset(
        name=name,
        description=description,
        metadata=effective_metadata,
    )
39
def get_dataset(name):
    """Look up and return a Langfuse dataset by name."""
    return langfuse.get_dataset(name=name)
41
+
42
def create_prompt(name, prompt_text, model_name, temperature, labels=None, supported_languages=None):
    """Register a new text prompt in Langfuse together with its model config."""
    model_config = {
        "model": model_name,
        "temperature": temperature,
        "supported_languages": supported_languages or [],
    }
    langfuse.create_prompt(
        name=name,
        type="text",
        prompt=prompt_text,
        labels=labels or [],
        config=model_config,
    )
54
def get_prompt(name, label="production"):
    """Fetch a prompt from Langfuse by name.

    Bug fix: the *label* argument was previously ignored (the call
    hard-coded ``label="production"``); it is now forwarded so callers
    can request prompts under other labels. The default is unchanged.
    """
    return langfuse.get_prompt(name=name, label=label)
56
+
57
def update_prompt(name, new_prompt_text):
    """Replace the text of an existing prompt.

    NOTE(review): relies on the fetched prompt object exposing an
    ``update(prompt=...)`` method -- confirm against the installed
    Langfuse SDK version.
    """
    existing = langfuse.get_prompt(name=name)
    existing.update(prompt=new_prompt_text)
60
+
61
def delete_dataset(name):
    """Delete a Langfuse dataset by name.

    NOTE(review): assumes dataset objects expose a ``delete()`` method --
    verify against the installed Langfuse SDK version.
    """
    target = langfuse.get_dataset(name=name)
    target.delete()
64
+
65
def get_trace_by_id(trace_id):
    """Retrieve a single trace from Langfuse by its ID."""
    return langfuse.get_trace(trace_id)
olca/olcacli.py CHANGED
@@ -275,7 +275,7 @@ def main():
275
275
  def generate_config_example():
276
276
  try:
277
277
  config = {
278
- "api_keyname": input("api_keyname [OPENAI_API_KEY_olca]: ") or "OPENAI_API_KEY_olca",
278
+ "api_keyname": input("api_keyname [OPENAI_API_KEY]: ") or "OPENAI_API_KEY",
279
279
  "model_name": input("model_name [gpt-4o-mini]: ") or "gpt-4o-mini",
280
280
  "recursion_limit": int(input("recursion_limit [12]: ") or 12),
281
281
  "temperature": float(input("temperature [0]: ") or 0),
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: olca
3
- Version: 0.2.9
3
+ Version: 0.2.11
4
4
  Summary: A Python package for experimental usage of Langchain and Human-in-the-Loop
5
5
  Home-page: https://github.com/jgwill/olca
6
6
  Author: Jean GUillaume ISabelle
@@ -364,6 +364,7 @@ Requires-Dist: langchain-openai
364
364
  Requires-Dist: langchain-experimental
365
365
  Requires-Dist: click
366
366
  Requires-Dist: langgraph
367
+ Requires-Dist: langfuse
367
368
 
368
369
  # oLCa
369
370
 
@@ -395,4 +396,61 @@ olca2 --help
395
396
 
396
397
 
397
398
 
399
+ ## fusewill
398
400
 
401
+ The `fusewill` command is a CLI tool that provides functionalities for interacting with Langfuse, including tracing, dataset management, and prompt operations.
402
+
403
+ ### Help
404
+
405
+ To see the available commands and options for `fusewill`, use the `--help` flag:
406
+
407
+
408
+ ----
409
+ IMPORTED README from olca1
410
+ ----
411
+
412
+
413
+ ### Olca
414
+
415
+ The olca.py script is designed to function as a command-line interface (CLI) agent. It performs various tasks based on given inputs and files present in the directory. The agent is capable of creating directories, producing reports, and writing instructions for self-learning. It operates within a GitHub repository environment and can commit and push changes if provided with an issue ID. The script ensures that it logs its internal actions and follows specific guidelines for handling tasks and reporting, without modifying certain configuration files or checking out branches unless explicitly instructed.
416
+
417
+ #### Tracing
418
+
419
+ Olca now supports tracing functionality to help monitor and debug its operations. You can enable tracing by using the `-T` or `--tracing` flag when running the script. Ensure that the `LANGCHAIN_API_KEY` environment variable is set for tracing to work.
420
+
421
+ #### Initialization
422
+
423
+ To initialize `olca`, you need to create a configuration file named `olca.yml`. This file contains various settings that `olca` will use to perform its tasks. Below is an example of the `olca.yml` file:
424
+
425
+ ```yaml
426
+ api_keyname: OPENAI_API_KEY__o450olca241128
427
+ human: true
428
+ model_name: gpt-4o-mini
429
+ recursion_limit: 300
430
+ system_instructions: You focus on interacting with human and do what they ask. Make sure you dont quit the program.
431
+ temperature: 0.0
432
+ tracing: true
433
+ user_input: Look in the file 3act.md and in ./story, we have created a story point by point and we need you to generate the next iteration of the book in the folder ./book. You use what you find in ./story to start the work. Give me your plan to correct or accept.
434
+ ```
435
+
436
+ #### Usage
437
+
438
+ To run `olca`, use the following command:
439
+
440
+ ```shell
441
+ olca -T
442
+ ```
443
+
444
+ This command will enable tracing and start the agent. You can also use the `--tracing` flag to achieve the same result.
445
+
446
+ #### Configuration
447
+
448
+ The `olca.yml` file allows you to configure various aspects of `olca`, such as the API key (so you can know how much your experimentation costs you), model name, recursion limit, system instructions, temperature, and user input. You can customize these settings to suit your needs and preferences.
449
+
450
+ #### Command-Line Interface (CLI)
451
+
452
+ The `olca` script provides a user-friendly CLI that allows you to interact with the agent and perform various tasks. You can use flags and options to control the agent's behavior and provide input for its operations. The CLI also includes error handling mechanisms to notify you of any issues or missing configuration settings.
453
+
454
+ #### GitHub Integration
455
+
456
+ `olca` is designed to integrate seamlessly with GitHub workflows and issue management. You can provide an issue ID to the agent, and it will commit and push changes directly to the specified issue. This feature streamlines the development process and reduces the need for manual intervention. Additionally, `olca` maintains detailed logs of its actions and updates, ensuring transparency and traceability in its operations.
@@ -0,0 +1,10 @@
1
+ olca/__init__.py,sha256=3QyLLAys_KiiDIe-cfO_7QyY7di_qCaCS-sVziW2BOw,23
2
+ olca/fusewill_cli.py,sha256=Dwp_1GJ9_MNOCTqJ2UKiYuDt-BYQVIVhXVjrRi-VUV0,3563
3
+ olca/fusewill_utils.py,sha256=M83ENbc7lRNXXcCSRujB1gepVwMxEaPA7g4nNeHq-vU,1908
4
+ olca/olcacli.py,sha256=B3uZcl_873zBy4vmXmcyN6Z8ofQA-anzX2aLuvUTSpk,12281
5
+ olca-0.2.11.dist-info/LICENSE,sha256=gXf5dRMhNSbfLPYYTY_5hsZ1r7UU1OaKQEAQUhuIBkM,18092
6
+ olca-0.2.11.dist-info/METADATA,sha256=CVc8ESSBbuUG5fZgA6alze4gqe9_CFxgESC09rwfp48,25311
7
+ olca-0.2.11.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
8
+ olca-0.2.11.dist-info/entry_points.txt,sha256=LIcx7Vr6eM9YIQgcKcE23dJu7OEzvghyRp41_cuwNSM,74
9
+ olca-0.2.11.dist-info/top_level.txt,sha256=bGDtAReS-xlS0F6MM-DyD0IQUqjNdWmgemnM3vNtrpI,5
10
+ olca-0.2.11.dist-info/RECORD,,
@@ -1,2 +1,3 @@
1
1
  [console_scripts]
2
+ fusewill = olca.fusewill_cli:main
2
3
  olca2 = olca.olcacli:main
@@ -1,8 +0,0 @@
1
- olca/__init__.py,sha256=3QyLLAys_KiiDIe-cfO_7QyY7di_qCaCS-sVziW2BOw,23
2
- olca/olcacli.py,sha256=phRtxwiVJ6pkrzAjo3JI2skI87XBidkSBsE3Ec9yqBQ,12291
3
- olca-0.2.9.dist-info/LICENSE,sha256=gXf5dRMhNSbfLPYYTY_5hsZ1r7UU1OaKQEAQUhuIBkM,18092
4
- olca-0.2.9.dist-info/METADATA,sha256=NrOZEY9Ew1Ct-pR_colcQFWh_v_ymW-tPlx7BKvMV8U,22012
5
- olca-0.2.9.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
6
- olca-0.2.9.dist-info/entry_points.txt,sha256=XwHDvSB6qlO1oIEtLCwrVJoVV91S405JL_iXl0G211A,44
7
- olca-0.2.9.dist-info/top_level.txt,sha256=bGDtAReS-xlS0F6MM-DyD0IQUqjNdWmgemnM3vNtrpI,5
8
- olca-0.2.9.dist-info/RECORD,,
File without changes
File without changes