olca 0.2.55__tar.gz → 0.2.56__tar.gz

Sign up to get free protection for your applications and access to all features.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: olca
3
- Version: 0.2.55
3
+ Version: 0.2.56
4
4
  Summary: A Python package for experimental usage of Langchain and Human-in-the-Loop
5
5
  Home-page: https://github.com/jgwill/olca
6
6
  Author: Jean GUillaume ISabelle
@@ -92,6 +92,10 @@ def main():
92
92
  parser_list_by_score.add_argument('--max_value', type=float, help='Maximum score value')
93
93
  parser_list_by_score.add_argument('-L','--limit', type=int, default=100, help='Number of traces to fetch')
94
94
 
95
+ # list_scores command
96
+ parser_list_scores = subparsers.add_parser('list_scores', help='List all scores', aliases=['ls'])
97
+ parser_list_scores.add_argument('-o', '--output', type=str, help='Output JSON file path')
98
+
95
99
  args = parser.parse_args()
96
100
 
97
101
  if args.command == 'list_traces' or args.command == 'lt':
@@ -159,6 +163,20 @@ def main():
159
163
  for trace in traces:
160
164
  print_trace(trace)
161
165
  #print(f"Trace ID: {trace.id}, Name: {trace.name}")
166
+ elif args.command == 'list_scores' or args.command == 'ls':
167
+ scores = fu.list_scores()
168
+ if scores:
169
+ if args.output:
170
+ try:
171
+ with open(args.output, 'w') as f:
172
+ json.dump(scores, f, indent=2)
173
+ print(f"Scores written to {os.path.realpath(args.output)}")
174
+ except Exception as e:
175
+ print(f"Error writing to file {args.output}: {e}")
176
+ else:
177
+ print(json.dumps(scores, indent=2))
178
+ else:
179
+ print("No scores found.")
162
180
  else:
163
181
  parser.print_help()
164
182
  exit(1)
@@ -43,7 +43,11 @@ langfuse = Langfuse(
43
43
  secret_key=os.environ.get("LANGFUSE_SECRET_KEY"),
44
44
  host=os.environ.get("LANGFUSE_HOST")
45
45
  )
46
-
46
+ def dummy():
47
+ o=langfuse.get_observations()
48
+ g=langfuse.get_generations()
49
+ prompts_list=langfuse.get_dataset()
50
+
47
51
  def open_trace_in_browser(trace_id):
48
52
  base_url = os.environ.get("LANGFUSE_HOST")
49
53
  project_id = os.environ.get("LANGFUSE_PROJECT_ID")
@@ -68,6 +72,20 @@ def get_score_by_id(score_id):
68
72
  print(f"Error retrieving score {score_id}: {e}")
69
73
  return None
70
74
 
75
+ def list_scores():
76
+ """Retrieve all score configurations."""
77
+ base_url = os.environ.get("LANGFUSE_HOST")
78
+ public_key = os.environ.get("LANGFUSE_PUBLIC_KEY")
79
+ secret_key = os.environ.get("LANGFUSE_SECRET_KEY")
80
+ url = f"{base_url}/api/public/score-configs"
81
+ try:
82
+ response = requests.get(url, auth=(public_key, secret_key))
83
+ response.raise_for_status()
84
+ return response.json()
85
+ except Exception as e:
86
+ print(f"Error retrieving scores: {e}")
87
+ return None
88
+
71
89
  def print_trace(trace, show_comments=False):
72
90
  print(f"<Trace \n\tat=\"{trace.createdAt}\" \n\tid=\"{trace.id}\" \n\tname=\"{trace.name}\" \n\tsession_id=\"{trace.session_id}\" \n\tprojectId=\"{trace.projectId}\" >")
73
91
  print(f"<Input><CDATA[[\n{trace.input}\n]]></Input>")
@@ -157,7 +175,7 @@ def create_prompt(name, prompt_text, model_name, temperature, labels=None, suppo
157
175
  }
158
176
  )
159
177
  def get_prompt(name, label="production"):
160
- return langfuse.get_prompt(name=name,label="production")
178
+ return langfuse.get_prompt(name=name,label=label)
161
179
 
162
180
  def update_prompt(name, new_prompt_text):
163
181
  prompt = langfuse.get_prompt(name=name)
@@ -8,9 +8,6 @@ import yaml
8
8
  from olca.utils import load_environment, initialize_langfuse
9
9
  from olca.tracing import TracingManager
10
10
  from olca.olcahelper import setup_required_directories, initialize_config_file
11
- import time
12
- import httpx
13
- from openai.error import APIConnectionError
14
11
 
15
12
  #jgwill/olca1
16
13
  #olca1_prompt = hub.pull("jgwill/olca1") #Future use
@@ -308,19 +305,7 @@ def main():
308
305
  graph_config = {"callbacks": callbacks} if callbacks else {}
309
306
  if recursion_limit:
310
307
  graph_config["recursion_limit"] = recursion_limit
311
-
312
- retry_attempts = 3
313
- for attempt in range(retry_attempts):
314
- try:
315
- print_stream(graph.stream(inputs, config=graph_config))
316
- break
317
- except (httpx.ConnectError, APIConnectionError) as e:
318
- if attempt < retry_attempts - 1:
319
- print(f"Network error encountered: {e}. Retrying in 5 seconds...")
320
- time.sleep(5)
321
- else:
322
- print("Failed to connect after multiple attempts. Exiting.")
323
- raise e
308
+ print_stream(graph.stream(inputs, config=graph_config))
324
309
  except GraphRecursionError as e:
325
310
  print("Recursion limit reached. Please increase the 'recursion_limit' in the olca_config.yaml file.")
326
311
  print("For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: olca
3
- Version: 0.2.55
3
+ Version: 0.2.56
4
4
  Summary: A Python package for experimental usage of Langchain and Human-in-the-Loop
5
5
  Home-page: https://github.com/jgwill/olca
6
6
  Author: Jean GUillaume ISabelle
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"
7
7
 
8
8
  [project]
9
9
  name = "olca"
10
- version = "0.2.55"
10
+ version = "0.2.56"
11
11
 
12
12
  description = "A Python package for experimental usage of Langchain and Human-in-the-Loop"
13
13
  readme = "README.md"
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
2
2
 
3
3
  setup(
4
4
  name='olca',
5
- version = "0.2.55",
5
+ version = "0.2.56",
6
6
  author='Jean GUillaume ISabelle',
7
7
  author_email='jgi@jgwill.com',
8
8
  description='A Python package for experimenting with Langchain agent and interactivity in Terminal modalities.',
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes