olca 0.2.59__tar.gz → 0.2.61__tar.gz
- {olca-0.2.59 → olca-0.2.61}/PKG-INFO +3 -1
- {olca-0.2.59 → olca-0.2.61}/olca/fusewill_cli.py +119 -1
- {olca-0.2.59 → olca-0.2.61}/olca/fusewill_utils.py +178 -5
- {olca-0.2.59 → olca-0.2.61}/olca/olcacli.py +32 -5
- {olca-0.2.59 → olca-0.2.61}/olca/prompts.py +5 -0
- {olca-0.2.59 → olca-0.2.61}/olca.egg-info/PKG-INFO +3 -1
- {olca-0.2.59 → olca-0.2.61}/olca.egg-info/requires.txt +2 -0
- {olca-0.2.59 → olca-0.2.61}/pyproject.toml +3 -1
- {olca-0.2.59 → olca-0.2.61}/setup.py +1 -1
- {olca-0.2.59 → olca-0.2.61}/LICENSE +0 -0
- {olca-0.2.59 → olca-0.2.61}/README.md +0 -0
- {olca-0.2.59 → olca-0.2.61}/olca/__init__.py +0 -0
- {olca-0.2.59 → olca-0.2.61}/olca/olcahelper.py +0 -0
- {olca-0.2.59 → olca-0.2.61}/olca/tracing.py +0 -0
- {olca-0.2.59 → olca-0.2.61}/olca/utils.py +0 -0
- {olca-0.2.59 → olca-0.2.61}/olca.egg-info/SOURCES.txt +0 -0
- {olca-0.2.59 → olca-0.2.61}/olca.egg-info/dependency_links.txt +0 -0
- {olca-0.2.59 → olca-0.2.61}/olca.egg-info/entry_points.txt +0 -0
- {olca-0.2.59 → olca-0.2.61}/olca.egg-info/top_level.txt +0 -0
- {olca-0.2.59 → olca-0.2.61}/setup.cfg +0 -0

{olca-0.2.59 → olca-0.2.61}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: olca
-Version: 0.2.59
+Version: 0.2.61
 Summary: A Python package for experimental usage of Langchain and Human-in-the-Loop
 Home-page: https://github.com/jgwill/olca
 Author: Jean GUillaume ISabelle
@@ -361,11 +361,13 @@ Requires-Dist: requests
 Requires-Dist: markdown
 Requires-Dist: langchain
 Requires-Dist: langchain-openai
+Requires-Dist: langchain-ollama
 Requires-Dist: langchain-experimental
 Requires-Dist: click
 Requires-Dist: langgraph
 Requires-Dist: langfuse
 Requires-Dist: pytz
+Requires-Dist: google.generativeai
 
 # oLCa
 
{olca-0.2.59 → olca-0.2.61}/olca/fusewill_cli.py

@@ -14,7 +14,9 @@ from fusewill_utils import (
     open_trace_in_browser,
     print_traces,
     print_trace,
-    list_traces_by_score # Ensure the updated function is imported
+    list_traces_by_score, # Ensure the updated function is imported
+    export_traces,
+    import_traces
 )
 import dotenv
 import json
@@ -106,6 +108,49 @@ def main():
     parser_search.add_argument('-L', '--limit', type=int, default=100, help='Number of traces to fetch')
     parser_search.add_argument('-o', '--output', type=str, help='Output JSON file path')
 
+    # export_traces command
+    parser_export = subparsers.add_parser('export_traces', help='Export traces', aliases=['et'])
+    parser_export.add_argument('--format', choices=['json','csv'], default='json', help='Export format')
+    parser_export.add_argument('-o','--output', type=str, help='Output file path')
+    parser_export.add_argument('--start_date', type=str, help='Start date in ISO format (e.g., 2024-01-01)')
+    parser_export.add_argument('--end_date', type=str, help='End date in ISO format (e.g., 2024-12-31)')
+
+    # import_traces command
+    parser_import = subparsers.add_parser('import_traces', help='Import traces', aliases=['it'])
+    parser_import.add_argument('--format', choices=['json','csv'], default='json', help='Import format')
+    parser_import.add_argument('--input', type=str, required=True, help='Input file path to read from')
+
+    # list_sessions command
+    parser_list_sessions = subparsers.add_parser('list_sessions', help='List sessions', aliases=['lss'])
+    parser_list_sessions.add_argument('-L','--limit', type=int, default=100, help='Number of sessions to fetch')
+    parser_list_sessions.add_argument('--start_date', type=str, help='Start date in ISO format (e.g., 2024-01-01)')
+    parser_list_sessions.add_argument('--end_date', type=str, help='End date in ISO format (e.g., 2024-12-31)')
+    parser_list_sessions.add_argument('--format', choices=['json','csv'], default='json', help='Output format (json or csv)')
+    parser_list_sessions.add_argument('-o','--output', type=str, help='Optional output file path')
+
+    # get_session command
+    parser_get_session = subparsers.add_parser('get_session', help='Get a session by ID', aliases=['gsess'])
+    parser_get_session.add_argument('session_id', help='Session ID')
+    parser_get_session.add_argument('-o','--output', type=str, help='Output file path (JSON or CSV)')
+
+    # get_media command
+    parser_get_media = subparsers.add_parser('get_media', help='Retrieve media details')
+    parser_get_media.add_argument('media_id', help='Media ID')
+
+    # get_upload_url command
+    parser_upload_url = subparsers.add_parser('get_upload_url', help='Get a presigned upload URL')
+    parser_upload_url.add_argument('trace_id', help='Trace ID')
+    parser_upload_url.add_argument('--content_type', required=True, help='Content-Type of the media')
+    parser_upload_url.add_argument('--content_length', type=int, required=True, help='Size of the media in bytes')
+
+    # get_daily_metrics command
+    parser_daily_metrics = subparsers.add_parser('get_daily_metrics', help='Fetch daily metrics', aliases=['gdm'])
+    parser_daily_metrics.add_argument('--trace_name', type=str, help='Optional trace name filter')
+    parser_daily_metrics.add_argument('--user_id', type=str, help='Optional user ID filter')
+    parser_daily_metrics.add_argument('--tags', nargs='*', help='Optional tags for filtering')
+    parser_daily_metrics.add_argument('--from_timestamp', type=str, help='Start date in ISO format')
+    parser_daily_metrics.add_argument('--to_timestamp', type=str, help='End date in ISO format')
+
     args = parser.parse_args()
 
     if args.command == 'list_traces' or args.command == 'lt':
@@ -219,6 +264,79 @@ def main():
             fu.print_trace(trace)
         else:
             print("No traces found matching the criteria.")
+    elif args.command == 'export_traces' or args.command == 'et':
+        output_path = args.output
+        if output_path:
+            if not output_path.endswith(f".{args.format}"):
+                output_path += f".{args.format}"
+        fu.export_traces(format=args.format, output_path=output_path, start_date=args.start_date, end_date=args.end_date)
+    elif args.command == 'import_traces' or args.command == 'it':
+        fu.import_traces(format=args.format, input_path=args.input)
+    elif args.command == 'list_sessions' or args.command == 'lss':
+        sessions = fu.list_sessions(
+            limit=args.limit,
+            start_date=args.start_date,
+            end_date=args.end_date
+        )
+
+        if not sessions:
+            print("No sessions found.")
+        else:
+            if not args.output:
+                # Print to standard output
+                for s in sessions:
+                    print(s)
+            else:
+                # Ensure output file extension matches --format
+                output_path = args.output
+                if not output_path.endswith(f".{args.format}"):
+                    output_path += f".{args.format}"
+
+                if args.format == 'csv':
+                    import csv
+                    with open(output_path, 'w', newline='') as f:
+                        writer = csv.DictWriter(f, fieldnames=sessions[0].keys())
+                        writer.writeheader()
+                        for s in sessions:
+                            writer.writerow(s)
+                else:  # default to JSON
+                    import json
+                    with open(output_path, 'w') as f:
+                        json.dump(sessions, f, indent=2)
+
+                print(f"Sessions written to {os.path.realpath(output_path)}")
+    elif args.command == 'get_session' or args.command == 'gsess':
+        session = fu.get_session(args.session_id)
+        if session:
+            if args.output:
+                if args.output.endswith('.csv'):
+                    import csv
+                    with open(args.output, 'w', newline='') as f:
+                        writer = csv.DictWriter(f, fieldnames=session.keys())
+                        writer.writeheader()
+                        writer.writerow(session)
+                    print(f"Session written to {os.path.realpath(args.output)}")
+                else:
+                    import json
+                    with open(args.output, 'w') as f:
+                        json.dump(session, f, indent=2)
+                    print(f"Session written to {os.path.realpath(args.output)}")
+            else:
+                print(session)
+        else:
+            print(f"No session found for ID {args.session_id}")
+    elif args.command == 'get_media':
+        fu.get_media(args.media_id)
+    elif args.command == 'get_upload_url':
+        fu.get_upload_url(args.trace_id, args.content_type, args.content_length)
+    elif args.command == 'get_daily_metrics' or args.command == 'gdm':
+        fu.get_daily_metrics(
+            trace_name=args.trace_name,
+            user_id=args.user_id,
+            tags=args.tags,
+            from_timestamp=args.from_timestamp,
+            to_timestamp=args.to_timestamp
+        )
     else:
         parser.print_help()
         exit(1)
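
For orientation, a minimal sketch of the new export/import round trip driven from Python rather than the CLI (it assumes fusewill_utils is importable and that LANGFUSE_PUBLIC_KEY, LANGFUSE_SECRET_KEY, and LANGFUSE_HOST are set; the dates and paths are illustrative). The functions themselves are defined in fusewill_utils.py below:

    import fusewill_utils as fu

    # Export every trace created in 2024 to a JSON file...
    fu.export_traces(format='json', output_path='./traces_export.json',
                     start_date='2024-01-01', end_date='2024-12-31')
    # ...then re-create those traces from the same file.
    fu.import_traces(format='json', input_path='./traces_export.json')
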
{olca-0.2.59 → olca-0.2.61}/olca/fusewill_utils.py

@@ -8,11 +8,11 @@ import datetime # Add this import
 import pytz # Add this import
 
 # Load .env from the current working directory
-dotenv.load_dotenv(dotenv_path=os.path.join(os.getcwd(), ".env"))
+dotenv.load_dotenv(dotenv_path=os.path.join(os.getcwd(), ".env"), override=True)
 
 # Try loading from home directory if variables are still not set
 if not os.environ.get("LANGFUSE_PUBLIC_KEY") or not os.environ.get("LANGFUSE_SECRET_KEY") or not os.environ.get("LANGFUSE_HOST"):
-    dotenv.load_dotenv(dotenv_path=os.path.expanduser("~/.env"))
+    dotenv.load_dotenv(dotenv_path=os.path.expanduser("~/.env"), override=True)
 
 # Final check before exiting
 missing_vars = []
@@ -33,7 +33,6 @@ import sys
 sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
 import json
 
-import dotenv
 _DEBUG_=False
 if _DEBUG_:
     print(os.environ.get("LANGFUSE_PUBLIC_KEY"))
@@ -43,7 +42,8 @@ if _DEBUG_:
 langfuse = Langfuse(
     public_key=os.environ.get("LANGFUSE_PUBLIC_KEY"),
     secret_key=os.environ.get("LANGFUSE_SECRET_KEY"),
-    host=os.environ.get("LANGFUSE_HOST")
+    host=os.environ.get("LANGFUSE_HOST"),
+    release=os.environ.get("LANGFUSE_RELEASE", None)
 )
 
 def open_trace_in_browser(trace_id):
@@ -241,4 +241,177 @@ def search_traces(
         return filtered_traces
     except Exception as e:
         print(f"Error searching traces: {e}")
-        return []
+        return []
+
+def fetch_all_traces(start_date=None, end_date=None):
+    all_traces = []
+    page = 1
+    chunk_size = 100
+    params = {}
+    if start_date:
+        params['from_timestamp'] = datetime.datetime.fromisoformat(start_date).replace(tzinfo=pytz.UTC)
+    if end_date:
+        params['to_timestamp'] = datetime.datetime.fromisoformat(end_date).replace(tzinfo=pytz.UTC)
+
+    while True:
+        partial = langfuse.get_traces(limit=chunk_size, page=page, **params)
+        if not partial or not partial.data:
+            break
+        all_traces.extend(partial.data)
+        if len(partial.data) < chunk_size:
+            break
+        page += 1
+    return all_traces
+
+def export_traces(format='json', output_path=None, start_date=None, end_date=None):
+    """
+    Export traces to a given format (json or csv).
+    """
+    try:
+        all_traces = fetch_all_traces(start_date=start_date, end_date=end_date)
+        if not output_path:
+            output_path = f"./traces_export.{format}"
+
+        # Ensure the output directory exists
+        output_dir = os.path.dirname(output_path)
+        if output_dir and not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+
+        if format == 'json':
+            with open(output_path, 'w') as f:
+                json.dump([t.__dict__ for t in all_traces], f, indent=2, default=str)
+        elif format == 'csv':
+            import csv
+            fieldnames = ['id', 'name', 'input', 'output', 'createdAt']
+            with open(output_path, 'w', newline='') as f:
+                writer = csv.DictWriter(f, fieldnames=fieldnames)
+                writer.writeheader()
+                for t in all_traces:
+                    writer.writerow({
+                        'id': t.id,
+                        'name': t.name,
+                        'input': t.input,
+                        'output': t.output,
+                        'createdAt': str(t.createdAt)
+                    })
+
+        if all_traces:
+            # Sort traces by createdAt to ensure the oldest date is first
+            all_traces.sort(key=lambda x: x.createdAt)
+            first_trace_date = datetime.datetime.fromisoformat(all_traces[0].createdAt.replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
+            last_trace_date = datetime.datetime.fromisoformat(all_traces[-1].createdAt.replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
+            print(f"Traces exported to {output_path}. Total traces exported: {len(all_traces)}")
+            print(f"Date range: {first_trace_date} to {last_trace_date}")
+        else:
+            print(f"Traces exported to {output_path}. Total traces exported: {len(all_traces)}")
+    except Exception as e:
+        print(f"Error exporting traces: {e}")
+
+def import_traces(format='json', input_path=None):
+    """
+    Import traces from a given file (json or csv) into Langfuse.
+    """
+    if not input_path:
+        print("No input file provided for importing traces.")
+        return
+
+    try:
+        if format == 'json':
+            with open(input_path, 'r') as f:
+                data = json.load(f)
+        elif format == 'csv':
+            import csv
+            data = []
+            with open(input_path, 'r', newline='') as f:
+                reader = csv.DictReader(f)
+                for row in reader:
+                    data.append(row)
+
+        # Create new traces in Langfuse from data
+        for item in data:
+            langfuse.create_trace(
+                name=item.get('name', 'Imported Trace'),
+                input=item.get('input', ''),
+                output=item.get('output', '')
+                # pass other fields as needed
+            )
+        print(f"Imported {len(data)} traces from {input_path}")
+    except Exception as e:
+        print(f"Error importing traces: {e}")
+
+def list_sessions(limit=100, start_date=None, end_date=None):
+    """
+    List all sessions with optional date filtering.
+    Retrieves multiple pages so we don't miss older sessions.
+    """
+    base_url = os.environ.get("LANGFUSE_HOST")
+    public_key = os.environ.get("LANGFUSE_PUBLIC_KEY")
+    secret_key = os.environ.get("LANGFUSE_SECRET_KEY")
+    url = f"{base_url}/api/public/sessions"
+    sessions = []
+    page = 1
+    while True:
+        params = {
+            "page": page,
+            "limit": limit
+        }
+        if start_date:
+            params["fromTimestamp"] = datetime.datetime.fromisoformat(start_date).isoformat() + 'Z'
+        if end_date:
+            params["toTimestamp"] = datetime.datetime.fromisoformat(end_date).isoformat() + 'Z'
+
+        try:
+            response = requests.get(url, auth=(public_key, secret_key), params=params)
+            response.raise_for_status()
+            data = response.json()
+        except Exception as e:
+            print(f"Error retrieving sessions: {e}")
+            break
+
+        if "data" not in data or len(data["data"]) == 0:
+            break
+
+        sessions.extend(data["data"])
+        if len(data["data"]) < limit:
+            break
+        page += 1
+
+    return sessions
+
+def get_session(session_id):
+    """
+    Get details of a specific session including its traces.
+    """
+    base_url = os.environ.get("LANGFUSE_HOST")
+    public_key = os.environ.get("LANGFUSE_PUBLIC_KEY")
+    secret_key = os.environ.get("LANGFUSE_SECRET_KEY")
+    url = f"{base_url}/api/public/sessions/{session_id}"
+
+    try:
+        response = requests.get(url, auth=(public_key, secret_key))
+        response.raise_for_status()
+        return response.json()
+    except Exception as e:
+        print(f"Error retrieving session {session_id}: {e}")
+        return None
+
+def get_upload_url(trace_id, content_type, content_length):
+    """
+    Get a presigned URL for media upload.
+    """
+    # TODO: Implement API call to POST /media
+    pass
+
+def get_media(media_id):
+    """
+    Retrieve media record details.
+    """
+    # TODO: Implement API call to GET /media/{mediaId}
+    pass
+
+def get_daily_metrics(trace_name=None, user_id=None, tags=None, from_timestamp=None, to_timestamp=None):
+    """
+    Get daily metrics with optional filtering.
+    """
+    # TODO: Implement API call to GET /metrics/daily with query params
+    pass
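
Both fetch_all_traces and list_sessions above follow the same chunked-pagination loop. A generic sketch of the pattern, with a hypothetical fetch callable standing in for langfuse.get_traces or the sessions request (not part of this diff):

    def fetch_pages(fetch, chunk_size=100):
        # Request pages until an empty or short page signals the end.
        page, items = 1, []
        while True:
            batch = fetch(limit=chunk_size, page=page)
            if not batch:
                break
            items.extend(batch)
            if len(batch) < chunk_size:
                break
            page += 1
        return items
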
{olca-0.2.59 → olca-0.2.61}/olca/olcacli.py

@@ -1,6 +1,7 @@
 #%%
 import os
-
+import sys
+sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
 import dotenv
 from langchain import hub
 import argparse
@@ -153,6 +154,18 @@ def _parse_args():
     parser.add_argument("-y", "--yes", action="store_true", help="Accept the new file olca.yml")
     return parser.parse_args()
 
+def parse_model_uri(uri: str):
+    # Example: "ollama://llama2@localhost"
+    if "://" not in uri:
+        return "openai", uri, None  # default provider is openai
+    provider, rest = uri.split("://", 1)
+    host = None
+    if "@" in rest:
+        base_model, host = rest.split("@", 1)
+    else:
+        base_model = rest
+    return provider, base_model, host
+
 def main():
     args = _parse_args()
     olca_config_file = 'olca.yml'
@@ -217,20 +230,33 @@ def main():
     system_instructions = config.get('system_instructions', '')
     user_input = config.get('user_input', '')
     default_model_id = "gpt-4o-mini"
-    model_name = config.get('model_name', default_model_id)
     recursion_limit = config.get('recursion_limit', 15)
     disable_system_append = _parse_args().disable_system_append
-
     # Use the system_instructions and user_input in your CLI logic
+    model_name = config.get('model_name', default_model_id)
+    provider, base_model, host = parse_model_uri(model_name)
+
+    if provider == "ollama":
+        from langchain_ollama import OllamaLLM
+        model = OllamaLLM(model=base_model, base_url=host if host else None)
+    elif provider == "openai":
+        from langchain_openai import ChatOpenAI
+        model = ChatOpenAI(model=base_model, temperature=0)
+    else:
+        # default fallback
+        from langchain_openai import ChatOpenAI
+        model = ChatOpenAI(model=model_name, temperature=0)
+
     print("System Instructions:", system_instructions)
     print("User Input:", user_input)
-    print("Model Name:", model_name)
     print("Recursion Limit:", recursion_limit)
     print("Trace:", tracing_enabled)
+    print("Model Name:", model_name)
 
-    model = ChatOpenAI(model=model_name, temperature=0)
     selected_tools = ["terminal"]
 
+    disable_system_append = _parse_args().disable_system_append
+
     human_switch = args.human
     #look in olca_config.yaml for human: true
     if "human" in config:
@@ -240,6 +266,7 @@ def main():
         selected_tools.append("human")
 
     if args.math:
+        from langchain_openai import OpenAI
         math_llm = OpenAI()
         selected_tools.append("llm-math")
     if human_switch:
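
The model_name values accepted by the new parse_model_uri follow a provider://model@host scheme; under the implementation above, these illustrative inputs resolve as:

    parse_model_uri("gpt-4o-mini")                # ("openai", "gpt-4o-mini", None)
    parse_model_uri("ollama://llama2")            # ("ollama", "llama2", None)
    parse_model_uri("ollama://llama2@localhost")  # ("ollama", "llama2", "localhost")
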
{olca-0.2.59 → olca-0.2.61}/olca/prompts.py

@@ -1,5 +1,8 @@
 # Create a new file "prompts.py" to store the prompt-related constants.
 
+
+#@STCGoal https://smith.langchain.com/hub/jgwill/olca_system_append
+SYSTEM_PROMPT_APPEND_hub_tag_name="jgwill/olca_system_append"
 SYSTEM_PROMPT_APPEND = """
 You do what is above and consider the following when doing the task:
 ---
@@ -22,6 +25,8 @@ You do what is above and consider the following when doing the task:
 REMEMBER: Dont introduce nor conclude, just output results. No comments. you present in a coherent format without preambles or fluff. Never use the word "determination" and we never brainstorm (we conceptualize the result we want in the germination phase then transform it into vision by choice and work as assimilating the vision to until the last phase which is completing our work).
 """
 
+#@STCGoal https://smith.langchain.com/hub/jgwill/olca_human_append
+HUMAN_APPEND_PROMPT_hub_tag_name="jgwill/olca_human_append"
 HUMAN_APPEND_PROMPT = """
 * Utilize the 'human' tool for interactions as directed.
 * Communicate clearly and simply, avoiding exaggeration.
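
The new *_hub_tag_name constants name LangChain Hub entries; presumably they are meant to be resolved with the hub client rather than the local string constants, along these lines (a sketch, not shown in this diff):

    from langchain import hub

    # Pull the shared prompt from LangChain Hub by its tag name.
    system_append = hub.pull("jgwill/olca_system_append")
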
{olca-0.2.59 → olca-0.2.61}/olca.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: olca
-Version: 0.2.59
+Version: 0.2.61
 Summary: A Python package for experimental usage of Langchain and Human-in-the-Loop
 Home-page: https://github.com/jgwill/olca
 Author: Jean GUillaume ISabelle
@@ -361,11 +361,13 @@ Requires-Dist: requests
 Requires-Dist: markdown
 Requires-Dist: langchain
 Requires-Dist: langchain-openai
+Requires-Dist: langchain-ollama
 Requires-Dist: langchain-experimental
 Requires-Dist: click
 Requires-Dist: langgraph
 Requires-Dist: langfuse
 Requires-Dist: pytz
+Requires-Dist: google.generativeai
 
 # oLCa
 
{olca-0.2.59 → olca-0.2.61}/pyproject.toml

@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "olca"
-version = "0.2.59"
+version = "0.2.61"
 
 description = "A Python package for experimental usage of Langchain and Human-in-the-Loop"
 readme = "README.md"
@@ -27,11 +27,13 @@ dependencies = [
     "markdown",
     "langchain",
     "langchain-openai",
+    "langchain-ollama",
     "langchain-experimental",
     "click",
     "langgraph",
     "langfuse",
     "pytz",
+    "google.generativeai",
 ]
 classifiers = [
     "Programming Language :: Python :: 3",
{olca-0.2.59 → olca-0.2.61}/setup.py

@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='olca',
-    version = "0.2.59",
+    version = "0.2.61",
     author='Jean GUillaume ISabelle',
     author_email='jgi@jgwill.com',
     description='A Python package for experimenting with Langchain agent and interactivity in Terminal modalities.',