quantalogic 0.2.26__py3-none-any.whl → 0.2.29__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -0,0 +1,167 @@
+ """Tool for executing SQL queries and returning paginated results in markdown format."""
+
+ from typing import Any, Dict, List
+
+ from pydantic import Field, ValidationError
+ from sqlalchemy import create_engine, text
+ from sqlalchemy.exc import SQLAlchemyError
+
+ from quantalogic.tools.tool import Tool, ToolArgument
+
+
+ class SQLQueryTool(Tool):
+     """Tool for executing SQL queries and returning paginated results in markdown format."""
+
+     name: str = "sql_query_tool"
+     description: str = (
+         "Executes a SQL query and returns results in markdown table format "
+         "with pagination support. Results are truncated based on start/end row numbers."
+     )
+     arguments: list = [
+         ToolArgument(
+             name="query",
+             arg_type="string",
+             description="The SQL query to execute",
+             required=True,
+             example="SELECT * FROM customers WHERE country = 'France'"
+         ),
+         ToolArgument(
+             name="start_row",
+             arg_type="int",
+             description="1-based starting row number for results",
+             required=True,
+             example="1"
+         ),
+         ToolArgument(
+             name="end_row",
+             arg_type="int",
+             description="1-based ending row number for results",
+             required=True,
+             example="100"
+         ),
+     ]
+     connection_string: str = Field(
+         ...,
+         description="SQLAlchemy-compatible database connection string",
+         example="postgresql://user:password@localhost/mydb"
+     )
+
+     def execute(self, query: str, start_row: Any, end_row: Any) -> str:
+         """
+         Executes a SQL query and returns formatted results.
+
+         Args:
+             query: SQL query to execute
+             start_row: 1-based starting row number (supports various numeric types)
+             end_row: 1-based ending row number (supports various numeric types)
+
+         Returns:
+             str: Markdown-formatted results with pagination metadata
+
+         Raises:
+             ValueError: For invalid parameters or query errors
+             RuntimeError: For database connection issues
+         """
+         try:
+             # Convert and validate row numbers
+             start = self._convert_row_number(start_row, "start_row")
+             end = self._convert_row_number(end_row, "end_row")
+
+             if start > end:
+                 raise ValueError(f"start_row ({start}) must be <= end_row ({end})")
+
+             # Execute query
+             engine = create_engine(self.connection_string)
+             with engine.connect() as conn:
+                 result = conn.execute(text(query))
+                 columns: List[str] = result.keys()
+                 all_rows: List[Dict] = [dict(row._mapping) for row in result]
+
+             # Apply pagination
+             total_rows = len(all_rows)
+             actual_start = max(1, start)
+             actual_end = min(end, total_rows)
+
+             if actual_start > total_rows:
+                 return f"No results found (total rows: {total_rows})"
+
+             # Slice results (convert to 0-based index)
+             displayed_rows = all_rows[actual_start-1:actual_end]
+
+             # Format results
+             markdown = [
+                 f"**Query Results:** `{actual_start}-{actual_end}` of `{total_rows}` rows",
+                 self._format_table(columns, displayed_rows)
+             ]
+
+             # Add pagination notice
+             if actual_end < total_rows:
+                 remaining = total_rows - actual_end
+                 markdown.append(f"\n*Showing first {actual_end} rows - {remaining} more row{'s' if remaining > 1 else ''} available*")
+
+             return "\n".join(markdown)
+
+         except SQLAlchemyError as e:
+             raise ValueError(f"SQL Error: {str(e)}") from e
+         except ValidationError as e:
+             raise ValueError(f"Validation Error: {str(e)}") from e
+         except Exception as e:
+             raise RuntimeError(f"Database Error: {str(e)}") from e
+
+     def _convert_row_number(self, value: Any, field_name: str) -> int:
+         """Convert and validate row number input."""
+         try:
+             # Handle numeric strings and floats
+             if isinstance(value, str):
+                 if "." in value:
+                     num = float(value)
+                 else:
+                     num = int(value)
+             else:
+                 num = value
+
+             converted = int(num)
+             if converted != num:  # Check if float had decimal part
+                 raise ValueError("Decimal values are not allowed for row numbers")
+
+             if converted <= 0:
+                 raise ValueError(f"{field_name} must be a positive integer")
+
+             return converted
+         except (ValueError, TypeError) as e:
+             raise ValueError(f"Invalid value for {field_name}: {repr(value)}") from e
+
+     def _format_table(self, columns: List[str], rows: List[Dict]) -> str:
+         """Format results as markdown table with truncation."""
+         if not rows:
+             return "No results found"
+
+         # Create header
+         header = "| " + " | ".join(columns) + " |"
+         separator = "| " + " | ".join(["---"] * len(columns)) + " |"
+
+         # Create rows with truncation
+         body = []
+         for row in rows:
+             values = []
+             for col in columns:
+                 val = str(row.get(col, ""))
+                 # Truncate long values
+                 values.append(val[:50] + "..." if len(val) > 50 else val)
+             body.append("| " + " | ".join(values) + " |")
+
+         return "\n".join([header, separator] + body)
+
+
+
+ if __name__ == "__main__":
+     from quantalogic.tools.utils.create_sample_database import create_sample_database
+
+     # Create and document sample database
+     create_sample_database("sample.db")
+     tool = SQLQueryTool(connection_string="sqlite:///sample.db")
+     print(tool.execute("select * from customers", 1, 10))
+     print(tool.execute("select * from customers", 11, 20))
+
+
+
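The tool above can be driven in a simple paging loop. The sketch below is illustrative only: the module import path `quantalogic.tools.sql_query_tool` is assumed (the diff does not show file paths), the sample database comes from the helper shipped in this same release, and the page size is arbitrary.

```python
# Illustrative sketch: page through a result set with SQLQueryTool.
# The import path of the tool module is an assumption; adjust to the actual location.
from quantalogic.tools.sql_query_tool import SQLQueryTool
from quantalogic.tools.utils import create_sample_database

create_sample_database("sample.db")
tool = SQLQueryTool(connection_string="sqlite:///sample.db")

page_size, start = 25, 1
while True:
    page = tool.execute("SELECT * FROM customers", start, start + page_size - 1)
    print(page)
    # execute() appends a "... more rows available" notice only when rows were truncated.
    if "more row" not in page:
        break
    start += page_size
```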
@@ -0,0 +1,13 @@
+ """
+ Utility functions and classes for quantalogic tools.
+
+ This module provides common utility functions used across the quantalogic package.
+ """
+
+ from .create_sample_database import create_sample_database
+ from .generate_database_report import generate_database_report
+
+ __all__ = [
+     'create_sample_database',
+     'generate_database_report'
+ ]
@@ -0,0 +1,124 @@
+ import random
+ from datetime import datetime, timedelta
+
+ from faker import Faker
+ from sqlalchemy import Column, Date, Float, ForeignKey, Integer, String, create_engine
+ from sqlalchemy.orm import declarative_base, relationship, sessionmaker
+
+ Base = declarative_base()
+ fake = Faker()
+
+ def create_sample_database(db_path: str) -> None:
+     """
+     Creates a sample SQLite database with 5 tables and 10 rows each.
+
+     Args:
+         db_path: Path to the SQLite database file (e.g., 'sample.db')
+     """
+     # Define database schema
+     class Customer(Base):
+         __tablename__ = 'customers'
+         id = Column(Integer, primary_key=True)
+         name = Column(String)
+         email = Column(String)
+         addresses = relationship("Address", back_populates="customer")
+         orders = relationship("Order", back_populates="customer")
+
+     class Address(Base):
+         __tablename__ = 'addresses'
+         id = Column(Integer, primary_key=True)
+         street = Column(String)
+         city = Column(String)
+         customer_id = Column(Integer, ForeignKey('customers.id'))
+         customer = relationship("Customer", back_populates="addresses")
+
+     class Product(Base):
+         __tablename__ = 'products'
+         id = Column(Integer, primary_key=True)
+         name = Column(String)
+         price = Column(Float)
+
+     class Order(Base):
+         __tablename__ = 'orders'
+         id = Column(Integer, primary_key=True)
+         order_date = Column(Date)
+         customer_id = Column(Integer, ForeignKey('customers.id'))
+         customer = relationship("Customer", back_populates="orders")
+         items = relationship("OrderItem", back_populates="order")
+
+     class OrderItem(Base):
+         __tablename__ = 'order_items'
+         id = Column(Integer, primary_key=True)
+         quantity = Column(Integer)
+         order_id = Column(Integer, ForeignKey('orders.id'))
+         product_id = Column(Integer, ForeignKey('products.id'))
+         order = relationship("Order", back_populates="items")
+         product = relationship("Product")
+
+     # Create database and tables
+     engine = create_engine(f'sqlite:///{db_path}')
+     Base.metadata.create_all(engine)
+     Session = sessionmaker(bind=engine)  # noqa: N806
+     session = Session()
+
+     # Generate sample data
+     try:
+         # Create 10 customers
+         customers = []
+         for _ in range(10):
+             customer = Customer(
+                 name=fake.name(),
+                 email=fake.email()
+             )
+             customers.append(customer)
+             session.add(customer)
+
+         session.commit()
+
+         # Create 10 addresses (1 per customer)
+         for customer in customers:
+             address = Address(
+                 street=fake.street_address(),
+                 city=fake.city(),
+                 customer=customer
+             )
+             session.add(address)
+
+         # Create 10 products
+         products = []
+         for _ in range(10):
+             product = Product(
+                 name=fake.word().capitalize(),
+                 price=round(random.uniform(10, 1000), 2)
+             )
+             products.append(product)
+             session.add(product)
+
+         # Create 10 orders (1 per customer)
+         orders = []
+         start_date = datetime.now() - timedelta(days=365)
+         for customer in customers:
+             order = Order(
+                 order_date=fake.date_between(start_date=start_date),
+                 customer=customer
+             )
+             orders.append(order)
+             session.add(order)
+
+         # Create 10 order items (1 per order)
+         for order in orders:
+             order_item = OrderItem(
+                 quantity=random.randint(1, 5),
+                 order=order,
+                 product=random.choice(products)
+             )
+             session.add(order_item)
+
+         session.commit()
+     finally:
+         session.close()
+
+ # Example usage
+ if __name__ == "__main__":
+     create_sample_database("sample.db")
+     print("Sample database created successfully!")
@@ -0,0 +1,289 @@
+ from datetime import UTC, datetime
+ from typing import Dict, List
+
+ import networkx as nx
+ from sqlalchemy import create_engine, inspect, text
+ from sqlalchemy.engine import Inspector
+
+
+ def generate_database_report(connection_string: str) -> str:
+     """
+     Generates a comprehensive Markdown database documentation report with ER diagram.
+
+     Args:
+         connection_string: SQLAlchemy-compatible database connection string
+
+     Returns:
+         Markdown-formatted report as a string
+     """
+     # Setup database connection and inspection
+     engine = create_engine(connection_string)
+     inspector = inspect(engine)
+
+     # Collect database metadata
+     db_metadata = {
+         'name': engine.url.database,
+         'dialect': engine.dialect.name,
+         'tables': inspector.get_table_names()
+     }
+
+     # Initialize data structures
+     graph = nx.DiGraph()
+     table_metadata: Dict[str, dict] = {}
+     fk_relationships: List[dict] = []
+     sampled_ids: Dict[str, list] = {}
+     sample_data: Dict[str, list] = {}
+
+     # Collect schema metadata and relationships
+     for table in db_metadata['tables']:
+         columns = inspector.get_columns(table)
+         pk = inspector.get_pk_constraint(table).get('constrained_columns', [])
+         indexes = inspector.get_indexes(table)
+         fks = inspector.get_foreign_keys(table)
+
+         # Process foreign keys
+         for fk in fks:
+             process_foreign_key(table, fk, inspector, graph, fk_relationships)
+
+         table_metadata[table] = {
+             'columns': columns,
+             'primary_keys': pk,
+             'indexes': indexes,
+             'foreign_keys': fks
+         }
+
+     # Process tables in dependency order
+     sorted_tables = get_sorted_tables(graph, db_metadata['tables'])
+
+     # Collect sample data with parent-child relationships
+     collect_sample_data(engine, sorted_tables, table_metadata, sample_data, sampled_ids)
+
+     # Generate Markdown report
+     return generate_markdown_report(db_metadata, sorted_tables, table_metadata,
+                                     fk_relationships, sample_data)
+
+
+ def process_foreign_key(
+     table: str,
+     fk: dict,
+     inspector: Inspector,
+     graph: nx.DiGraph,
+     fk_relationships: List[dict]
+ ) -> None:
+     """Process and record foreign key relationships with cardinality information."""
+     src_col = fk['constrained_columns'][0]
+     tgt_table = fk['referred_table']
+     tgt_col = fk['referred_columns'][0]
+
+     # Check uniqueness and nullability in source column
+     src_columns = inspector.get_columns(table)
+     src_col_meta = next(c for c in src_columns if c['name'] == src_col)
+     is_unique = src_col_meta.get('unique', False) or src_col in inspector.get_pk_constraint(table).get('constrained_columns', [])
+     is_nullable = src_col_meta['nullable']
+
+     fk_relationships.append({
+         'source_table': table,
+         'source_column': src_col,
+         'target_table': tgt_table,
+         'target_column': tgt_col,
+         'constraint_name': fk['name'],
+         'is_unique': is_unique,
+         'is_nullable': is_nullable
+     })
+     graph.add_edge(table, tgt_table)
+
+
+ def get_sorted_tables(graph: nx.DiGraph, tables: List[str]) -> List[str]:
+     """Return tables sorted topologically with fallback to original order."""
+     try:
+         return list(nx.topological_sort(graph))
+     except nx.NetworkXUnfeasible:
+         return tables
+
+
+ def collect_sample_data(
+     engine,
+     tables: List[str],
+     table_metadata: Dict[str, dict],
+     sample_data: Dict[str, list],
+     sampled_ids: Dict[str, list]
+ ) -> None:
+     """Collect sample data while maintaining referential integrity."""
+     for table in tables:
+         with engine.connect() as conn:
+             # Get parent samples
+             result = conn.execute(text(f"SELECT * FROM {table} LIMIT 5"))
+             samples = [dict(row._mapping) for row in result]
+             sample_data[table] = samples
+
+             # Store IDs for child sampling
+             if samples and table_metadata[table]['primary_keys']:
+                 pk_col = table_metadata[table]['primary_keys'][0]
+                 sampled_ids[table] = [row[pk_col] for row in samples]
+
+
+ def generate_markdown_report(
+     db_metadata: dict,
+     tables: List[str],
+     table_metadata: Dict[str, dict],
+     fk_relationships: List[dict],
+     sample_data: Dict[str, list]
+ ) -> str:
+     """Generate the complete Markdown report."""
+     md = []
+
+     # Database Summary
+     md.append("# Database Documentation Report\n")
+     md.append(f"**Database Type**: {db_metadata['dialect'].capitalize()}\n")
+     md.append(f"**Database Name**: {db_metadata['name']}\n")
+     md.append(f"**Total Tables**: {len(db_metadata['tables'])}\n")
+     md.append(f"**Generated At**: {datetime.now(UTC).strftime('%Y-%m-%d %H:%M:%S UTC')}\n\n")
+
+     # ERD Section
+     md.append("## Entity Relationship Diagram\n")
+     md.append("```mermaid\nerDiagram\n")
+     generate_erd_section(md, tables, table_metadata, fk_relationships)
+     md.append("```\n\n")
+
+     # Schema Details
+     md.append("## Schema Details\n")
+     for table in tables:
+         meta = table_metadata[table]
+         md.append(f"### {table}\n")
+         generate_columns_section(md, meta)
+         generate_indexes_section(md, meta)
+
+     # Relationships
+     generate_relationships_section(md, fk_relationships)
+
+     # Cardinality Report
+     generate_cardinality_section(md, fk_relationships)
+
+     # Data Samples
+     md.append("## Data Samples\n")
+     for table in tables:
+         samples = sample_data[table]
+         md.append(f"### {table}\n")
+         generate_sample_table(md, samples)
+
+     return '\n'.join(md)
+
+
+ def generate_erd_section(md: List[str], tables: List[str], table_metadata: Dict[str, dict], fk_relationships: List[dict]) -> None:
+     """Generate Mermaid ER diagram section."""
+     # Define tables with their columns
+     for table in tables:
+         table_upper = table.upper()
+         md.append(f" {table_upper} {{\n")
+         for col in table_metadata[table]['columns']:
+             col_type = str(col['type']).split('(')[0].upper()  # Simplify type names
+             annotations = []
+             if col['name'] in table_metadata[table]['primary_keys']:
+                 annotations.append("PK")
+             # Check if column is a foreign key
+             for fk in fk_relationships:
+                 if fk['source_table'] == table and fk['source_column'] == col['name']:
+                     annotations.append("FK")
+                     break
+             annotation_str = " ".join(annotations)
+             md.append(f" {col_type} {col['name']} {annotation_str}\n")
+         md.append(" }\n")
+
+     # Define relationships with cardinality
+     for fk in fk_relationships:
+         target_table = fk['target_table'].upper()
+         source_table = fk['source_table'].upper()
+         source_cardinality = get_source_cardinality(fk['is_unique'], fk['is_nullable'])
+         md.append(f" {target_table} ||--{source_cardinality} {source_table} : \"{fk['constraint_name']}\"\n")
+
+
+ def get_source_cardinality(is_unique: bool, is_nullable: bool) -> str:
+     """Determine Mermaid cardinality symbol for source side of relationship."""
+     if is_unique:
+         return "|o" if is_nullable else "||"
+     else:
+         return "o{" if is_nullable else "|{"
+
+
+ def generate_relationships_section(md: List[str], fk_relationships: List[dict]) -> None:
+     """Generate foreign key relationships section."""
+     if fk_relationships:
+         md.append("## Relationships\n")
+         for fk in fk_relationships:
+             src = f"{fk['source_table']}.{fk['source_column']}"
+             tgt = f"{fk['target_table']}.{fk['target_column']}"
+             md.append(f"- `{src}` → `{tgt}` (Constraint: `{fk['constraint_name']}`)\n")
+         md.append("\n")
+
+
+ def generate_cardinality_section(md: List[str], fk_relationships: List[dict]) -> None:
+     """Generate cardinality report section."""
+     cardinalities = {}
+     for fk in fk_relationships:
+         key = (fk['target_table'], fk['source_table'])
+         if key in cardinalities:
+             continue
+
+         if fk['is_unique']:
+             cardinality = "(1) → (1)"
+         else:
+             cardinality = "(1) → (N)"
+
+         cardinalities[key] = f"{fk['target_table']} {cardinality} {fk['source_table']}"
+
+     if cardinalities:
+         md.append("## Cardinality Report\n")
+         for entry in cardinalities.values():
+             md.append(f"- {entry}\n")
+         md.append("\n")
+
+
+ def generate_columns_section(md: List[str], meta: dict) -> None:
+     """Generate columns table section."""
+     md.append("#### Columns\n")
+     md.append("| Column Name | Data Type | Nullable? | Primary Key? |\n")
+     md.append("|-------------|-----------|-----------|--------------|\n")
+     for col in meta['columns']:
+         pk = "Yes" if col['name'] in meta['primary_keys'] else "No"
+         md.append(f"| `{col['name']}` | {col['type']} | {'Yes' if col['nullable'] else 'No'} | {pk} |\n")
+     md.append("\n")
+
+
+ def generate_indexes_section(md: List[str], meta: dict) -> None:
+     """Generate indexes section."""
+     if meta['indexes']:
+         md.append("#### Indexes\n")
+         for idx in meta['indexes']:
+             columns = ", ".join(idx['column_names'])
+             md.append(f"- `{idx['name']}` ({idx['type'] or 'INDEX'}) → {columns}\n")
+         md.append("\n")
+
+
+ def generate_sample_table(md: List[str], samples: list) -> None:
+     """Generate sample data table section."""
+     if not samples:
+         md.append("No records found.\n\n")
+         return
+
+     headers = samples[0].keys()
+     md.append("| " + " | ".join(headers) + " |\n")
+     md.append("|" + "|".join(["---"] * len(headers)) + "|\n")
+
+     for row in samples:
+         values = []
+         for val in row.values():
+             if isinstance(val, str) and len(val) > 50:
+                 values.append(f"{val[:47]}...")
+             else:
+                 values.append(str(val))
+         md.append("| " + " | ".join(values) + " |\n")
+     md.append("\n")
+
+
+ if __name__ == "__main__":
+     from quantalogic.tools.utils.create_sample_database import create_sample_database
+
+     # Create and document sample database
+     create_sample_database("sample.db")
+     report = generate_database_report("sqlite:///sample.db")
+     print(report)
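A short usage sketch for the report generator above; the output file name is illustrative. For reference, `get_source_cardinality` encodes the source side of each relationship with the Mermaid symbols `||`, `|o`, `|{` and `o{`, depending on whether the foreign-key column is unique and/or nullable.

```python
# Illustrative sketch: generate the Markdown report and save it to disk.
from pathlib import Path

from quantalogic.tools.utils import create_sample_database, generate_database_report

create_sample_database("sample.db")
report = generate_database_report("sqlite:///sample.db")

# "db_report.md" is an illustrative file name, not part of the package.
Path("db_report.md").write_text(report, encoding="utf-8")
print(f"Report written: {len(report.splitlines())} lines")
```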
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: quantalogic
- Version: 0.2.26
+ Version: 0.2.29
  Summary: QuantaLogic ReAct Agents
  Author: Raphaël MANSUY
  Author-email: raphael.mansuy@gmail.com
@@ -12,6 +12,7 @@ Requires-Dist: beautifulsoup4 (>=4.12.3,<5.0.0)
  Requires-Dist: boto3 (>=1.35.86,<2.0.0)
  Requires-Dist: click (>=8.1.8,<9.0.0)
  Requires-Dist: duckduckgo-search (>=7.2.1,<8.0.0)
+ Requires-Dist: faker (>=33.3.1,<34.0.0)
  Requires-Dist: fastapi (>=0.115.6,<0.116.0)
  Requires-Dist: google-auth (>=2.20.0,<3.0.0)
  Requires-Dist: google-search-results (>=2.4.2,<3.0.0)
@@ -28,13 +29,16 @@ Requires-Dist: mkdocs-mermaid2-plugin (>=1.1.1,<2.0.0)
  Requires-Dist: mkdocs-minify-plugin (>=0.7.1,<0.8.0)
  Requires-Dist: mkdocstrings (>=0.24.0,<0.25.0)
  Requires-Dist: mkdocstrings-python (>=1.7.0,<2.0.0)
+ Requires-Dist: networkx (>=3.4.2,<4.0.0)
  Requires-Dist: pathspec (>=0.12.1,<0.13.0)
  Requires-Dist: prompt-toolkit (>=3.0.48,<4.0.0)
  Requires-Dist: pydantic (>=2.10.4,<3.0.0)
  Requires-Dist: pymdown-extensions (>=10.3.1,<11.0.0)
+ Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
  Requires-Dist: requests (>=2.32.3,<3.0.0)
  Requires-Dist: rich (>=13.9.4,<14.0.0)
  Requires-Dist: serpapi (>=0.1.5,<0.2.0)
+ Requires-Dist: sqlalchemy (>=2.0.37,<3.0.0)
  Requires-Dist: tenacity (>=9.0.0,<10.0.0)
  Requires-Dist: toml (>=0.10.2,<0.11.0)
  Requires-Dist: tree-sitter (>=0.23.2,<0.24.0)
@@ -59,8 +63,10 @@ Description-Content-Type: text/markdown
  [![Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://quantalogic.github.io/quantalogic/)


+
  QuantaLogic is a ReAct (Reasoning & Action) framework for building advanced AI agents.

+
  It seamlessly integrates large language models (LLMs) with a robust tool system, enabling agents to understand, reason about, and execute complex tasks through natural language interaction.

  The `cli` version includes coding capabilities comparable to Aider.
@@ -82,7 +88,7 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
  ## 🌟 Highlights

  - **ReAct Framework**: Advanced implementation combining LLM reasoning with concrete actions
- - **Universal LLM Support**: Integration with OpenAI, Anthropic, LM Studio, Bedrock, Ollama, DeepSeek V3, via LiteLLM
+ - **Universal LLM Support**: Integration with OpenAI, Anthropic, LM Studio, Bedrock, Ollama, DeepSeek V3, DeepSeek R1, via LiteLLM. Example usage: `quantalogic --model-name deepseek/deepseek-reasoner` or `quantalogic --model-name openrouter/deepseek/deepseek-r1`
  - **Secure Tool System**: Docker-based code execution and file manipulation tools
  - **Real-time Monitoring**: Web interface with SSE-based event visualization
  - **Memory Management**: Intelligent context handling and optimization