construct-labs-crm-env 0.1.7__tar.gz → 0.1.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: construct-labs-crm-env
- Version: 0.1.7
+ Version: 0.1.8
  Summary: CRM Agent Environment SDK by Construct Labs - Train RL agents to interact with CRM systems
  Project-URL: Homepage, https://construct-labs.com
  Author-email: Construct Labs GmbH <hello@construct-labs.com>
@@ -22,6 +22,9 @@ Requires-Python: >=3.10
  Requires-Dist: openenv-core>=0.2.0
  Requires-Dist: pydantic>=2.0.0
  Requires-Dist: websockets>=12.0
+ Provides-Extra: chat
+ Requires-Dist: google-genai>=1.0.0; extra == 'chat'
+ Requires-Dist: python-dotenv>=1.0.0; extra == 'chat'
  Provides-Extra: dev
  Requires-Dist: mypy>=1.0.0; extra == 'dev'
  Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
@@ -51,7 +54,7 @@ from construct_labs_crm_env import CrmAgentEnv, CrmAgentAction, CRMActionType

  # Connect to the CRM environment
  with CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"  # Issued by Construct Labs
  ) as env:
      # Reset the environment
@@ -75,7 +78,7 @@ export CRM_AGENT_API_KEY=your-api-key

  ```python
  # API key is read from environment
- env = CrmAgentEnv(base_url="https://api.construct-labs.com")
+ env = CrmAgentEnv(base_url="https://env.crm.construct-labs.com")
  ```

  ## LLM Integration Example
@@ -86,7 +89,7 @@ The SDK is designed to work with LLM-based agents. Here's how to parse LLM tool
  from construct_labs_crm_env import CrmAgentEnv

  with CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"
  ) as env:
      result = env.reset()
@@ -257,7 +260,7 @@ def collect_rollouts(

  # Example usage
  with CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"
  ) as env:
      # Collect 10 rollouts
@@ -305,7 +308,7 @@ def compute_grpo_advantages(group: list[Rollout]) -> list[float]:

  # Training loop
  with CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"
  ) as env:
      for step in range(num_training_steps):
@@ -325,7 +328,7 @@ with CrmAgentEnv(
  from construct_labs_crm_env import CrmAgentEnv

  env = CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"
  )

@@ -20,7 +20,7 @@ from construct_labs_crm_env import CrmAgentEnv, CrmAgentAction, CRMActionType

  # Connect to the CRM environment
  with CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"  # Issued by Construct Labs
  ) as env:
      # Reset the environment
@@ -44,7 +44,7 @@ export CRM_AGENT_API_KEY=your-api-key

  ```python
  # API key is read from environment
- env = CrmAgentEnv(base_url="https://api.construct-labs.com")
+ env = CrmAgentEnv(base_url="https://env.crm.construct-labs.com")
  ```

  ## LLM Integration Example
@@ -55,7 +55,7 @@ The SDK is designed to work with LLM-based agents. Here's how to parse LLM tool
  from construct_labs_crm_env import CrmAgentEnv

  with CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"
  ) as env:
      result = env.reset()
@@ -226,7 +226,7 @@ def collect_rollouts(

  # Example usage
  with CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"
  ) as env:
      # Collect 10 rollouts
@@ -274,7 +274,7 @@ def compute_grpo_advantages(group: list[Rollout]) -> list[float]:

  # Training loop
  with CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"
  ) as env:
      for step in range(num_training_steps):
@@ -294,7 +294,7 @@ with CrmAgentEnv(
  from construct_labs_crm_env import CrmAgentEnv

  env = CrmAgentEnv(
-     base_url="https://api.construct-labs.com",
+     base_url="https://env.crm.construct-labs.com",
      api_key="your-api-key"
  )

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

  [project]
  name = "construct-labs-crm-env"
- version = "0.1.7"
+ version = "0.1.8"
  description = "CRM Agent Environment SDK by Construct Labs - Train RL agents to interact with CRM systems"
  readme = "README.md"
  license = { text = "Proprietary" }
@@ -39,7 +39,14 @@ dependencies = [
      "websockets>=12.0",
  ]

+ [project.scripts]
+ chat-agent = "construct_labs_crm_env.examples.chat:main"
+
  [project.optional-dependencies]
+ chat = [
+     "google-genai>=1.0.0",
+     "python-dotenv>=1.0.0",
+ ]
  dev = [
      "pytest>=7.0.0",
      "pytest-asyncio>=0.21.0",
@@ -8,7 +8,7 @@ Example:
  >>> from construct_labs_crm_env import CrmAgentEnv, CrmAgentAction, CRMActionType
  >>>
  >>> with CrmAgentEnv(
- ...     base_url="https://api.construct-labs.com",
+ ...     base_url="https://env.crm.construct-labs.com",
  ...     api_key="your-api-key"
  ... ) as env:
  ...     result = env.reset()
@@ -61,7 +61,7 @@ class CrmAgentEnv(EnvClient[CrmAgentAction, CrmAgentObservation, CrmAgentState])
  Example:
  >>> # Basic usage
  >>> with CrmAgentEnv(
- ...     base_url="https://api.construct-labs.com",
+ ...     base_url="https://env.crm.construct-labs.com",
  ...     api_key="cl_live_xxx"
  ... ) as env:
  ...     result = env.reset()
@@ -0,0 +1 @@
+ """Example scripts for construct-labs-crm-env."""
@@ -0,0 +1,504 @@
+ """
+ Interactive chat CLI for interacting with the CRM environment via an LLM.
+
+ This example demonstrates how to build a conversational agent that uses
+ the CRM environment tools. The LLM can make multiple tool calls to query
+ and modify CRM data before providing a final answer.
+
+ Requirements:
+     pip install construct-labs-crm-env google-genai python-dotenv
+
+ Usage:
+     export CRM_AGENT_API_KEY=your-crm-api-key
+     export GOOGLE_API_KEY=your-google-api-key
+     python chat.py --help
+     python chat.py
+     python chat.py --question "How many companies are there?"
+     python chat.py --model gemini-2.0-flash --stream
+ """
+
+ from __future__ import annotations
+
+ import argparse
+ import json
+ import os
+ import sys
+
+ try:
+     from dotenv import load_dotenv
+ except ImportError:
+     load_dotenv = None  # type: ignore[assignment, misc]
+
+ try:
+     from google import genai
+     from google.genai import types
+ except ImportError:
+     genai = None  # type: ignore[assignment]
+     types = None  # type: ignore[assignment]
+
+ from construct_labs_crm_env import CrmAgentEnv
+
+
+ def _check_dependencies() -> None:
+     """Check that optional dependencies are installed."""
+     missing = []
+     if genai is None:
+         missing.append("google-genai")
+     if load_dotenv is None:
+         missing.append("python-dotenv")
+
+     if missing:
+         print(
+             f"Missing dependencies: {', '.join(missing)}\n"
+             f"Install with: pip install construct-labs-crm-env[chat]"
+         )
+         sys.exit(1)
+
+
+ if load_dotenv is not None:
+     load_dotenv()
+
+
+ def print_colored(text: str, color: str) -> None:
+     """Print colored text to terminal."""
+     colors = {
+         "red": "\033[91m",
+         "green": "\033[92m",
+         "yellow": "\033[93m",
+         "blue": "\033[94m",
+         "magenta": "\033[95m",
+         "cyan": "\033[96m",
+         "white": "\033[97m",
+         "reset": "\033[0m",
+         "bold": "\033[1m",
+         "dim": "\033[2m",
+     }
+     print(f"{colors.get(color, '')}{text}{colors['reset']}")
+
+
+ def print_separator(char: str = "-", width: int = 60) -> None:
+     """Print a separator line."""
+     print(char * width)
+
+
+ def convert_openai_tools_to_gemini(openai_tools: list[dict]) -> list[types.Tool]:
+     """Convert OpenAI-format tools to Gemini format."""
+     function_declarations = []
+
+     for tool in openai_tools:
+         if tool.get("type") != "function":
+             continue
+
+         func = tool["function"]
+         name = func["name"]
+         description = func.get("description", "")
+         parameters = func.get("parameters", {})
+
+         function_declarations.append(
+             types.FunctionDeclaration(
+                 name=name,
+                 description=description,
+                 parameters=parameters if parameters else None,
+             )
+         )
+
+     return [types.Tool(function_declarations=function_declarations)]
+
+
+ def format_tool_call_compact(tool_name: str, tool_args: dict) -> str:
+     """Format a tool call in compact function-call style: func_name(arg1, arg2, ...)"""
+     if not tool_args:
+         return f"{tool_name}()"
+
+     arg_parts = []
+     for key, value in tool_args.items():
+         if isinstance(value, str):
+             if len(value) > 30:
+                 value = value[:27] + "..."
+             arg_parts.append(f'{key}="{value}"')
+         elif isinstance(value, (dict, list)):
+             arg_parts.append(f"{key}=<{type(value).__name__}>")
+         else:
+             arg_parts.append(f"{key}={value}")
+
+     return f"{tool_name}({', '.join(arg_parts)})"
+
+
+ def _stream_response(
+     client: genai.Client,
+     model_name: str,
+     contents: list[types.Content],
+     config: types.GenerateContentConfig,
+     print_output: bool = True,
+ ) -> tuple[list[str], list[types.FunctionCall], types.Content | None]:
+     """
+     Make a streaming call to the model, optionally printing tokens as they arrive.
+
+     Returns:
+         Tuple of (text_parts, function_calls, final_content)
+     """
+     text_parts: list[str] = []
+     function_calls: list[types.FunctionCall] = []
+     all_parts: list[types.Part] = []
+
+     for chunk in client.models.generate_content_stream(
+         model=model_name,
+         contents=contents,
+         config=config,
+     ):
+         if not chunk.candidates:
+             continue
+
+         candidate = chunk.candidates[0]
+
+         for part in candidate.content.parts:
+             if part.text:
+                 if print_output:
+                     print(part.text, end="", flush=True)
+                 text_parts.append(part.text)
+                 all_parts.append(part)
+
+             if part.function_call:
+                 function_calls.append(part.function_call)
+                 all_parts.append(part)
+
+     if print_output and text_parts:
+         print()
+
+     if not all_parts:
+         return [], [], None
+
+     final_content = types.Content(role="model", parts=all_parts)
+     full_text = "".join(text_parts)
+     text_result = [full_text] if full_text else []
+
+     return text_result, function_calls, final_content
+
+
+ def _non_stream_response(
+     client: genai.Client,
+     model_name: str,
+     contents: list[types.Content],
+     config: types.GenerateContentConfig,
+ ) -> tuple[list[str], list[types.FunctionCall], types.Content | None]:
+     """Make a non-streaming call to the model."""
+     response = client.models.generate_content(
+         model=model_name,
+         contents=contents,
+         config=config,
+     )
+
+     if not response.candidates:
+         return [], [], None
+
+     candidate = response.candidates[0]
+     text_parts: list[str] = []
+     function_calls: list[types.FunctionCall] = []
+
+     for part in candidate.content.parts:
+         if part.text:
+             text_parts.append(part.text)
+         if part.function_call:
+             function_calls.append(part.function_call)
+
+     return text_parts, function_calls, candidate.content
+
+
+ def run_agent_loop(
+     env: CrmAgentEnv,
+     client: genai.Client,
+     model_name: str,
+     tools: list[types.Tool],
+     system_instruction: str,
+     contents: list[types.Content],
+     max_iterations: int = 20,
+     verbose: bool = False,
+     stream: bool = False,
+ ) -> tuple[str | None, list[types.Content]]:
+     """
+     Run the agent loop until it calls submit_answer or reaches max iterations.
+
+     The agent can make multiple tool calls, getting results back each time,
+     until it's ready to submit a final answer.
+
+     Returns:
+         Tuple of (final_answer or None, updated contents)
+     """
+     iteration = 0
+
+     while iteration < max_iterations:
+         iteration += 1
+
+         config = types.GenerateContentConfig(
+             tools=tools,
+             system_instruction=system_instruction,
+             temperature=0.7,
+         )
+
+         try:
+             if stream:
+                 text_parts, function_calls, final_content = _stream_response(
+                     client=client,
+                     model_name=model_name,
+                     contents=contents,
+                     config=config,
+                     print_output=False,
+                 )
+             else:
+                 text_parts, function_calls, final_content = _non_stream_response(
+                     client=client,
+                     model_name=model_name,
+                     contents=contents,
+                     config=config,
+                 )
+         except Exception as e:
+             print_colored(f"API Error: {e}", "red")
+             return None, contents
+
+         if final_content is None:
+             print_colored("Error: No response from model", "red")
+             return None, contents
+
+         contents.append(final_content)
+
+         if not function_calls:
+             if text_parts:
+                 return "\n".join(text_parts), contents
+             return None, contents
+
+         if text_parts:
+             print_colored("\nThinking:", "dim")
+             print("\n".join(text_parts))
+
+         function_response_parts = []
+         final_answer = None
+
+         for func_call in function_calls:
+             tool_name = func_call.name
+             tool_args = dict(func_call.args) if func_call.args else {}
+
+             if tool_name == "submit_answer":
+                 final_answer = tool_args.get("answer", "")
+                 print_colored("\n>>> Agent submitting answer", "cyan")
+                 function_response_parts.append(
+                     types.Part.from_function_response(
+                         name=func_call.name,
+                         response={"result": "Answer submitted successfully."},
+                     )
+                 )
+                 continue
+
+             tool_display = format_tool_call_compact(tool_name, tool_args)
+             print_colored(f"\n{tool_display}", "magenta")
+             if verbose:
+                 print_colored(f" Args: {json.dumps(tool_args, indent=2)}", "dim")
+
+             parsed_tool_call = {"name": tool_name, "arguments": tool_args}
+             parsed_action = env.parse_tool_call(parsed_tool_call)
+
+             if not parsed_action.is_valid:
+                 obs_text = f"Error: {parsed_action.error_message}"
+                 print_colored(f" Invalid: {parsed_action.error_message}", "red")
+             else:
+                 try:
+                     result = env.step(parsed_action.action)
+                     obs_text = env.format_observation(result.observation)
+
+                     display_text = (
+                         obs_text[:500] + "..." if len(obs_text) > 500 else obs_text
+                     )
+                     print_colored(f" Result: {display_text}", "green")
+                 except Exception as e:
+                     obs_text = f"Error executing action: {e}"
+                     print_colored(f" {obs_text}", "red")
+
+             function_response_parts.append(
+                 types.Part.from_function_response(
+                     name=func_call.name,
+                     response={"result": obs_text},
+                 )
+             )
+
+         contents.append(types.Content(role="user", parts=function_response_parts))
+
+         if final_answer is not None:
+             return final_answer, contents
+
+     print_colored(f"\nMax iterations ({max_iterations}) reached", "yellow")
+     return None, contents
+
+
+ def run_chat(
+     env: CrmAgentEnv,
+     client: genai.Client,
+     model_name: str,
+     tools: list[types.Tool],
+     system_instruction: str,
+     initial_question: str | None = None,
+     max_iterations: int = 20,
+     verbose: bool = False,
+     stream: bool = False,
+ ) -> None:
+     """
+     Run an interactive chat session with the environment.
+
+     The flow is:
+     1. User asks a question
+     2. Agent uses tools as needed (multiple calls in a loop)
+     3. Agent calls submit_answer when ready
+     4. Answer is displayed to user
+     5. User can ask another question (loop back to 1)
+     """
+     print_colored("\nResetting environment...", "dim")
+     env.reset()
+     print_colored("Environment ready.\n", "green")
+
+     contents: list[types.Content] = []
+
+     while True:
+         if initial_question:
+             question = initial_question
+             initial_question = None
+         else:
+             print_colored("\nYou: ", "cyan")
+             try:
+                 question = input().strip()
+             except EOFError:
+                 break
+
+         if not question:
+             continue
+         if question.lower() in ("quit", "exit", "q"):
+             print_colored("Goodbye!", "yellow")
+             break
+
+         print_separator("=")
+
+         contents.append(
+             types.Content(role="user", parts=[types.Part.from_text(text=question)])
+         )
+
+         answer, contents = run_agent_loop(
+             env=env,
+             client=client,
+             model_name=model_name,
+             tools=tools,
+             system_instruction=system_instruction,
+             contents=contents,
+             max_iterations=max_iterations,
+             verbose=verbose,
+             stream=stream,
+         )
+
+         print_separator("=")
+         if answer:
+             print_colored("\nAssistant:", "blue")
+             print(answer)
+         else:
+             print_colored("\nNo answer provided.", "yellow")
+         print()
+
+
+ def main() -> None:
+     """Main entry point for the chat CLI."""
+     _check_dependencies()
+
+     # load_dotenv already called at module level if available
+
+     parser = argparse.ArgumentParser(
+         description="Interactive chat CLI for the CRM environment",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+   python chat.py
+   python chat.py --stream
+   python chat.py --question "How many companies are there?"
+   python chat.py --model gemini-2.0-flash --stream
+         """,
+     )
+
+     parser.add_argument(
+         "--base-url",
+         type=str,
+         default="https://env.crm.construct-labs.com",
+         help="Base URL for the CRM environment (default: https://env.crm.construct-labs.com)",
+     )
+     parser.add_argument(
+         "--model",
+         type=str,
+         default="gemini-2.0-flash",
+         help="Gemini model to use (default: gemini-2.0-flash)",
+     )
+     parser.add_argument(
+         "--api-key",
+         type=str,
+         default=None,
+         help="Google API key (default: uses GOOGLE_API_KEY env var)",
+     )
+     parser.add_argument(
+         "--question",
+         type=str,
+         default=None,
+         help="Initial question/task for the agent",
+     )
+     parser.add_argument(
+         "--max-iterations",
+         type=int,
+         default=20,
+         help="Maximum tool call iterations per question (default: 20)",
+     )
+     parser.add_argument(
+         "--verbose",
+         action="store_true",
+         help="Print verbose output",
+     )
+     parser.add_argument(
+         "--stream",
+         action="store_true",
+         help="Stream tokens from the LLM as they are generated",
+     )
+
+     args = parser.parse_args()
+
+     api_key = args.api_key or os.getenv("GOOGLE_API_KEY")
+     if not api_key:
+         print_colored("Error: No Google API key provided.", "red")
+         print_colored("Set GOOGLE_API_KEY in .env file or use --api-key", "dim")
+         sys.exit(1)
+
+     client = genai.Client(api_key=api_key)
+
+     print_colored(f"Connecting to CRM environment at {args.base_url}...", "dim")
+     try:
+         env = CrmAgentEnv(base_url=args.base_url)
+     except Exception as e:
+         print_colored(f"Error connecting to environment: {e}", "red")
+         sys.exit(1)
+
+     gemini_tools = convert_openai_tools_to_gemini(env.tools)
+
+     system_prompt = """You are a helpful assistant with access to CRM tools.
+
+ Use the provided tools to help the user with their requests. You can make multiple tool calls to complete a task.
+
+ When you have gathered enough information or completed the task, call submit_answer with your response to the user."""
+
+     try:
+         with env:
+             run_chat(
+                 env=env,
+                 client=client,
+                 model_name=args.model,
+                 tools=gemini_tools,
+                 system_instruction=system_prompt,
+                 initial_question=args.question,
+                 max_iterations=args.max_iterations,
+                 verbose=args.verbose,
+                 stream=args.stream,
+             )
+     except KeyboardInterrupt:
+         print_colored("\n\nSession interrupted.", "yellow")
+
+
+ if __name__ == "__main__":
+     main()
@@ -486,14 +486,35 @@ LIST_NOTES: ToolDefinition = {
      "type": "function",
      "function": {
          "name": "list_notes",
-         "description": "List all notes in the CRM. Notes are attached to companies, people, or opportunities and contain meeting summaries, call logs, and updates.",
+         "description": "List all notes in the CRM. Notes are attached to companies, people, or opportunities and contain meeting summaries, call logs, and updates. Response includes pageInfo with hasNextPage, startCursor, and endCursor for pagination.",
          "parameters": {
              "type": "object",
              "properties": {
                  "limit": {
                      "type": "integer",
-                     "default": 10,
-                     "description": "Maximum number of notes to return. Default is 10. Use higher values to see more history.",
+                     "default": 60,
+                     "description": "Maximum number of notes to return (1-200). Default is 60, max is 200.",
+                 },
+                 "starting_after": {
+                     "type": "string",
+                     "description": "Cursor for forward pagination - returns notes after this cursor. Use endCursor from previous response's pageInfo.",
+                 },
+                 "ending_before": {
+                     "type": "string",
+                     "description": "Cursor for backward pagination - returns notes before this cursor. Use startCursor from previous response's pageInfo.",
+                 },
+                 "order_by": {
+                     "type": "string",
+                     "description": "Sort order in format 'field[ASC|DESC]'. Examples: 'createdAt[DESC]', 'updatedAt[ASC]'.",
+                 },
+                 "filter": {
+                     "type": "string",
+                     "description": "Filter in format 'field[comparator]:value'. Comparators: eq, neq, gt, gte, lt, lte, in, is, like, ilike, startsWith, containsAny. Quote strings/dates, not numbers. Examples: 'body[ilike]:\"%meeting%\"', 'createdAt[gte]:\"2026-01-01\"'. Use dot notation for related entities: 'company.name[ilike]:\"%acme%\"', 'person.email[eq]:\"john@example.com\"'.",
+                 },
+                 "depth": {
+                     "type": "integer",
+                     "default": 1,
+                     "description": "Relation depth: 0 returns only note fields, 1 includes related company/person/opportunity data. Default is 1.",
                  },
              },
              "required": [],
@@ -536,14 +557,35 @@ LIST_TASKS: ToolDefinition = {
      "type": "function",
      "function": {
          "name": "list_tasks",
-         "description": "List all tasks in the CRM. Tasks represent follow-ups, reminders, and action items that may be linked to companies, people, or opportunities.",
+         "description": "List all tasks in the CRM. Tasks represent follow-ups, reminders, and action items that may be linked to companies, people, or opportunities. Response includes pageInfo with hasNextPage, startCursor, and endCursor for pagination.",
          "parameters": {
              "type": "object",
              "properties": {
                  "limit": {
                      "type": "integer",
-                     "default": 10,
-                     "description": "Maximum number of tasks to return. Default is 10. Use higher values to see more tasks.",
+                     "default": 60,
+                     "description": "Maximum number of tasks to return (1-200). Default is 60, max is 200.",
+                 },
+                 "starting_after": {
+                     "type": "string",
+                     "description": "Cursor for forward pagination - returns tasks after this cursor. Use endCursor from previous response's pageInfo.",
+                 },
+                 "ending_before": {
+                     "type": "string",
+                     "description": "Cursor for backward pagination - returns tasks before this cursor. Use startCursor from previous response's pageInfo.",
+                 },
+                 "order_by": {
+                     "type": "string",
+                     "description": "Sort order in format 'field[ASC|DESC]'. Examples: 'dueAt[ASC]', 'createdAt[DESC]', 'status[ASC]'.",
+                 },
+                 "filter": {
+                     "type": "string",
+                     "description": "Filter in format 'field[comparator]:value'. Comparators: eq, neq, gt, gte, lt, lte, in, is, like, ilike, startsWith, containsAny. Quote strings/dates, not numbers. Examples: 'status[eq]:\"TODO\"', 'dueAt[lte]:\"2026-02-15\"', 'title[ilike]:\"%follow up%\"'. Use dot notation for related entities.",
+                 },
+                 "depth": {
+                     "type": "integer",
+                     "default": 1,
+                     "description": "Relation depth: 0 returns only task fields, 1 includes related company/person/opportunity data. Default is 1.",
                  },
              },
              "required": [],