futurehouse-client 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -14,6 +14,10 @@ from futurehouse_client.models.rest import (
     StoreAgentStatePostRequest,
     StoreEnvironmentFrameRequest,
 )
+from futurehouse_client.utils.monitoring import (
+    external_trace,
+    insert_distributed_trace_headers,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -90,14 +94,32 @@ class JobClient:
         data = FinalEnvironmentRequest(status=status)
         try:
             async with httpx.AsyncClient(timeout=self.REQUEST_TIMEOUT) as client:
-                response = await client.patch(
-                    url=f"{self.base_uri}/v0.1/trajectories/{self.trajectory_id}/environment-frame",
-                    json=data.model_dump(mode="json"),
-                    headers={
-                        "Authorization": f"Bearer {self.oauth_jwt}",
-                        "x-trajectory-id": self.trajectory_id,
+                url = f"{self.base_uri}/v0.1/trajectories/{self.trajectory_id}/environment-frame"
+                headers = {
+                    "Authorization": f"Bearer {self.oauth_jwt}",
+                    "x-trajectory-id": self.trajectory_id,
+                }
+
+                with external_trace(
+                    url=url,
+                    method="PATCH",
+                    library="httpx",
+                    custom_params={
+                        "trajectory_id": self.trajectory_id,
+                        "agent": self.agent,
+                        "environment": self.environment,
+                        "status": status,
+                        "operation": "finalize_environment",
                     },
-                )
+                ):
+                    headers = insert_distributed_trace_headers(headers)
+
+                    response = await client.patch(
+                        url=url,
+                        json=data.model_dump(mode="json"),
+                        headers=headers,
+                    )
+
                 response.raise_for_status()
                 logger.debug(f"Environment updated with status {status}")
         except httpx.HTTPStatusError:
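
All three `JobClient` hunks in this release follow the same pattern: build the URL and headers up front, open an `external_trace` span, inject the NewRelic distributed-trace headers, then issue the `httpx` call inside the span. Below is a minimal standalone sketch of that pattern; `external_trace` and `insert_distributed_trace_headers` come from the new `futurehouse_client.utils.monitoring` module introduced in this diff, while the base URI, trajectory id, timeout, and payload are placeholders rather than values taken from the package.

```python
import httpx

from futurehouse_client.utils.monitoring import (
    external_trace,
    insert_distributed_trace_headers,
)

BASE_URI = "https://api.example.org"  # placeholder, not a real FutureHouse endpoint
TRAJECTORY_ID = "00000000-0000-0000-0000-000000000000"  # placeholder


async def finalize_with_tracing(status: str) -> httpx.Response:
    """Wrap an outbound PATCH in an external trace and propagate trace headers."""
    url = f"{BASE_URI}/v0.1/trajectories/{TRAJECTORY_ID}/environment-frame"
    headers = {"x-trajectory-id": TRAJECTORY_ID}

    async with httpx.AsyncClient(timeout=30) as client:
        # The span is a no-op when NewRelic is not installed or not initialized.
        with external_trace(
            url=url,
            method="PATCH",
            library="httpx",
            custom_params={"operation": "finalize_environment", "status": status},
        ):
            # Adds NewRelic distributed-trace headers only when the agent is active;
            # otherwise the dict comes back unchanged.
            headers = insert_distributed_trace_headers(headers)
            return await client.patch(url=url, json={"status": status}, headers=headers)
```

Because both helpers degrade to no-ops, the request path is identical whether or not the `monitoring` extra is installed.
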
@@ -145,14 +167,32 @@ class JobClient:
 
         try:
             async with httpx.AsyncClient(timeout=self.REQUEST_TIMEOUT) as client:
-                response = await client.post(
-                    url=f"{self.base_uri}/v0.1/trajectories/{self.trajectory_id}/agent-state",
-                    json=data.model_dump(mode="json"),
-                    headers={
-                        "Authorization": f"Bearer {self.oauth_jwt}",
-                        "x-trajectory-id": self.trajectory_id,
+                url = f"{self.base_uri}/v0.1/trajectories/{self.trajectory_id}/agent-state"
+                headers = {
+                    "Authorization": f"Bearer {self.oauth_jwt}",
+                    "x-trajectory-id": self.trajectory_id,
+                }
+
+                with external_trace(
+                    url=url,
+                    method="POST",
+                    library="httpx",
+                    custom_params={
+                        "trajectory_id": self.trajectory_id,
+                        "agent": self.agent,
+                        "environment": self.environment,
+                        "step": step,
+                        "timestep": self.current_timestep,
+                        "operation": "store_agent_state",
                     },
-                )
+                ):
+                    headers = insert_distributed_trace_headers(headers)
+
+                    response = await client.post(
+                        url=url,
+                        json=data.model_dump(mode="json"),
+                        headers=headers,
+                    )
                 response.raise_for_status()
                 logger.info(f"Successfully stored agent state for step {step}")
                 return response.json()
@@ -198,14 +238,34 @@ class JobClient:
 
         try:
             async with httpx.AsyncClient(timeout=self.REQUEST_TIMEOUT) as client:
-                response = await client.post(
-                    url=f"{self.base_uri}/v0.1/trajectories/{self.trajectory_id}/environment-frame",
-                    json=data.model_dump(mode="json"),
-                    headers={
-                        "Authorization": f"Bearer {self.oauth_jwt}",
-                        "x-trajectory-id": self.trajectory_id,
-                    },
-                )
+                url = f"{self.base_uri}/v0.1/trajectories/{self.trajectory_id}/environment-frame"
+                headers = {
+                    "Authorization": f"Bearer {self.oauth_jwt}",
+                    "x-trajectory-id": self.trajectory_id,
+                }
+
+                custom_params = {
+                    "trajectory_id": self.trajectory_id,
+                    "agent": self.agent,
+                    "environment": self.environment,
+                    "timestep": self.current_timestep,
+                    "operation": "store_environment_frame",
+                }
+                if self.current_step:
+                    custom_params["step"] = self.current_step
+                if state_identifier:
+                    custom_params["state_identifier"] = state_identifier
+
+                with external_trace(
+                    url=url, method="POST", library="httpx", custom_params=custom_params
+                ):
+                    headers = insert_distributed_trace_headers(headers)
+
+                    response = await client.post(
+                        url=url,
+                        json=data.model_dump(mode="json"),
+                        headers=headers,
+                    )
                 response.raise_for_status()
                 logger.debug(
                     f"Successfully stored environment frame for state {state_identifier}",
@@ -15,9 +15,19 @@ from uuid import UUID
 
 import cloudpickle
 from aviary.functional import EnvironmentBuilder
-from httpx import Client, HTTPStatusError
+from httpx import (
+    Client,
+    CloseError,
+    ConnectError,
+    ConnectTimeout,
+    HTTPStatusError,
+    NetworkError,
+    ReadError,
+    ReadTimeout,
+    RemoteProtocolError,
+)
 from pydantic import BaseModel, ConfigDict, model_validator
-from requests.exceptions import Timeout
+from requests.exceptions import RequestException, Timeout
 from tenacity import (
     retry,
     retry_if_exception_type,
@@ -37,11 +47,29 @@ from futurehouse_client.utils.module_utils import (
     OrganizationSelector,
     fetch_environment_function_docstring,
 )
+from futurehouse_client.utils.monitoring import (
+    external_trace,
+)
 
 logger = logging.getLogger(__name__)
 
 TaskRequest.model_rebuild()
 
+retry_if_connection_error = retry_if_exception_type((
+    # From requests
+    Timeout,
+    ConnectionError,
+    RequestException,
+    # From httpx
+    ConnectError,
+    ConnectTimeout,
+    ReadTimeout,
+    ReadError,
+    NetworkError,
+    RemoteProtocolError,
+    CloseError,
+))
+
 FILE_UPLOAD_IGNORE_PARTS = {
     ".ruff_cache",
     "__pycache__",
@@ -342,7 +370,7 @@ class RestClient:
     @retry(
         stop=stop_after_attempt(MAX_RETRY_ATTEMPTS),
         wait=wait_exponential(multiplier=RETRY_MULTIPLIER, max=MAX_RETRY_WAIT),
-        retry=retry_if_exception_type(Timeout),
+        retry=retry_if_connection_error,
     )
     def get_task(
         self, task_id: str | None = None, history: bool = False, verbose: bool = False
@@ -350,10 +378,22 @@
         """Get details for a specific task."""
         try:
             task_id = task_id or self.trajectory_id
-            response = self.client.get(
-                f"/v0.1/trajectories/{task_id}",
-                params={"history": history},
-            )
+            url = f"/v0.1/trajectories/{task_id}"
+            full_url = f"{self.base_url}{url}"
+
+            with external_trace(
+                url=full_url,
+                method="GET",
+                library="httpx",
+                custom_params={
+                    "operation": "get_job",
+                    "job_id": task_id,
+                },
+            ):
+                response = self.client.get(
+                    url,
+                    params={"history": history},
+                )
             response.raise_for_status()
             verbose_response = TaskResponseVerbose(**response.json())
             if verbose:
@@ -372,7 +412,7 @@
     @retry(
         stop=stop_after_attempt(MAX_RETRY_ATTEMPTS),
         wait=wait_exponential(multiplier=RETRY_MULTIPLIER, max=MAX_RETRY_WAIT),
-        retry=retry_if_exception_type(Timeout),
+        retry=retry_if_connection_error,
     )
     def create_task(self, task_data: TaskRequest | dict[str, Any]):
         """Create a new futurehouse task."""
@@ -398,7 +438,7 @@
     @retry(
         stop=stop_after_attempt(MAX_RETRY_ATTEMPTS),
         wait=wait_exponential(multiplier=RETRY_MULTIPLIER, max=MAX_RETRY_WAIT),
-        retry=retry_if_exception_type(Timeout),
+        retry=retry_if_connection_error,
     )
     def get_build_status(self, build_id: UUID | None = None) -> dict[str, Any]:
         """Get the status of a build."""
@@ -411,7 +451,7 @@
     @retry(
         stop=stop_after_attempt(MAX_RETRY_ATTEMPTS),
         wait=wait_exponential(multiplier=RETRY_MULTIPLIER, max=MAX_RETRY_WAIT),
-        retry=retry_if_exception_type(Timeout),
+        retry=retry_if_connection_error,
     )
     def create_job(self, config: JobDeploymentConfig) -> dict[str, Any]:  # noqa: PLR0915
         """Creates a futurehouse job deployment from the environment and environment files.
@@ -0,0 +1,246 @@
+"""Utilities for monitoring and observability integration.
+
+This module provides utilities for integrating with monitoring and observability tools
+like NewRelic. It handles availability checking and provides wrapper functions that
+conditionally use monitoring tools only when they're available and properly initialized.
+
+NOTE: NewRelic is an optional dependency. To use monitoring functionality, install
+the package with the monitoring extras:
+    pip install futurehouse-client[monitoring]
+
+Environment variables:
+    NEW_RELIC_ENVIRONMENT: The environment to use for NewRelic reporting (dev, staging, prod)
+    NEW_RELIC_CONFIG_FILE: Path to the NewRelic configuration file
+    NEW_RELIC_LICENSE_KEY: Your NewRelic license key
+"""
+
+import contextlib
+import json
+import logging
+import os
+from contextvars import ContextVar
+
+logger = logging.getLogger(__name__)
+
+# Check if NewRelic initialization is enabled (default: False)
+NEWRELIC_AUTO_INIT = (
+    os.environ.get("FUTUREHOUSE_NEWRELIC_AUTO_INIT", "false").lower() == "true"
+)
+
+# Check if NewRelic is installed
+try:
+    import newrelic.agent
+
+    NEWRELIC_INSTALLED = True
+except ImportError:
+    NEWRELIC_INSTALLED = False
+    logger.info("NewRelic package not installed")
+
+# Context variable to track NewRelic initialization state
+newrelic_initialized: ContextVar[bool] = ContextVar(
+    "newrelic_initialized", default=False
+)
+
+
+def ensure_newrelic() -> bool:  # noqa: PLR0911
+    """Check if NewRelic is available and initialize it if auto-init is enabled.
+
+    This will use environment variables:
+    - FUTUREHOUSE_NEWRELIC_AUTO_INIT: Set to "true" to enable automatic initialization (default: "false")
+    - NEW_RELIC_CONFIG_FILE: Path to the NewRelic config file (required)
+    - NEW_RELIC_ENVIRONMENT: Environment (dev, staging, prod)
+
+    Returns:
+        bool: True if NewRelic is available for use, False otherwise
+    """
+    if newrelic_initialized.get():
+        return True
+
+    if not NEWRELIC_AUTO_INIT:
+        return False
+
+    if not NEWRELIC_INSTALLED:
+        logger.info("NewRelic package is not installed")
+        return False
+
+    nr_config = os.environ.get("NEW_RELIC_CONFIG_FILE")
+    if not nr_config:
+        logger.warning("NEW_RELIC_CONFIG_FILE environment variable must be set")
+        return False
+
+    try:
+        nr_env = os.environ.get("NEW_RELIC_ENVIRONMENT", "dev")
+        newrelic.agent.initialize(nr_config, environment=nr_env)
+
+        app = newrelic.agent.application()
+        if app is None:
+            logger.warning("NewRelic initialization failed: no application returned")
+            return False
+
+        newrelic_initialized.set(True)
+        logger.info(f"NewRelic initialized successfully for environment: {nr_env}")
+    except Exception as e:
+        logger.warning(f"NewRelic initialization failed: {e}")
+        return False
+
+    else:
+        return True
+
+
+def insert_distributed_trace_headers(headers: dict[str, str]) -> dict[str, str]:
+    """Insert distributed trace headers if NewRelic is available.
+
+    Args:
+        headers: The headers dictionary to modify.
+
+    Returns:
+        The modified headers dictionary with NewRelic distributed trace headers if available,
+        otherwise the original headers.
+    """
+    if not ensure_newrelic():
+        return headers
+
+    try:
+        nr_headers: list[tuple[str, str]] = []
+        newrelic.agent.insert_distributed_trace_headers(nr_headers)
+        for header in nr_headers:
+            headers[header[0]] = header[1]
+    except Exception as e:
+        logger.info(f"Error inserting distributed trace headers: {e}")
+
+    return headers
+
+
+@contextlib.contextmanager
+def external_trace(
+    url: str,
+    method: str = "GET",
+    library: str = "httpx",
+    custom_params: dict | None = None,
+):
+    """Context manager for NewRelic external traces that works whether NewRelic is available or not.
+
+    Creates an ExternalTrace span in NewRelic for HTTP requests to external services. This provides detailed timing and proper distributed tracing between services.
+    "External" refers to HTTP requests made to services outside of your application (like third-party APIs or other microservices).
+
+    Args:
+        url: The URL being called.
+        method: The HTTP method (GET, POST, etc.).
+        library: The library being used for the HTTP call.
+        custom_params: Optional dictionary of custom parameters to add to the transaction.
+
+    Yields:
+        None: This is a context manager that doesn't yield a value.
+    """
+    if not ensure_newrelic():
+        yield
+        return
+
+    # Proceed with tracing
+    try:
+        with newrelic.agent.ExternalTrace(
+            library=library,
+            url=url,
+            method=method,
+        ):
+            txn = newrelic.agent.current_transaction()
+            if txn:
+                txn.add_custom_parameter("request_url", url)
+                txn.add_custom_parameter("request_method", method)
+
+                if custom_params:
+                    for key, value in custom_params.items():
+                        txn.add_custom_parameter(key, value)
+
+            yield
+    except Exception as e:
+        # If there's an exception in the transaction handling,
+        # log it but don't let it break the client
+        try:
+            txn = newrelic.agent.current_transaction()
+            if txn:
+                txn.add_custom_parameter("external_request_url", url)
+                txn.add_custom_parameter("external_request_method", method)
+                txn.add_custom_parameter("error_type", e.__class__.__name__)
+                txn.add_custom_parameter("error_message", str(e))
+                txn.record_exception(e)
+        except Exception as nr_error:
+            # If even the error handling fails, just log it
+            logger.info(f"Failed to record NewRelic error: {nr_error}")
+
+        # Always re-raise the original exception
+        raise
+
+
+@contextlib.contextmanager
+def monitored_transaction(
+    name: str, group: str = "Task", custom_params: dict | None = None
+):
+    """Context manager for NewRelic transactions that appear in distributed traces.
+
+    This uses WebTransaction for better visibility in distributed traces, even for
+    background job-type workloads.
+
+    Args:
+        name: Name of the transaction (e.g., 'run_crow_job')
+        group: Group for transaction categorization (default: 'Task')
+        custom_params: Optional dictionary of custom parameters to add to the transaction
+
+    Yields:
+        None: This is a context manager that doesn't yield a value.
+    """
+    if not ensure_newrelic():
+        logger.info("NewRelic not available, skipping transaction")
+        yield
+        return
+
+    try:
+        app = newrelic.agent.application()
+        if app is None:
+            logger.warning("No NewRelic application found, skipping transaction")
+            yield
+            return
+
+        parsed_headers = None
+        trace_context = os.environ.get("NEW_RELIC_DISTRIBUTED_TRACING_CONTEXT")
+        if trace_context:
+            try:
+                parsed_headers = json.loads(trace_context)
+            except Exception as e:
+                logger.warning(f"Failed to parse distributed trace context: {e}")
+        else:
+            logger.info("No distributed trace context found")
+
+        with newrelic.agent.WebTransaction(app, name, group=group):
+            if parsed_headers:
+                current_txn = newrelic.agent.current_transaction()
+                if current_txn:
+                    accepted = newrelic.agent.accept_distributed_trace_headers(
+                        parsed_headers
+                    )
+                    if not accepted:
+                        logger.warning("Failed to accept distributed trace headers")
+
+            if custom_params:
+                txn = newrelic.agent.current_transaction()
+                if txn:
+                    for key, value in custom_params.items():
+                        txn.add_custom_parameter(key, value)
+
+            yield
+        logger.info(f"Completed NewRelic transaction: {name}")
+    except Exception as e:
+        # If there's an exception in the transaction handling,
+        # log it but don't let it break the client
+        try:
+            txn = newrelic.agent.current_transaction()
+            if txn:
+                txn.add_custom_parameter("error_type", e.__class__.__name__)
+                txn.add_custom_parameter("error_message", str(e))
+                txn.record_exception(e)
+        except Exception as nr_error:
+            # If even the error handling fails, just log it
+            logger.info(f"Failed to record NewRelic error: {nr_error}")
+
+        logger.warning(f"Error in NewRelic transaction: {e}")
+        yield
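
Taken together, the new module exposes three entry points: `ensure_newrelic` (lazy, opt-in agent initialization), `insert_distributed_trace_headers`, and the two context managers. A hedged usage sketch follows; the config-file path and URL are hypothetical, and `FUTUREHOUSE_NEWRELIC_AUTO_INIT` is read at import time, so it must be set before the module is imported.

```python
import os

# The module reads FUTUREHOUSE_NEWRELIC_AUTO_INIT at import time, so set it first.
os.environ["FUTUREHOUSE_NEWRELIC_AUTO_INIT"] = "true"
os.environ["NEW_RELIC_CONFIG_FILE"] = "/etc/newrelic.ini"  # hypothetical path

from futurehouse_client.utils.monitoring import (
    ensure_newrelic,
    external_trace,
    monitored_transaction,
)

# False unless the newrelic extra is installed and the config file is valid.
print("NewRelic active:", ensure_newrelic())

# Both context managers degrade to no-ops when NewRelic is unavailable,
# so the wrapped code runs the same either way.
with monitored_transaction("example_task", custom_params={"job": "demo"}):
    with external_trace(
        url="https://api.example.com/resource",  # placeholder URL
        method="GET",
        custom_params={"operation": "example_call"},
    ):
        pass  # the real outbound HTTP call would go here
```
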
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: futurehouse-client
-Version: 0.0.4
+Version: 0.0.6
 Summary: A client for interacting with endpoints of the FutureHouse service.
 Author-email: FutureHouse technical staff <hello@futurehouse.org>
 Classifier: Operating System :: OS Independent
@@ -35,104 +35,81 @@ Requires-Dist: pytest-timeout; extra == "dev"
 Requires-Dist: pytest-xdist; extra == "dev"
 Requires-Dist: ruff; extra == "dev"
 Requires-Dist: setuptools_scm; extra == "dev"
+Provides-Extra: monitoring
+Requires-Dist: newrelic>=8.8.0; extra == "monitoring"
 
 # FutureHouse Platform API Documentation
 
-Documentation and tutorials for crow-client, a client for interacting with endpoints of the FutureHouse crow service.
-
-> FutureHouse's mascot is the crow. Therefore, some objects are named after the crow as a homage.
+Documentation and tutorials for futurehouse-client, a client for interacting with endpoints of the FutureHouse platform.
 
 <!--TOC-->
 
 - [Installation](#installation)
 - [Quickstart](#quickstart)
 - [Functionalities](#functionalities)
-  - [Stages](#stages)
 - [Authentication](#authentication)
-- [Job submission](#job-submission)
-- [Job Continuation](#job-continuation)
-- [Job retrieval](#job-retrieval)
+- [Task submission](#task-submission)
+- [Task Continuation](#task-continuation)
+- [Task retrieval](#task-retrieval)
 
 <!--TOC-->
 
 ## Installation
 
 ```bash
-uv pip install crow-client
+uv pip install futurehouse-client
 ```
 
 ## Quickstart
 
 ```python
-from crow_client import CrowClient, JobNames
+from futurehouse_client import FutureHouseClient, JobNames
 from pathlib import Path
 from aviary.core import DummyEnv
 import ldp
 
-client = CrowClient(
-    stage=Stage.PROD,
-    auth_type=AuthType.API_KEY,
+client = FutureHouseClient(
     api_key="your_api_key",
 )
 
-job_data = {
+task_data = {
     "name": JobNames.CROW,
-    "query": "Has anyone tested therapeutic exerkines in humans or NHPs?",
+    "query": "Which neglected diseases had a treatment developed by artificial intelligence?",
 }
 
-job_run_id = client.create_job(job_data)
+task_run_id = client.create_task(task_data)
 
-job_status = client.get_job(job_run_id)
+task_status = client.get_task(task_run_id)
 ```
 
-A quickstart example can be found in the [crow_client_notebook.ipynb](./docs/crow_client_notebook.ipynb) file, where we show how to submit and retrieve a job task, pass runtime configuration to the agent, and ask follow-up questions to the previous job.
+A quickstart example can be found in the [client_notebook.ipynb](https://github.com/Future-House/futurehouse-client-docs/blob/main/docs/client_notebook.ipynb) file, where we show how to submit and retrieve a job task, pass runtime configuration to the agent, and ask follow-up questions to the previous job.
 
 ## Functionalities
 
-Crow-client implements a RestClient (called `CrowClient`) with the following functionalities:
+FutureHouse client implements a RestClient (called `FutureHouseClient`) with the following functionalities:
 
-- [Authentication](#authtype): `auth_client`
-- [Job submission](#job-submission): `create_job(JobRequest)`
-- [Job status](#job-status): `get_job(job_id)`
-
-To create a `CrowClient`, you need to pass the following parameters:
-| Parameter | Type | Default | Description |
-| --- | --- | --- | --- |
-| stage | Stage | Stage.DEV | Where the job will be submitted? |
-| organization | str \| None | None | Which organization to use? |
-| auth_type | AuthType | AuthType.API_KEY | Which authentication method to use? |
-| api_key | str \| None | None | The API key to use for authentication, if using auth_type=AuthType.API_KEY. |
+- [Task submission](#task-submission): `create_task(TaskRequest)`
+- [Task status](#task-status): `get_task(task_id)`
 
-To instantiate a Client, we can use the following code:
+To create a `FutureHouseClient`, you need to pass a FutureHouse platform API key (see [Authentication](#authentication)):
 
 ```python
-from crow_client import CrowClient
-from crow_client.models import Stage, AuthType
+from futurehouse_client import FutureHouseClient
 
-client = CrowClient(
-    stage=Stage.PROD,
-    organization="your_organization",
-    auth_type=AuthType.API_KEY,
+client = FutureHouseClient(
     api_key="your_api_key",
 )
 ```
 
-### Stages
-
-The stage is where your job will be submitted. This parameter can be one of the following:
-| Name | Description |
-| --- | --- |
-| Stage.DEV | Development environment at https://dev.api.platform.futurehouse.org |
-| Stage.PROD | Production environment at https://api.platform.futurehouse.org |
-
 ## Authentication
 
-In order to use the `CrowClient`, you need to authenticate yourself. Authentication is done by providing an API key, which can be obtained directly from your [profile page in the FutureHouse platform](https://platform.futurehouse.org/profile).
+In order to use the `FutureHouseClient`, you need to authenticate yourself. Authentication is done by providing an API key, which can be obtained directly from your [profile page in the FutureHouse platform](https://platform.futurehouse.org/profile).
 
-## Job submission
+## Task submission
 
-`CrowClient` can be used to submit jobs to the FutureHouse platform. Using a `CrowClient` instance, you can submit jobs to the platform by calling the `create_job` method, which receives a `JobRequest` (or a dictionary with `kwargs`) and returns the job id.
-Aiming to make the submission of jobs as simple as possible, we have created a `JobNames` enum that contains the available job types.
+In the FutureHouse platform, we define the deployed combination of an agent and an environment as a `job`. To invoke a job, we need to submit a `task` (also called a `query`) to it.
+`FutureHouseClient` can be used to submit tasks/queries to available jobs in the FutureHouse platform. Using a `FutureHouseClient` instance, you can submit tasks to the platform by calling the `create_task` method, which receives a `TaskRequest` (or a dictionary with `kwargs`) and returns the task id.
+Aiming to make the submission of tasks as simple as possible, we have created a `JobNames` `enum` that contains the available task types.
 
 The available supported jobs are:
 | Alias | Job Name | Task type | Description |
@@ -143,27 +120,24 @@ The available supported jobs are:
 | `JobNames.DUMMY` | `job-futurehouse-dummy` | Dummy Task | This is a dummy task. Mainly for testing purposes. |
 
 Using `JobNames`, the client automatically adapts the job name to the current stage.
-The job submission looks like this:
+The task submission looks like this:
 
 ```python
-from crow_client import CrowClient, JobNames
-from crow_client.models import AuthType, Stage
+from futurehouse_client import FutureHouseClient, JobNames
 
-client = CrowClient(
-    stage=Stage.PROD,
-    auth_type=AuthType.API_KEY,
+client = FutureHouseClient(
     api_key="your_api_key",
 )
 
-job_data = {
-    "name": JobNames.CROW,
+task_data = {
+    "name": JobNames.OWL,
     "query": "Has anyone tested therapeutic exerkines in humans or NHPs?",
 }
 
-job_id = client.create_job(job_data)
+task_id = client.create_task(task_data)
 ```
 
-`JobRequest` has the following fields:
+`TaskRequest` has the following fields:
 
 | Field | Type | Description |
 | -------------- | ------------- | ------------------------------------------------------------------------------------------------------------------- |
@@ -175,51 +149,46 @@ job_id = client.create_job(job_data)
 `runtime_config` can receive an `AgentConfig` object with the desired kwargs. Check the available `AgentConfig` fields in the [LDP documentation](https://github.com/Future-House/ldp/blob/main/src/ldp/agent/agent.py#L87). Besides the `AgentConfig` object, we can also pass `timeout` and `max_steps` to limit the execution time and the number of steps the agent can take.
 Other specialised configurations are also available but are outside the scope of this documentation.
 
-## Job Continuation
+## Task Continuation
 
-Once a job is submitted and the answer is returned, FutureHouse platform allow you to ask follow-up questions to the previous job.
+Once a task is submitted and the answer is returned, the FutureHouse platform allows you to ask follow-up questions to the previous task.
 It is also possible through the platform API.
-To accomplish that, we can use the `runtime_config` we discussed in the [Job submission](#job-submission) section.
+To accomplish that, we can use the `runtime_config` we discussed in the [Task submission](#task-submission) section.
 
 ```python
-from crow_client import CrowClient, JobNames
-from crow_client.models import AuthType, Stage
+from futurehouse_client import FutureHouseClient, JobNames
 
-client = CrowClient(
-    stage=Stage.PROD,
-    auth_type=AuthType.API_KEY,
+client = FutureHouseClient(
     api_key="your_api_key",
 )
 
-job_data = {"name": JobNames.CROW, "query": "How many species of birds are there?"}
+task_data = {"name": JobNames.CROW, "query": "How many species of birds are there?"}
 
-job_id = client.create_job(job_data)
+task_id = client.create_task(task_data)
 
-continued_job_data = {
+continued_task_data = {
     "name": JobNames.CROW,
     "query": "From the previous answer, specifically, how many species of crows are there?",
-    "runtime_config": {"continued_job_id": job_id},
+    "runtime_config": {"continued_task_id": task_id},
 }
 
-continued_job_id = client.create_job(continued_job_data)
+continued_task_id = client.create_task(continued_task_data)
 ```
 
-## Job retrieval
+## Task retrieval
 
-Once a job is submitted, you can retrieve it by calling the `get_job` method, which receives a job id and returns a `JobResponse` object.
+Once a task is submitted, you can retrieve it by calling the `get_task` method, which receives a task id and returns a `TaskResponse` object.
 
 ```python
-from crow_client import CrowClient
-from crow_client.models import AuthType
+from futurehouse_client import FutureHouseClient
 
-client = CrowClient(
-    auth_type=AuthType.API_KEY,
+client = FutureHouseClient(
     api_key="your_api_key",
 )
 
-job_id = "job_id"
+task_id = "task_id"
 
-job_status = client.get_job(job_id)
+task_status = client.get_task(task_id)
 ```
 
-`job_status` contains information about the job. For instance, its `status`, `task`, `environment_name` and `agent_name`, and other fields specific to the job.
+`task_status` contains information about the task. For instance, its `status`, `task`, `environment_name` and `agent_name`, and other fields specific to the job.
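
The description above notes that `runtime_config` can carry an `AgentConfig` plus `timeout` and `max_steps`, but no example is shown for those limits. A minimal sketch follows, assuming the plain-dictionary form mirrors the field names described in the README; the API key and limit values are placeholders.

```python
from futurehouse_client import FutureHouseClient, JobNames

client = FutureHouseClient(api_key="your_api_key")

task_data = {
    "name": JobNames.CROW,
    "query": "How many species of crows are there?",
    # Assumed keys, mirroring the timeout/max_steps fields mentioned above.
    "runtime_config": {"timeout": 600, "max_steps": 10},
}

task_id = client.create_task(task_data)
print(client.get_task(task_id))
```
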
@@ -1,14 +1,15 @@
 futurehouse_client/__init__.py,sha256=ddxO7JE97c6bt7LjNglZZ2Ql8bYCGI9laSFeh9MP6VU,344
 futurehouse_client/clients/__init__.py,sha256=tFWqwIAY5PvwfOVsCje4imjTpf6xXNRMh_UHIKVI1_0,320
-futurehouse_client/clients/job_client.py,sha256=RNgdSJVI1vjQSypdfswHX0Gvv_XnKG4bZjklf3WdSuk,8828
-futurehouse_client/clients/rest_client.py,sha256=GtHDTGHdz0f6xzBWaufqh3f76Life66df6abrJrSKvA,25116
+futurehouse_client/clients/job_client.py,sha256=yBFKDNcFnuZDNgoK2d5037rbuzQ7TlSK6MmklEKV8EA,11056
+futurehouse_client/clients/rest_client.py,sha256=ZCGhKCNzDtaLRPjhciKXdjbR_PYHKkBt7SSa8ansuaM,25961
 futurehouse_client/models/__init__.py,sha256=ta3jFLM_LsDz1rKDmx8rja8sT7WtSKoFvMgLF0yFpvA,342
 futurehouse_client/models/app.py,sha256=G8_-I3aQnRIyxFJT3snTSWsPcZZ2nEvYkRaE5sVdeys,22299
 futurehouse_client/models/client.py,sha256=n4HD0KStKLm6Ek9nL9ylP-bkK10yzAaD1uIDF83Qp_A,1828
 futurehouse_client/models/rest.py,sha256=W-wNFTN7HALYFFphw-RQYRMm6_TSa1cl4T-mZ1msk90,393
 futurehouse_client/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 futurehouse_client/utils/module_utils.py,sha256=aFyd-X-pDARXz9GWpn8SSViUVYdSbuy9vSkrzcVIaGI,4955
-futurehouse_client-0.0.4.dist-info/METADATA,sha256=mb918ca4LdaKsctNjrzswLkfq-NAfD0s6CmjWqyvo3I,8898
-futurehouse_client-0.0.4.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-futurehouse_client-0.0.4.dist-info/top_level.txt,sha256=TRuLUCt_qBnggdFHCX4O_BoCu1j2X43lKfIZC-ElwWY,19
-futurehouse_client-0.0.4.dist-info/RECORD,,
+futurehouse_client/utils/monitoring.py,sha256=UjRlufe67kI3VxRHOd5fLtJmlCbVA2Wqwpd4uZhXkQM,8728
+futurehouse_client-0.0.6.dist-info/METADATA,sha256=K0-xlHNzmzmBOpCRJIXu-izmorzx4rik_Zx5giLjfoY,8137
+futurehouse_client-0.0.6.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+futurehouse_client-0.0.6.dist-info/top_level.txt,sha256=TRuLUCt_qBnggdFHCX4O_BoCu1j2X43lKfIZC-ElwWY,19
+futurehouse_client-0.0.6.dist-info/RECORD,,