orchestrator-core 4.6.0rc1-py3-none-any.whl → 4.6.0rc3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
orchestrator/__init__.py CHANGED
@@ -13,7 +13,7 @@
 
 """This is the orchestrator workflow engine."""
 
-__version__ = "4.6.0rc1"
+__version__ = "4.6.0rc3"
 
 
 from structlog import get_logger
orchestrator/agentic_app.py CHANGED
@@ -26,7 +26,7 @@ from orchestrator.cli.main import app as cli_app
 from orchestrator.llm_settings import LLMSettings, llm_settings
 
 if TYPE_CHECKING:
-    from pydantic_ai.models.openai import OpenAIModel
+    from pydantic_ai.models.openai import OpenAIChatModel
 
 logger = get_logger(__name__)
 
@@ -36,7 +36,7 @@ class LLMOrchestratorCore(OrchestratorCore):
         self,
         *args: Any,
         llm_settings: LLMSettings = llm_settings,
-        agent_model: "OpenAIModel | str | None" = None,
+        agent_model: "OpenAIChatModel | str | None" = None,
         **kwargs: Any,
     ) -> None:
        """Initialize the `LLMOrchestratorCore` class.
orchestrator/api/api_v1/endpoints/agent.py CHANGED
@@ -20,7 +20,6 @@ from pydantic_ai.agent import Agent
 from starlette.responses import Response
 from structlog import get_logger
 
-from orchestrator.llm_settings import llm_settings
 from orchestrator.search.agent import build_agent_instance
 from orchestrator.search.agent.state import SearchState
 
@@ -29,12 +28,12 @@ logger = get_logger(__name__)
 
 
 @cache
-def get_agent() -> Agent[StateDeps[SearchState], str]:
+def get_agent(request: Request) -> Agent[StateDeps[SearchState], str]:
     """Dependency to provide the agent instance.
 
     The agent is built once and cached for the lifetime of the application.
     """
-    return build_agent_instance(llm_settings.AGENT_MODEL)
+    return build_agent_instance(request.app.agent_model)
 
 
 @router.post("/")
orchestrator/cli/scheduler.py CHANGED
@@ -12,26 +12,23 @@
 # limitations under the License.
 
 
-import logging
 import time
 
 import typer
 
 from orchestrator.schedules.scheduler import (
-    get_paused_scheduler,
+    get_all_scheduler_tasks,
+    get_scheduler,
+    get_scheduler_task,
 )
 
-log = logging.getLogger(__name__)
-
 app: typer.Typer = typer.Typer()
 
 
 @app.command()
 def run() -> None:
     """Start scheduler and loop eternally to keep thread alive."""
-    with get_paused_scheduler() as scheduler:
-        scheduler.resume()
-
+    with get_scheduler():
         while True:
             time.sleep(1)
 
@@ -42,27 +39,23 @@ def show_schedule() -> None:
 
     in cli underscore is replaced by a dash `show-schedule`
     """
-    with get_paused_scheduler() as scheduler:
-        jobs = scheduler.get_jobs()
-
-        for job in jobs:
-            typer.echo(f"[{job.id}] Next run: {job.next_run_time} | Trigger: {job.trigger}")
+    for task in get_all_scheduler_tasks():
+        typer.echo(f"[{task.id}] Next run: {task.next_run_time} | Trigger: {task.trigger}")
 
 
 @app.command()
-def force(job_id: str) -> None:
-    """Force the execution of (a) scheduler(s) based on a job_id."""
-    with get_paused_scheduler() as scheduler:
-        job = scheduler.get_job(job_id)
+def force(task_id: str) -> None:
+    """Force the execution of (a) scheduler(s) based on a task_id."""
+    task = get_scheduler_task(task_id)
 
-        if not job:
-            typer.echo(f"Job '{job_id}' not found.")
+    if not task:
+        typer.echo(f"Task '{task_id}' not found.")
         raise typer.Exit(code=1)
 
-        typer.echo(f"Running job [{job.id}] now...")
+    typer.echo(f"Running Task [{task.id}] now...")
     try:
-            job.func(*job.args or (), **job.kwargs or {})
-            typer.echo("Job executed successfully.")
+        task.func(*task.args or (), **task.kwargs or {})
+        typer.echo("Task executed successfully.")
     except Exception as e:
-            typer.echo(f"Job execution failed: {e}")
+        typer.echo(f"Task execution failed: {e}")
         raise typer.Exit(code=1)
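
A hedged sketch of driving these Typer commands in-process with Typer's test runner (Typer exposes show_schedule as show-schedule; a configured database is assumed, since the job store reads from it):

    from typer.testing import CliRunner

    from orchestrator.cli.scheduler import app

    runner = CliRunner()
    print(runner.invoke(app, ["show-schedule"]).output)         # one line per persisted task

    result = runner.invoke(app, ["force", "unknown-task-id"])   # hypothetical task id
    print(result.exit_code)                                     # 1 when the task id is not found
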
orchestrator/llm_settings.py CHANGED
@@ -10,14 +10,31 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Annotated
+
 from pydantic import Field, field_validator
 from pydantic_settings import BaseSettings
 from structlog import get_logger
 
 logger = get_logger(__name__)
 
+EMBEDDING_DIMENSION_MIN = 200
+EMBEDDING_DIMENSION_MAX = 2000
+EMBEDDING_DIMENSION_DEFAULT = 1536
+
+EMBEDDING_DIMENSION_FIELD = Annotated[
+    int,
+    Field(
+        ge=EMBEDDING_DIMENSION_MIN,
+        le=EMBEDDING_DIMENSION_MAX,
+        default=EMBEDDING_DIMENSION_DEFAULT,
+        description="Embedding dimension: when embeddings are generated at a higher resolution than this setting, the least significant numbers will be truncated",
+    ),
+]
+
 
 class LLMSettings(BaseSettings):
+
     # Feature flags for LLM functionality
     SEARCH_ENABLED: bool = False  # Enable search/indexing with embeddings
     AGENT_ENABLED: bool = False  # Enable agentic functionality
@@ -27,7 +44,7 @@ class LLMSettings(BaseSettings):
     AGENT_MODEL_VERSION: str = "2025-01-01-preview"
     OPENAI_API_KEY: str = ""  # Change per provider (Azure, etc).
     # Embedding settings
-    EMBEDDING_DIMENSION: int = 1536
+    EMBEDDING_DIMENSION: EMBEDDING_DIMENSION_FIELD = 1536
     EMBEDDING_MODEL: str = "openai/text-embedding-3-small"  # See litellm docs for supported models.
     EMBEDDING_SAFE_MARGIN_PERCENT: float = Field(
         0.1, description="Safety margin as a percentage (e.g., 0.1 for 10%) for token budgeting.", ge=0, le=1
orchestrator/schedules/scheduler.py CHANGED
@@ -17,16 +17,16 @@ from datetime import datetime
 from typing import Any, Generator
 
 from apscheduler.executors.pool import ThreadPoolExecutor
-from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
+from apscheduler.jobstores.sqlalchemy import Job, SQLAlchemyJobStore
 from apscheduler.schedulers.background import BackgroundScheduler
 from more_itertools import partition
 from pydantic import BaseModel
 
+from orchestrator.db import db
 from orchestrator.db.filters import Filter
 from orchestrator.db.filters.filters import CallableErrorHandler
 from orchestrator.db.sorting import Sort
 from orchestrator.db.sorting.sorting import SortOrder
-from orchestrator.settings import app_settings
 from orchestrator.utils.helpers import camel_to_snake, to_camel
 
 executors = {
@@ -40,18 +40,37 @@ scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults)
 
 
 @contextmanager
-def get_paused_scheduler() -> Generator[BackgroundScheduler, Any, None]:
+def get_scheduler_store() -> Generator[SQLAlchemyJobStore, Any, None]:
+    store = SQLAlchemyJobStore(engine=db.engine)
     try:
-        scheduler.add_jobstore(SQLAlchemyJobStore(url=str(app_settings.DATABASE_URI)))
-    except ValueError:
-        pass
-    scheduler.start(paused=True)
-
-    try:
-        yield scheduler
+        yield store
     finally:
-        scheduler.shutdown()
-        scheduler._jobstores["default"].engine.dispose()
+        store.shutdown()
+
+
+def get_all_scheduler_tasks() -> list[Job]:
+    with get_scheduler_store() as scheduler_store:
+        return scheduler_store.get_all_jobs()
+
+
+def get_scheduler_task(job_id: str) -> Job | None:
+    with get_scheduler_store() as scheduler_store:
+        return scheduler_store.lookup_job(job_id)
+
+
+@contextmanager
+def get_scheduler(paused: bool = False) -> Generator[BackgroundScheduler, Any, None]:
+    with get_scheduler_store() as store:
+        try:
+            scheduler.add_jobstore(store)
+        except ValueError:
+            pass
+        scheduler.start(paused=paused)
+
+        try:
+            yield scheduler
+        finally:
+            scheduler.shutdown()
 
 
 class ScheduledTask(BaseModel):
@@ -149,9 +168,7 @@ def get_scheduler_tasks(
     sort_by: list[Sort] | None = None,
     error_handler: CallableErrorHandler = default_error_handler,
 ) -> tuple[list[ScheduledTask], int]:
-    with get_paused_scheduler() as pauzed_scheduler:
-        scheduled_tasks = pauzed_scheduler.get_jobs()
-
+    scheduled_tasks = get_all_scheduler_tasks()
     scheduled_tasks = filter_scheduled_tasks(scheduled_tasks, error_handler, filter_by)
     scheduled_tasks = sort_scheduled_tasks(scheduled_tasks, error_handler, sort_by)
 
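
A hedged usage sketch of the helpers added above (assumes the orchestrator database engine is initialised, since the job store is built from db.engine; the task id is hypothetical):

    from orchestrator.schedules.scheduler import get_all_scheduler_tasks, get_scheduler_task

    for job in get_all_scheduler_tasks():       # reads jobs straight from the SQLAlchemy job store
        print(job.id, job.next_run_time, job.trigger)

    job = get_scheduler_task("some-task-id")    # returns None for an unknown id
    if job is not None:
        job.func(*job.args or (), **job.kwargs or {})
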
orchestrator/search/agent/agent.py CHANGED
@@ -16,7 +16,7 @@ from typing import Any
 import structlog
 from pydantic_ai.ag_ui import StateDeps
 from pydantic_ai.agent import Agent
-from pydantic_ai.models.openai import OpenAIModel
+from pydantic_ai.models.openai import OpenAIChatModel
 from pydantic_ai.settings import ModelSettings
 from pydantic_ai.toolsets import FunctionToolset
 
@@ -28,12 +28,12 @@ logger = structlog.get_logger(__name__)
 
 
 def build_agent_instance(
-    model: str | OpenAIModel, agent_tools: list[FunctionToolset[Any]] | None = None
+    model: str | OpenAIChatModel, agent_tools: list[FunctionToolset[Any]] | None = None
 ) -> Agent[StateDeps[SearchState], str]:
     """Build and configure the search agent instance.
 
     Args:
-        model: The LLM model to use (string or OpenAIModel instance)
+        model: The LLM model to use (string or OpenAIChatModel instance)
         agent_tools: Optional list of additional toolsets to include
 
     Returns:
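
A hedged sketch of calling the updated signature; OpenAIChatModel is pydantic-ai's newer name for OpenAIModel, the model names are illustrative, and an OpenAI API key is assumed to be present in the environment:

    from pydantic_ai.models.openai import OpenAIChatModel

    from orchestrator.search.agent import build_agent_instance

    agent = build_agent_instance("openai:gpt-4o")            # model-name string
    agent = build_agent_instance(OpenAIChatModel("gpt-4o"))  # or an explicit model instance
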
orchestrator/search/core/embedding.py CHANGED
@@ -42,7 +42,7 @@ class EmbeddingIndexer:
                 max_retries=llm_settings.LLM_MAX_RETRIES,
             )
             data = sorted(resp.data, key=lambda e: e["index"])
-            return [row["embedding"] for row in data]
+            return [row["embedding"][: llm_settings.EMBEDDING_DIMENSION] for row in data]
         except (llm_exc.APIError, llm_exc.APIConnectionError, llm_exc.RateLimitError, llm_exc.Timeout) as e:
             logger.error("Embedding request failed", error=str(e))
             return [[] for _ in texts]
@@ -67,7 +67,7 @@ class QueryEmbedder:
                 timeout=5.0,
                 max_retries=0,  # No retries, prioritize speed.
             )
-            return resp.data[0]["embedding"]
+            return resp.data[0]["embedding"][: llm_settings.EMBEDDING_DIMENSION]
         except Exception as e:
             logger.error("Async embedding generation failed", error=str(e))
             return []
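
Both return paths now truncate each vector to the configured EMBEDDING_DIMENSION, keeping the leading components (matching the new settings description that "the least significant numbers will be truncated"). Illustrative values only:

    EMBEDDING_DIMENSION = 4                             # stands in for llm_settings.EMBEDDING_DIMENSION

    embedding = [0.12, -0.08, 0.33, 0.05, 0.91, -0.44]  # the model returned 6 components
    truncated = embedding[:EMBEDDING_DIMENSION]         # keep only the first 4
    print(truncated)                                    # [0.12, -0.08, 0.33, 0.05]
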
orchestrator_core-4.6.0rc1.dist-info/METADATA → orchestrator_core-4.6.0rc3.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: orchestrator-core
-Version: 4.6.0rc1
+Version: 4.6.0rc3
 Summary: This is the orchestrator workflow engine.
 Author-email: SURF <automation-beheer@surf.nl>
 Requires-Python: >=3.11,<3.14
orchestrator_core-4.6.0rc1.dist-info/RECORD → orchestrator_core-4.6.0rc3.dist-info/RECORD
@@ -1,8 +1,8 @@
-orchestrator/__init__.py,sha256=5_01EzLm3qIBb4g-j2hknYC1bKKR4yvTNcVx3hpF98Y,1450
-orchestrator/agentic_app.py,sha256=6C_-pbw4xLJah8--CPcopz6dym4V7AfX2DtAYIGljmk,3020
+orchestrator/__init__.py,sha256=4xARck_KGcUMZ2sp8i379W7wwCUi1Fhs333T_FD2L0M,1450
+orchestrator/agentic_app.py,sha256=ouiyyZiS4uS6Lox2DtbGGRnb2njJBMSHpSAGe-T5rX0,3028
 orchestrator/app.py,sha256=UPKQuDpg8MWNC6r3SRRbp6l9RBzwb00IMIaGRk-jbCU,13203
 orchestrator/exception_handlers.py,sha256=UsW3dw8q0QQlNLcV359bIotah8DYjMsj2Ts1LfX4ClY,1268
-orchestrator/llm_settings.py,sha256=UDehiEVXkRMfmPSfCTHQX8dtH2gLCGtZK_wQTU3yISg,2316
+orchestrator/llm_settings.py,sha256=dU6fHcInyP3UzFlr3h4b-P7WyHdm04NM7ZDu1XnLAsQ,2816
 orchestrator/log_config.py,sha256=1cPl_OXT4tEUyNxG8cwIWXrmadUm1E81vq0mdtrV-v4,1912
 orchestrator/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 orchestrator/security.py,sha256=iXFxGxab54aav7oHEKLAVkTgrQMJGHy6IYLojEnD7gI,2422
@@ -18,7 +18,7 @@ orchestrator/api/models.py,sha256=z9BDBx7uI4KBHWbD_LVrLsqNQ0_w-Mg9Qiy7PR_rZhk,59
 orchestrator/api/api_v1/__init__.py,sha256=GyHNfEFCGKQwRiN6rQmvSRH2iYX7npjMZn97n8XzmLU,571
 orchestrator/api/api_v1/api.py,sha256=1qQRsIxKXLW3kcmSV5u3_v1TZk5RcNWb4ZOyLguhTKY,3488
 orchestrator/api/api_v1/endpoints/__init__.py,sha256=GyHNfEFCGKQwRiN6rQmvSRH2iYX7npjMZn97n8XzmLU,571
-orchestrator/api/api_v1/endpoints/agent.py,sha256=BmsW0UDn9Cr2CBJTGfqrvYAmgZj0VNHk3WiPFFSilEU,1768
+orchestrator/api/api_v1/endpoints/agent.py,sha256=9_s3nchTr9ua_1Sxs0kJl2mH_20o-DlUaaQPmY4vRMk,1732
 orchestrator/api/api_v1/endpoints/health.py,sha256=iaxs1XX1_250_gKNsspuULCV2GEMBjbtjsmfQTOvMAI,1284
 orchestrator/api/api_v1/endpoints/processes.py,sha256=OVbt6FgFnJ4aHaYGIg0cPoim8mxDpdzJ4TGAyfB_kPw,16269
 orchestrator/api/api_v1/endpoints/product_blocks.py,sha256=kZ6ywIOsS_S2qGq7RvZ4KzjvaS1LmwbGWR37AKRvWOw,2146
@@ -40,7 +40,7 @@ orchestrator/cli/migrate_domain_models.py,sha256=WRXy_1OnziQwpsCFZXvjB30nDJtjj0i
 orchestrator/cli/migrate_tasks.py,sha256=bju8XColjSZD0v3rS4kl-24dLr8En_H4-6enBmqd494,7255
 orchestrator/cli/migrate_workflows.py,sha256=nxUpx0vgEIc_8aJrjAyrw3E9Dt8JmaamTts8oiQ4vHY,8923
 orchestrator/cli/migration_helpers.py,sha256=C5tpkP5WEBr7G9S-1k1hgSI8ili6xd9Z5ygc9notaK0,4110
-orchestrator/cli/scheduler.py,sha256=2q6xT_XVOodY3e_qzIV98MWNvKvrbFpOJajWesj1fcs,1911
+orchestrator/cli/scheduler.py,sha256=4jWpgxx0j0UFoba4Kw0nOEM6slr5XffDYBkm6hzK_C0,1766
 orchestrator/cli/domain_gen_helpers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 orchestrator/cli/domain_gen_helpers/fixed_input_helpers.py,sha256=uzpwsaau81hHSxNMOS9-o7kF-9_78R0f_UE0AvWooZQ,6775
 orchestrator/cli/domain_gen_helpers/helpers.py,sha256=tIPxn8ezED_xYZxH7ZAtQLwkDc6RNmLZVxWAoJ3a9lw,4203
@@ -258,7 +258,7 @@ orchestrator/migrations/versions/schema/2025-07-04_4b58e336d1bf_deprecating_work
 orchestrator/migrations/versions/schema/2025-07-28_850dccac3b02_update_description_of_resume_workflows_.py,sha256=R6Qoga83DJ1IL0WYPu0u5u2ZvAmqGlDmUMv_KtJyOhQ,812
 orchestrator/schedules/__init__.py,sha256=Zy0fTOBMGIRFoh5iVFDLF9_PRAFaONYDThGK9EsysWo,981
 orchestrator/schedules/resume_workflows.py,sha256=jRnVRWDy687pQu-gtk80ecwiLSdrvtL15tG3U2zWA6I,891
-orchestrator/schedules/scheduler.py,sha256=nnuehZnBbtC90MsFP_Q6kqcD1ihsq08vr1ALJ6jHF_s,5833
+orchestrator/schedules/scheduler.py,sha256=9d6n-J2_GB6crOoVSCK29IfaktfUyzQYcTZl7gRTZ5c,6250
 orchestrator/schedules/scheduling.py,sha256=_mbpHMhijey8Y56ebtJ4wVkrp_kPVRm8hoByzlQF4SE,2821
 orchestrator/schedules/task_vacuum.py,sha256=mxb7fsy1GphRwvUWi_lvwNaj51YAXUdIDlkOJd90AFI,874
 orchestrator/schedules/validate_products.py,sha256=_ucUG9HecskG2eN3tcDSiMzJK9gN3kZB1dXjrtxcApY,1324
@@ -280,13 +280,13 @@ orchestrator/search/__init__.py,sha256=2uhTQexKx-cdBP1retV3CYSNCs02s8WL3fhGvupRG
 orchestrator/search/export.py,sha256=_0ncVpTqN6AoQfW3WX0fWnDQX3hBz6ZGC31Beu4PVwQ,6678
 orchestrator/search/llm_migration.py,sha256=tJAfAoykMFIROQrKBKpAbDaGYDLKcmevKWjYrsBmuAY,6703
 orchestrator/search/agent/__init__.py,sha256=_O4DN0MSTUtr4olhyE0-2hsb7x3f_KURMCYjg8jV4QA,756
-orchestrator/search/agent/agent.py,sha256=EFhbv7tRTcdk2i9iFzuiGFjcu0N-j8yZprhbwc5oBeM,2063
+orchestrator/search/agent/agent.py,sha256=iWa4_achqh5zRIfcJvjmY3hmDuFVGpV_PxjgttLdokU,2075
 orchestrator/search/agent/json_patch.py,sha256=_Z5ULhLyeuOuy-Gr_DJR4eA-wo9F78qySKUt5F_SQvQ,1892
 orchestrator/search/agent/prompts.py,sha256=6EPubiSLFyICIeinfVUF6miU1nS2QTAhqgzm-l5O3PI,5810
 orchestrator/search/agent/state.py,sha256=WhvZu7N0NhD1DD5mfZSUAzYN4mu8dDyvQ4Tz9I-hLtg,1364
 orchestrator/search/agent/tools.py,sha256=m4Krtb-Qmep-JkbJ9-RC7QqKa0CuQJM6-Z6_PN-b8HU,14706
 orchestrator/search/core/__init__.py,sha256=q5G0z3nKjIHKFs1PkEG3nvTUy3Wp4kCyBtCbqUITj3A,579
-orchestrator/search/core/embedding.py,sha256=ESeI5Vcobb__CRRZE_RP-m4eAz8JUP8S16aGLJh4uAY,2751
+orchestrator/search/core/embedding.py,sha256=n16H5fZRlfn91wI8PfZPa1R39HwQd8T1nwlDOzcOUBU,2823
 orchestrator/search/core/exceptions.py,sha256=S_ZMEhrqsQBVqJ559FQ5J6tZU6BYLiU65AGWgSvgv_k,1159
 orchestrator/search/core/types.py,sha256=0U_m4ZmPwvL77hIx9yk7UyvkE8HoiRvEnGdY4mDLzCo,8853
 orchestrator/search/core/validators.py,sha256=zktY5A3RTBmfdARJoxoz9rnnyTZj7L30Kbmh9UTQz2o,1204
@@ -373,7 +373,7 @@ orchestrator/workflows/tasks/resume_workflows.py,sha256=T3iobSJjVgiupe0rClD34kUZ
 orchestrator/workflows/tasks/validate_product_type.py,sha256=lo2TX_MZOfcOmYFjLyD82FrJ5AAN3HOsE6BhDVFuy9Q,3210
 orchestrator/workflows/tasks/validate_products.py,sha256=GZJBoFF-WMphS7ghMs2-gqvV2iL1F0POhk0uSNt93n0,8510
 orchestrator/workflows/translations/en-GB.json,sha256=ST53HxkphFLTMjFHonykDBOZ7-P_KxksktZU3GbxLt0,846
-orchestrator_core-4.6.0rc1.dist-info/licenses/LICENSE,sha256=b-aA5OZQuuBATmLKo_mln8CQrDPPhg3ghLzjPjLn4Tg,11409
-orchestrator_core-4.6.0rc1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
-orchestrator_core-4.6.0rc1.dist-info/METADATA,sha256=C0vrYdm7AO0XDhb5lJ8JplynhdKsB-tu0tRWF9tyuB8,6253
-orchestrator_core-4.6.0rc1.dist-info/RECORD,,
+orchestrator_core-4.6.0rc3.dist-info/licenses/LICENSE,sha256=b-aA5OZQuuBATmLKo_mln8CQrDPPhg3ghLzjPjLn4Tg,11409
+orchestrator_core-4.6.0rc3.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+orchestrator_core-4.6.0rc3.dist-info/METADATA,sha256=yl83UYnpanmL7aawkE34ZHG0uBALcvgHh2MBHoY7Oq8,6253
+orchestrator_core-4.6.0rc3.dist-info/RECORD,,