flock-core 0.4.0b4__py3-none-any.whl → 0.4.0b5__py3-none-any.whl

This diff shows the changes between package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages' contents as they appear in their respective public registries.

This release of flock-core has been flagged as potentially problematic.

@@ -18,7 +18,12 @@ from fastapi.responses import HTMLResponse
 from flock.core.logging.logging import get_logger

 # Import models and UI utils
-from .models import FlockAPIRequest, FlockAPIResponse
+from .models import (
+    FlockAPIRequest,
+    FlockAPIResponse,
+    FlockBatchRequest,
+    FlockBatchResponse,
+)

 # Import UI utils - assuming they are now in ui/utils.py

@@ -98,6 +103,68 @@ def create_api_router(flock_api: "FlockAPI") -> APIRouter:
             run_store.update_run_status(run_id, "failed", error_msg)
             raise HTTPException(status_code=500, detail=error_msg)

+    @router.post("/run/batch", response_model=FlockBatchResponse, tags=["API"])
+    async def run_batch_json(
+        request: FlockBatchRequest, background_tasks: BackgroundTasks
+    ):
+        """Run a batch of inputs through the flock workflow (expects JSON)."""
+        batch_id = None
+        try:
+            # Validate agent exists
+            if request.agent_name not in flock_instance.agents:
+                raise ValueError(f"Agent '{request.agent_name}' not found")
+
+            # Validate batch inputs
+            if (
+                isinstance(request.batch_inputs, list)
+                and not request.batch_inputs
+            ):
+                raise ValueError("Batch inputs list cannot be empty")
+
+            batch_id = str(uuid.uuid4())
+            run_store.create_batch(batch_id)  # Use RunStore
+            response = run_store.get_batch(
+                batch_id
+            )  # Get initial response from store
+
+            # Log batch size for monitoring
+            batch_size = (
+                len(request.batch_inputs)
+                if isinstance(request.batch_inputs, list)
+                else "CSV/DataFrame"
+            )
+            logger.info(
+                f"API request: run batch with '{request.agent_name}' (batch_id: {batch_id})",
+                batch_size=batch_size,
+            )
+
+            # Always run batch processing asynchronously
+            logger.debug(
+                f"Running batch with '{request.agent_name}' asynchronously (batch_id: {batch_id})"
+            )
+            # Call the helper method on the passed FlockAPI instance
+            background_tasks.add_task(
+                flock_api._run_batch,
+                batch_id,
+                request,
+            )
+            run_store.update_batch_status(batch_id, "running")
+            response.status = "running"  # Update local response copy too
+
+            return response
+        except ValueError as ve:
+            error_msg = f"Value error starting batch: {ve}"
+            logger.error(error_msg)
+            if batch_id:
+                run_store.update_batch_status(batch_id, "failed", str(ve))
+            raise HTTPException(status_code=400, detail=str(ve))
+        except Exception as e:
+            error_msg = f"Internal server error: {type(e).__name__}: {e!s}"
+            logger.error(error_msg, exc_info=True)
+            if batch_id:
+                run_store.update_batch_status(batch_id, "failed", error_msg)
+            raise HTTPException(status_code=500, detail=error_msg)
+
     @router.get("/run/{run_id}", response_model=FlockAPIResponse, tags=["API"])
     async def get_run_status(run_id: str):
         """Get the status of a specific run."""
@@ -108,6 +175,39 @@ def create_api_router(flock_api: "FlockAPI") -> APIRouter:
             raise HTTPException(status_code=404, detail="Run not found")
         return run_data

+    @router.get(
+        "/batch/{batch_id}", response_model=FlockBatchResponse, tags=["API"]
+    )
+    async def get_batch_status(batch_id: str):
+        """Get the status of a specific batch run.
+
+        Returns details including:
+        - Total number of items in the batch
+        - Number of completed items
+        - Percentage of completion
+        - Any partial results available (for running batches)
+        - Complete results (for completed batches)
+        """
+        logger.debug(f"API request: get status for batch_id: {batch_id}")
+        batch_data = run_store.get_batch(batch_id)
+        if not batch_data:
+            logger.warning(f"Batch ID not found: {batch_id}")
+            raise HTTPException(status_code=404, detail="Batch not found")
+
+        # Add useful info for client display
+        extra_info = {
+            "status": batch_data.status,
+            "completed_items": batch_data.completed_items,
+            "total_items": batch_data.total_items,
+            "progress_percentage": round(batch_data.progress_percentage, 1),
+            "has_partial_results": len(batch_data.results) > 0
+            and batch_data.status == "running",
+            "has_error": batch_data.error is not None,
+        }
+        logger.debug(f"Returning batch status: {extra_info}")
+
+        return batch_data
+
     @router.get("/agents", tags=["API"])
     async def list_agents():
         """List all available agents."""
@@ -162,7 +262,7 @@ def create_api_router(flock_api: "FlockAPI") -> APIRouter:

         logger.debug(f"Parsed form inputs for UI run: {form_inputs}")
         run_id = str(uuid.uuid4())
-        run_store.create_run(run_id)  # Use RunStore
+        run_store.create_run(run_id)
         logger.debug(
             f"Running flock '{agent_name}' synchronously from UI (run_id: {run_id})"
         )
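
Reviewer note: a minimal sketch of how a client might exercise the two new endpoints, assuming the router is mounted at the application root on the default port 8344 and that httpx is installed; the agent name and inputs below are placeholders, not values from the package.

    import httpx

    base = "http://127.0.0.1:8344"
    payload = {
        "agent_name": "my_agent",  # placeholder agent name
        "batch_inputs": [{"query": "a"}, {"query": "b"}],
        "parallel": True,
        "max_workers": 2,
    }
    # Kick off the batch; the endpoint returns immediately with status "running"
    batch = httpx.post(f"{base}/run/batch", json=payload).json()
    # Poll progress via the new status endpoint
    status = httpx.get(f"{base}/batch/{batch['batch_id']}").json()
    print(status["status"], status["progress_percentage"])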
flock/core/api/main.py CHANGED
@@ -8,6 +8,7 @@ from fastapi import FastAPI
 from fastapi.responses import RedirectResponse

 # Flock core imports
+from flock.core.api.models import FlockBatchRequest
 from flock.core.flock import Flock
 from flock.core.logging.logging import get_logger

@@ -114,6 +115,219 @@ class FlockAPI:
             self.run_store.update_run_status(run_id, "failed", str(e))
             raise  # Re-raise for the endpoint handler

+    async def _run_batch(self, batch_id: str, request: "FlockBatchRequest"):
+        """Executes a batch of runs (internal helper)."""
+        try:
+            if request.agent_name not in self.flock.agents:
+                raise ValueError(f"Agent '{request.agent_name}' not found")
+
+            logger.debug(
+                f"Executing batch run starting with '{request.agent_name}' (batch_id: {batch_id})",
+                batch_size=len(request.batch_inputs)
+                if isinstance(request.batch_inputs, list)
+                else "CSV",
+            )
+
+            # Import the thread pool executor here to avoid circular imports
+            import asyncio
+            import threading
+            from concurrent.futures import ThreadPoolExecutor
+
+            # Define a synchronous function to run the batch processing
+            def run_batch_sync():
+                # Use a new event loop for the batch processing
+                loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(loop)
+                try:
+                    # Set the total number of batch items if possible
+                    batch_size = (
+                        len(request.batch_inputs)
+                        if isinstance(request.batch_inputs, list)
+                        else 0
+                    )
+                    if batch_size > 0:
+                        # Directly call the store method - no need for asyncio here
+                        # since we're already in a separate thread
+                        self.run_store.set_batch_total_items(
+                            batch_id, batch_size
+                        )
+
+                    # Custom progress tracking wrapper
+                    class ProgressTracker:
+                        def __init__(self, store, batch_id, total_size):
+                            self.store = store
+                            self.batch_id = batch_id
+                            self.current_count = 0
+                            self.total_size = total_size
+                            self._lock = threading.Lock()
+                            self.partial_results = []
+
+                        def increment(self, result=None):
+                            with self._lock:
+                                self.current_count += 1
+                                if result is not None:
+                                    # Store partial result
+                                    self.partial_results.append(result)
+
+                                # Directly call the store method - no need for asyncio here
+                                # since we're already in a separate thread
+                                try:
+                                    self.store.update_batch_progress(
+                                        self.batch_id,
+                                        self.current_count,
+                                        self.partial_results,
+                                    )
+                                except Exception as e:
+                                    logger.error(
+                                        f"Error updating progress: {e}"
+                                    )
+                                return self.current_count
+
+                    # Create a progress tracker
+                    progress_tracker = ProgressTracker(
+                        self.run_store, batch_id, batch_size
+                    )
+
+                    # Define a custom worker that reports progress
+                    async def progress_aware_worker(index, item_inputs):
+                        try:
+                            result = await self.flock.run_async(
+                                start_agent=request.agent_name,
+                                input=item_inputs,
+                                box_result=request.box_results,
+                            )
+                            # Report progress after each item
+                            progress_tracker.increment(result)
+                            return result
+                        except Exception as e:
+                            logger.error(
+                                f"Error processing batch item {index}: {e}"
+                            )
+                            progress_tracker.increment(
+                                e if request.return_errors else None
+                            )
+                            if request.return_errors:
+                                return e
+                            return None
+
+                    # Process the batch items with progress tracking
+                    batch_inputs = request.batch_inputs
+                    if isinstance(batch_inputs, list):
+                        # Process list of inputs with progress tracking
+                        tasks = []
+                        for i, item_inputs in enumerate(batch_inputs):
+                            # Combine with static inputs if provided
+                            full_inputs = {
+                                **(request.static_inputs or {}),
+                                **item_inputs,
+                            }
+                            tasks.append(progress_aware_worker(i, full_inputs))
+
+                        # Run all tasks
+                        if request.parallel and request.max_workers > 1:
+                            # Run in parallel with semaphore for max_workers
+                            semaphore = asyncio.Semaphore(request.max_workers)
+
+                            async def bounded_worker(i, inputs):
+                                async with semaphore:
+                                    return await progress_aware_worker(
+                                        i, inputs
+                                    )
+
+                            bounded_tasks = []
+                            for i, item_inputs in enumerate(batch_inputs):
+                                full_inputs = {
+                                    **(request.static_inputs or {}),
+                                    **item_inputs,
+                                }
+                                bounded_tasks.append(
+                                    bounded_worker(i, full_inputs)
+                                )
+
+                            results = loop.run_until_complete(
+                                asyncio.gather(*bounded_tasks)
+                            )
+                        else:
+                            # Run sequentially
+                            results = []
+                            for i, item_inputs in enumerate(batch_inputs):
+                                full_inputs = {
+                                    **(request.static_inputs or {}),
+                                    **item_inputs,
+                                }
+                                result = loop.run_until_complete(
+                                    progress_aware_worker(i, full_inputs)
+                                )
+                                results.append(result)
+                    else:
+                        # Let the original run_batch_async handle DataFrame or CSV
+                        results = loop.run_until_complete(
+                            self.flock.run_batch_async(
+                                start_agent=request.agent_name,
+                                batch_inputs=request.batch_inputs,
+                                input_mapping=request.input_mapping,
+                                static_inputs=request.static_inputs,
+                                parallel=request.parallel,
+                                max_workers=request.max_workers,
+                                use_temporal=request.use_temporal,
+                                box_results=request.box_results,
+                                return_errors=request.return_errors,
+                                silent_mode=request.silent_mode,
+                                write_to_csv=request.write_to_csv,
+                            )
+                        )
+
+                    # Update progress one last time with final count
+                    if results:
+                        progress_tracker.current_count = len(results)
+                        self.run_store.update_batch_progress(
+                            batch_id,
+                            len(results),
+                            results,  # Include all results as partial results
+                        )
+
+                    # Update store with results from this thread
+                    self.run_store.update_batch_result(batch_id, results)
+
+                    logger.info(
+                        f"Batch run completed (batch_id: {batch_id})",
+                        num_results=len(results),
+                    )
+                    return results
+                except Exception as e:
+                    logger.error(
+                        f"Error in batch run {batch_id} (started with '{request.agent_name}'): {e!s}",
+                        exc_info=True,
+                    )
+                    # Update store status
+                    self.run_store.update_batch_status(
+                        batch_id, "failed", str(e)
+                    )
+                    return None
+                finally:
+                    loop.close()
+
+            # Run the batch processing in a thread pool
+            try:
+                loop = asyncio.get_running_loop()
+                with ThreadPoolExecutor() as pool:
+                    await loop.run_in_executor(pool, run_batch_sync)
+            except Exception as e:
+                error_msg = f"Error running batch in thread pool: {e!s}"
+                logger.error(error_msg, exc_info=True)
+                self.run_store.update_batch_status(
+                    batch_id, "failed", error_msg
+                )
+
+        except Exception as e:
+            logger.error(
+                f"Error setting up batch run {batch_id} (started with '{request.agent_name}'): {e!s}",
+                exc_info=True,
+            )
+            # Update store status
+            self.run_store.update_batch_status(batch_id, "failed", str(e))
+            raise  # Re-raise for the endpoint handler
+
     # --- UI Helper Methods (kept here as they are called by endpoints via self) ---

     def _parse_input_spec(self, input_spec: str) -> list[dict[str, str]]:
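
Reviewer note: `_run_batch` avoids re-entering FastAPI's already-running event loop by handing the whole batch to a worker thread that owns a private loop. A minimal, self-contained sketch of that pattern (names here are illustrative, not from the package):

    import asyncio
    from concurrent.futures import ThreadPoolExecutor

    async def work(item: int) -> int:
        await asyncio.sleep(0)  # stand-in for flock.run_async(...)
        return item * 2

    def run_sync(items: list[int]) -> list[int]:
        # This thread has no running loop of its own, so creating one and
        # calling run_until_complete is safe here.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            return loop.run_until_complete(
                asyncio.gather(*(work(i) for i in items))
            )
        finally:
            loop.close()

    async def main() -> None:
        loop = asyncio.get_running_loop()
        with ThreadPoolExecutor() as pool:
            results = await loop.run_in_executor(pool, run_sync, [1, 2, 3])
        print(results)  # [2, 4, 6]

    asyncio.run(main())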
flock/core/api/models.py CHANGED
@@ -32,3 +32,66 @@ class FlockAPIResponse(BaseModel):
         None, description="When the run completed"
     )
     error: str | None = Field(None, description="Error message if failed")
+
+
+class FlockBatchRequest(BaseModel):
+    """Request model for batch processing via JSON API."""
+
+    agent_name: str = Field(..., description="Name of the agent to run")
+    batch_inputs: list[dict[str, Any]] | str = Field(
+        ..., description="List of input dictionaries or path to CSV file"
+    )
+    input_mapping: dict[str, str] | None = Field(
+        None, description="Maps DataFrame/CSV column names to agent input keys"
+    )
+    static_inputs: dict[str, Any] | None = Field(
+        None, description="Inputs constant across all batch runs"
+    )
+    parallel: bool = Field(
+        default=True, description="Whether to run jobs in parallel"
+    )
+    max_workers: int = Field(
+        default=5, description="Max concurrent workers for parallel runs"
+    )
+    use_temporal: bool | None = Field(
+        None, description="Override Flock's enable_temporal setting"
+    )
+    box_results: bool = Field(
+        default=True, description="Wrap results in Box objects"
+    )
+    return_errors: bool = Field(
+        default=False, description="Return Exception objects for failed runs"
+    )
+    silent_mode: bool = Field(
+        default=True, description="Suppress output and show progress bar"
+    )
+    write_to_csv: str | None = Field(
+        None, description="Path to save results as CSV file"
+    )
+
+
+class FlockBatchResponse(BaseModel):
+    """Response model for batch processing requests."""
+
+    batch_id: str = Field(..., description="Unique ID for this batch run")
+    status: str = Field(..., description="Status of the batch run")
+    results: list[Any] = Field(
+        default_factory=list,
+        description="List of results from batch processing",
+    )
+    started_at: datetime = Field(..., description="When the batch run started")
+    completed_at: datetime | None = Field(
+        None, description="When the batch run completed"
+    )
+    error: str | None = Field(None, description="Error message if failed")
+
+    # Additional fields for batch progress tracking
+    total_items: int = Field(
+        0, description="Total number of items in the batch"
+    )
+    completed_items: int = Field(
+        0, description="Number of completed items in the batch"
+    )
+    progress_percentage: float = Field(
+        0.0, description="Percentage of completion (0-100)"
+    )
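
Reviewer note: for reference, a request that validates against the new model might look like the following sketch; every value is a placeholder.

    from flock.core.api.models import FlockBatchRequest

    req = FlockBatchRequest(
        agent_name="my_agent",  # placeholder agent name
        batch_inputs=[{"query": "a"}, {"query": "b"}],
        static_inputs={"language": "en"},  # merged into every batch item
        max_workers=2,
        return_errors=True,
    )
    print(req.model_dump())  # req.dict() on Pydantic v1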
flock/core/api/run_store.py CHANGED
@@ -3,10 +3,14 @@

 import threading
 from datetime import datetime
+from typing import Any

 from flock.core.logging.logging import get_logger

-from .models import FlockAPIResponse  # Import from the models file
+from .models import (  # Import from the models file
+    FlockAPIResponse,
+    FlockBatchResponse,
+)

 logger = get_logger("api.run_store")

@@ -16,6 +20,7 @@ class RunStore:

     def __init__(self):
         self._runs: dict[str, FlockAPIResponse] = {}
+        self._batches: dict[str, FlockBatchResponse] = {}
         self._lock = threading.Lock()  # Basic lock for thread safety

     def create_run(self, run_id: str) -> FlockAPIResponse:
@@ -69,4 +74,151 @@ class RunStore:
                     f"Attempted to update result for non-existent run_id: {run_id}"
                 )

+    def create_batch(self, batch_id: str) -> FlockBatchResponse:
+        """Creates a new batch record with 'starting' status."""
+        with self._lock:
+            if batch_id in self._batches:
+                logger.warning(
+                    f"Batch ID {batch_id} already exists. Overwriting."
+                )
+            response = FlockBatchResponse(
+                batch_id=batch_id,
+                status="starting",
+                results=[],
+                started_at=datetime.now(),
+                total_items=0,
+                completed_items=0,
+                progress_percentage=0.0,
+            )
+            self._batches[batch_id] = response
+            logger.debug(f"Created batch record for batch_id: {batch_id}")
+            return response
+
+    def get_batch(self, batch_id: str) -> FlockBatchResponse | None:
+        """Gets the status of a batch run."""
+        with self._lock:
+            return self._batches.get(batch_id)
+
+    def update_batch_status(
+        self, batch_id: str, status: str, error: str | None = None
+    ):
+        """Updates the status and potentially error of a batch run."""
+        with self._lock:
+            if batch_id in self._batches:
+                self._batches[batch_id].status = status
+                if error:
+                    self._batches[batch_id].error = error
+                if status in ["completed", "failed"]:
+                    self._batches[batch_id].completed_at = datetime.now()
+                    # When completed, ensure progress is 100%
+                    if (
+                        status == "completed"
+                        and self._batches[batch_id].total_items > 0
+                    ):
+                        self._batches[batch_id].completed_items = self._batches[
+                            batch_id
+                        ].total_items
+                        self._batches[batch_id].progress_percentage = 100.0
+                logger.debug(
+                    f"Updated status for batch_id {batch_id} to {status}"
+                )
+            else:
+                logger.warning(
+                    f"Attempted to update status for non-existent batch_id: {batch_id}"
+                )
+
+    def update_batch_result(self, batch_id: str, results: list[Any]):
+        """Updates the results of a completed batch run."""
+        with self._lock:
+            if batch_id in self._batches:
+                # Ensure results are serializable
+                final_results = [
+                    dict(r) if hasattr(r, "to_dict") else r for r in results
+                ]
+                self._batches[batch_id].results = final_results
+                self._batches[batch_id].status = "completed"
+                self._batches[batch_id].completed_at = datetime.now()
+
+                # Update progress tracking
+                self._batches[batch_id].completed_items = len(final_results)
+                self._batches[batch_id].total_items = len(final_results)
+                self._batches[batch_id].progress_percentage = 100.0
+
+                logger.debug(
+                    f"Updated results for completed batch_id: {batch_id}"
+                )
+            else:
+                logger.warning(
+                    f"Attempted to update results for non-existent batch_id: {batch_id}"
+                )
+
+    def set_batch_total_items(self, batch_id: str, total_items: int):
+        """Sets the total number of items in a batch."""
+        try:
+            with self._lock:
+                if batch_id in self._batches:
+                    self._batches[batch_id].total_items = total_items
+                    # Recalculate percentage
+                    if total_items > 0:
+                        self._batches[batch_id].progress_percentage = (
+                            self._batches[batch_id].completed_items
+                            / total_items
+                            * 100.0
+                        )
+                    logger.debug(
+                        f"Set total_items for batch_id {batch_id} to {total_items}"
+                    )
+                else:
+                    logger.warning(
+                        f"Attempted to set total_items for non-existent batch_id: {batch_id}"
+                    )
+        except Exception as e:
+            logger.error(f"Error setting batch total items: {e}", exc_info=True)
+
+    def update_batch_progress(
+        self,
+        batch_id: str,
+        completed_items: int,
+        partial_results: list[Any] = None,
+    ):
+        """Updates the progress of a batch run and optionally adds partial results.
+
+        Args:
+            batch_id: The ID of the batch to update
+            completed_items: The number of items that have been completed
+            partial_results: Optional list of results for completed items to add to the batch
+        """
+        try:
+            with self._lock:
+                if batch_id in self._batches:
+                    self._batches[batch_id].completed_items = completed_items
+
+                    # Calculate percentage if we have a total
+                    if self._batches[batch_id].total_items > 0:
+                        self._batches[batch_id].progress_percentage = (
+                            completed_items
+                            / self._batches[batch_id].total_items
+                            * 100.0
+                        )
+
+                    # Add partial results if provided
+                    if partial_results:
+                        # Ensure results are serializable
+                        final_results = [
+                            dict(r) if hasattr(r, "to_dict") else r
+                            for r in partial_results
+                        ]
+                        self._batches[batch_id].results = final_results
+
+                    logger.debug(
+                        f"Updated progress for batch_id {batch_id}: {completed_items}/{self._batches[batch_id].total_items} "
+                        f"({self._batches[batch_id].progress_percentage:.1f}%)"
+                    )
+                else:
+                    logger.warning(
+                        f"Attempted to update progress for non-existent batch_id: {batch_id}"
+                    )
+        except Exception as e:
+            logger.error(f"Error updating batch progress: {e}", exc_info=True)
+
     # Add methods for cleanup, persistence, etc. later
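
Reviewer note: the new store methods compose into a simple lifecycle. A sketch, assuming the class lives at flock.core.api.run_store as the relative imports suggest; IDs and payloads are placeholders.

    from flock.core.api.run_store import RunStore

    store = RunStore()
    store.create_batch("batch-1")                # status == "starting"
    store.set_batch_total_items("batch-1", 2)
    store.update_batch_progress("batch-1", 1, [{"answer": 42}])  # 50.0%
    store.update_batch_result("batch-1", [{"answer": 42}, {"answer": 43}])
    print(store.get_batch("batch-1").status)     # "completed"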
@@ -0,0 +1,38 @@
+# src/flock/api/runner.py
+"""Provides functionality to start the Flock API server."""
+
+from typing import TYPE_CHECKING
+
+from flock.core.logging.logging import get_logger
+
+if TYPE_CHECKING:
+    from flock.core.flock import Flock
+
+logger = get_logger("api.runner")
+
+
+def start_flock_api(
+    flock: "Flock",
+    host: str = "127.0.0.1",
+    port: int = 8344,
+    server_name: str = "Flock API",
+    create_ui: bool = False,
+) -> None:
+    """Start a REST API server for the given Flock instance."""
+    try:
+        # Import API class locally to avoid making it a hard dependency for core flock
+        from flock.core.api import FlockAPI
+    except ImportError:
+        logger.error(
+            "API components not found. Cannot start API. "
+            "Ensure 'fastapi' and 'uvicorn' are installed."
+        )
+        return
+
+    logger.info(
+        f"Preparing to start API server for Flock '{flock.name}' on {host}:{port} {'with UI' if create_ui else 'without UI'}"
+    )
+    api_instance = FlockAPI(flock)  # Pass the Flock instance to the API
+    api_instance.start(
+        host=host, port=port, server_name=server_name, create_ui=create_ui
+    )
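
Reviewer note: typical usage of the new helper, sketched under two assumptions: the import path (the module comment above says src/flock/api/runner.py, while the rest of the diff uses the flock.core.api package), and that the Flock constructor accepts a name keyword.

    from flock.core.flock import Flock
    from flock.core.api.runner import start_flock_api  # import path assumed

    flock = Flock(name="demo")  # constructor arguments are illustrative
    start_flock_api(flock, host="0.0.0.0", port=8344, create_ui=True)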
@@ -7,3 +7,4 @@ FLOCK_RUN_ID = "flock.run_id"
 FLOCK_LAST_AGENT = "flock.last_agent"
 FLOCK_LAST_RESULT = "flock.last_result"
 FLOCK_MODEL = "flock.model"
+FLOCK_BATCH_SILENT_MODE = "flock.batch_silent"