chuk-tool-processor 0.6.29__py3-none-any.whl → 0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of chuk-tool-processor might be problematic.

@@ -0,0 +1,350 @@
+ # chuk_tool_processor/models/tool_spec.py
+ """
+ Formal tool specification with JSON Schema export, versioning, and capability discovery.
+
+ This module provides a unified way to describe tools with their:
+ - Input/output schemas (JSON Schema)
+ - Versioning information
+ - Capabilities (streaming, cancellable, idempotent, etc.)
+ - Export to various formats (OpenAI, MCP, Anthropic)
+ """
+
+ from __future__ import annotations
+
+ import inspect
+ from collections.abc import Callable
+ from enum import Enum
+ from typing import Any
+
+ from pydantic import BaseModel, Field
+
+
+ class ToolCapability(str, Enum):
+     """Capabilities that a tool can support."""
+
+     STREAMING = "streaming"  # Tool supports streaming responses
+     CANCELLABLE = "cancellable"  # Tool supports cancellation
+     IDEMPOTENT = "idempotent"  # Tool is safe to retry (same result)
+     CACHEABLE = "cacheable"  # Results can be cached
+     RATE_LIMITED = "rate_limited"  # Tool has rate limits
+     REQUIRES_AUTH = "requires_auth"  # Tool requires authentication
+     LONG_RUNNING = "long_running"  # Tool may take >30s
+     STATEFUL = "stateful"  # Tool maintains state across calls
+
+
+ class ToolSpec(BaseModel):
+     """
+     Formal tool specification with JSON Schema export and versioning.
+
+     This provides a complete description of a tool's interface, capabilities,
+     and metadata for discovery and validation.
+     """
+
+     # Core metadata
+     name: str = Field(..., description="Tool name (must be unique within namespace)")
+     version: str = Field(default="1.0.0", description="Semantic version (e.g., '1.2.3')")
+     description: str = Field(..., description="Human-readable description of what the tool does")
+     namespace: str = Field(default="default", description="Namespace for organizing tools")
+
+     # Schema definitions
+     parameters: dict[str, Any] = Field(
+         ...,
+         description="JSON Schema for tool parameters (input)",
+     )
+     returns: dict[str, Any] | None = Field(
+         None,
+         description="JSON Schema for return value (output). None if unstructured.",
+     )
+
+     # Capabilities and metadata
+     capabilities: list[ToolCapability] = Field(
+         default_factory=list,
+         description="List of capabilities this tool supports",
+     )
+     tags: list[str] = Field(
+         default_factory=list,
+         description="Tags for categorization (e.g., ['search', 'web'])",
+     )
+     examples: list[dict[str, Any]] = Field(
+         default_factory=list,
+         description="Example input/output pairs for documentation",
+     )
+
+     # Optional metadata
+     author: str | None = Field(None, description="Tool author/maintainer")
+     license: str | None = Field(None, description="License (e.g., 'MIT', 'Apache-2.0')")
+     documentation_url: str | None = Field(None, description="Link to full documentation")
+     source_url: str | None = Field(None, description="Link to source code")
+
+     # Execution hints
+     estimated_duration_seconds: float | None = Field(
+         None,
+         description="Typical execution time in seconds (for timeout planning)",
+     )
+     max_retries: int | None = Field(
+         None,
+         description="Maximum recommended retries (None = use default)",
+     )
+
+     # ------------------------------------------------------------------ #
+     # Capability checks
+     # ------------------------------------------------------------------ #
+     def has_capability(self, capability: ToolCapability) -> bool:
+         """Check if tool has a specific capability."""
+         return capability in self.capabilities
+
+     def is_streaming(self) -> bool:
+         """Check if tool supports streaming."""
+         return self.has_capability(ToolCapability.STREAMING)
+
+     def is_idempotent(self) -> bool:
+         """Check if tool is safe to retry."""
+         return self.has_capability(ToolCapability.IDEMPOTENT)
+
+     def is_cacheable(self) -> bool:
+         """Check if results can be cached."""
+         return self.has_capability(ToolCapability.CACHEABLE)
+
+     # ------------------------------------------------------------------ #
+     # Export formats
+     # ------------------------------------------------------------------ #
+     def to_openai(self) -> dict[str, Any]:
+         """
+         Export as OpenAI function calling format.
+
+         Returns:
+             Dict compatible with OpenAI's tools=[...] parameter
+         """
+         return {
+             "type": "function",
+             "function": {
+                 "name": self.name,
+                 "description": self.description,
+                 "parameters": self.parameters,
+             },
+         }
+
+     def to_anthropic(self) -> dict[str, Any]:
+         """
+         Export as Anthropic tool format.
+
+         Returns:
+             Dict compatible with Anthropic's tools parameter
+         """
+         return {
+             "name": self.name,
+             "description": self.description,
+             "input_schema": self.parameters,
+         }
+
+     def to_mcp(self) -> dict[str, Any]:
+         """
+         Export as MCP tool format.
+
+         Returns:
+             Dict compatible with MCP tool schema
+         """
+         result = {
+             "name": self.name,
+             "description": self.description,
+             "inputSchema": self.parameters,
+         }
+
+         # Add optional fields if present
+         if self.returns:
+             result["outputSchema"] = self.returns
+
+         return result
+
+     def to_json_schema(self) -> dict[str, Any]:
+         """
+         Export as pure JSON Schema (parameters only).
+
+         Returns:
+             JSON Schema dict for tool parameters
+         """
+         return self.parameters
+
+     def to_dict(self) -> dict[str, Any]:
+         """
+         Export complete spec as dict.
+
+         Returns:
+             Full tool specification as dictionary
+         """
+         return self.model_dump(exclude_none=True)
+
+     # ------------------------------------------------------------------ #
+     # Factory methods
+     # ------------------------------------------------------------------ #
+     @classmethod
+     def from_validated_tool(
+         cls,
+         tool_cls: type,
+         name: str | None = None,
+         namespace: str = "default",
+     ) -> ToolSpec:
+         """
+         Create ToolSpec from a ValidatedTool class.
+
+         Args:
+             tool_cls: ValidatedTool subclass
+             name: Override tool name (default: class name)
+             namespace: Tool namespace
+
+         Returns:
+             ToolSpec instance
+         """
+         from chuk_tool_processor.models.validated_tool import ValidatedTool
+
+         if not issubclass(tool_cls, ValidatedTool):
+             raise TypeError(f"{tool_cls.__name__} must be a ValidatedTool subclass")
+
+         # Extract metadata
+         tool_name = name or tool_cls.__name__
+         description = (tool_cls.__doc__ or f"{tool_name} tool").strip()
+
+         # Extract schemas
+         parameters = tool_cls.Arguments.model_json_schema()
+         returns = tool_cls.Result.model_json_schema() if hasattr(tool_cls, "Result") else None
+
+         # Detect capabilities
+         capabilities = []
+
+         # Check if tool is marked cacheable
+         if hasattr(tool_cls, "_cacheable") and tool_cls._cacheable:
+             capabilities.append(ToolCapability.CACHEABLE)
+
+         # Check if idempotent (common pattern: GET-like operations)
+         if "get" in tool_name.lower() or "read" in tool_name.lower():
+             capabilities.append(ToolCapability.IDEMPOTENT)
+
+         # Check if streaming
+         from chuk_tool_processor.models.streaming_tool import StreamingTool
+
+         if issubclass(tool_cls, StreamingTool):
+             capabilities.append(ToolCapability.STREAMING)
+
+         return cls(  # type: ignore[call-arg]
+             name=tool_name,
+             description=description,
+             namespace=namespace,
+             parameters=parameters,
+             returns=returns,
+             capabilities=capabilities,
+         )
+
+     @classmethod
+     def from_function(
+         cls,
+         func: Callable,
+         name: str | None = None,
+         description: str | None = None,
+         namespace: str = "default",
+     ) -> ToolSpec:
+         """
+         Create ToolSpec from a plain function.
+
+         Args:
+             func: Function to wrap
+             name: Tool name (default: function name)
+             description: Tool description (default: function docstring)
+             namespace: Tool namespace
+
+         Returns:
+             ToolSpec instance
+         """
+         # Extract metadata
+         tool_name = name or func.__name__
+         tool_description = description or (func.__doc__ or f"{tool_name} function").strip()
+
+         # Build parameter schema from function signature
+         sig = inspect.signature(func)
+         parameters: dict[str, Any] = {
+             "type": "object",
+             "properties": {},
+             "required": [],
+         }
+
+         for param_name, param in sig.parameters.items():
+             if param_name == "self":
+                 continue
+
+             # Build property schema
+             prop: dict[str, Any] = {}
+
+             # Try to infer type from annotation
+             if param.annotation != inspect.Parameter.empty:
+                 annotation = param.annotation
+                 # Handle basic types
+                 if annotation is str:
+                     prop["type"] = "string"
+                 elif annotation is int:
+                     prop["type"] = "integer"
+                 elif annotation is float:
+                     prop["type"] = "number"
+                 elif annotation is bool:
+                     prop["type"] = "boolean"
+                 elif annotation is list:
+                     prop["type"] = "array"
+                 elif annotation is dict:
+                     prop["type"] = "object"
+
+             # Add to schema
+             parameters["properties"][param_name] = prop
+
+             # Mark as required if no default
+             if param.default == inspect.Parameter.empty:
+                 parameters["required"].append(param_name)
+
+         return cls(  # type: ignore[call-arg]
+             name=tool_name,
+             description=tool_description,
+             namespace=namespace,
+             parameters=parameters,
+             returns=None,  # Can't infer return type from plain function
+             capabilities=[],
+         )
+
+
+ # ------------------------------------------------------------------ #
+ # Convenience decorators
+ # ------------------------------------------------------------------ #
+ def tool_spec(
+     *,
+     version: str = "1.0.0",
+     capabilities: list[ToolCapability] | None = None,
+     tags: list[str] | None = None,
+     estimated_duration_seconds: float | None = None,
+ ) -> Callable:
+     """
+     Decorator to attach tool specification metadata to a tool class.
+
+     Example:
+         @tool_spec(
+             version="2.1.0",
+             capabilities=[ToolCapability.CACHEABLE, ToolCapability.IDEMPOTENT],
+             tags=["search", "web"],
+             estimated_duration_seconds=2.0,
+         )
+         class SearchTool(ValidatedTool):
+             ...
+
+     Args:
+         version: Semantic version
+         capabilities: List of capabilities
+         tags: List of tags
+         estimated_duration_seconds: Estimated execution time
+
+     Returns:
+         Decorator function
+     """
+
+     def decorator(cls):
+         cls._tool_spec_version = version
+         cls._tool_spec_capabilities = capabilities or []
+         cls._tool_spec_tags = tags or []
+         cls._tool_spec_estimated_duration = estimated_duration_seconds
+         return cls
+
+     return decorator
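
For orientation, the new module supports a flow like the following. This is an illustrative sketch based only on the code added above, not an example shipped with the package; the `search` function in it is hypothetical.

```python
from chuk_tool_processor.models.tool_spec import ToolCapability, ToolSpec


def search(query: str, limit: int = 10) -> dict:
    """Search the web and return matching documents."""
    ...


# Infer a JSON Schema for the parameters from the function signature.
spec = ToolSpec.from_function(search, namespace="web")
assert spec.name == "search"
assert spec.parameters["required"] == ["query"]  # `limit` has a default

# Capability flags and format exports.
spec.capabilities.append(ToolCapability.CACHEABLE)
assert spec.is_cacheable()

openai_tool = spec.to_openai()        # {"type": "function", "function": {...}}
anthropic_tool = spec.to_anthropic()  # {"name": ..., "input_schema": {...}}
mcp_tool = spec.to_mcp()              # {"name": ..., "inputSchema": {...}}
```

The `tool_spec` decorator covers the class-based path: it stores the same kind of metadata (version, capabilities, tags, estimated duration) on the decorated tool class as `_tool_spec_*` attributes.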
@@ -23,7 +23,7 @@ import inspect
  import json
  from typing import Any, TypeVar

- from pydantic import BaseModel, ValidationError
+ from pydantic import BaseModel, ConfigDict, ValidationError

  from chuk_tool_processor.core.exceptions import ToolValidationError

@@ -97,11 +97,31 @@ class ValidatedTool(_ExportMixin, BaseModel):
      # Inner models - override in subclasses
      # ------------------------------------------------------------------ #
      class Arguments(BaseModel):  # noqa: D401 - acts as a namespace
-         """Input model"""
+         """Input model with LLM-friendly coercion defaults."""
+
+         model_config = ConfigDict(
+             # Keep numbers as numbers (do not coerce them to strings)
+             coerce_numbers_to_str=False,
+             # Strip whitespace from strings
+             str_strip_whitespace=True,
+             # Validate default values
+             validate_default=True,
+             # Be lenient with extra fields (ignore them)
+             extra="ignore",
+             # Use enum values instead of enum objects
+             use_enum_values=True,
+         )

      class Result(BaseModel):  # noqa: D401
          """Output model"""

+         model_config = ConfigDict(
+             # Validate default values in results too
+             validate_default=True,
+             # Use enum values in outputs
+             use_enum_values=True,
+         )
+
      # ------------------------------------------------------------------ #
      # Public entry-point called by the processor
      # ------------------------------------------------------------------ #
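
To see what the new `Arguments` defaults buy in practice, here is a minimal sketch; `EchoArguments` is a hypothetical subclass, not part of the diff, and the behaviour shown follows from standard Pydantic semantics plus the `ConfigDict` added above.

```python
from chuk_tool_processor.models.validated_tool import ValidatedTool


# Subclassing the inner Arguments model inherits its ConfigDict.
class EchoArguments(ValidatedTool.Arguments):
    text: str
    repeat: int = 1


args = EchoArguments.model_validate(
    {"text": "  hello  ", "repeat": "3", "debug": True}  # messy LLM-style input
)
assert args.text == "hello"        # str_strip_whitespace=True
assert args.repeat == 3            # "3" coerced by Pydantic's default lax mode
assert not hasattr(args, "debug")  # extra="ignore" drops unknown keys
```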
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: chuk-tool-processor
- Version: 0.6.29
+ Version: 0.8
  Summary: Async-native framework for registering, discovering, and executing tools referenced in LLM responses
  Author-email: CHUK Team <chrishayuk@somejunkmailbox.com>
  Maintainer-email: CHUK Team <chrishayuk@somejunkmailbox.com>
@@ -20,7 +20,7 @@ Classifier: Framework :: AsyncIO
  Classifier: Typing :: Typed
  Requires-Python: >=3.11
  Description-Content-Type: text/markdown
- Requires-Dist: chuk-mcp>=0.6
+ Requires-Dist: chuk-mcp>=0.7.1
  Requires-Dist: dotenv>=0.9.9
  Requires-Dist: psutil>=7.0.0
  Requires-Dist: pydantic>=2.11.3
@@ -72,12 +72,15 @@ Unlike full-fledged LLM frameworks (LangChain, LlamaIndex, etc.), CHUK Tool Proc
  Research code vs production code is about handling the edges:

  - **Timeouts**: Every tool execution has proper timeout handling
- - **Retries**: Automatic retry with exponential backoff
+ - **Retries**: Automatic retry with exponential backoff and deadline awareness
  - **Rate Limiting**: Global and per-tool rate limits with sliding windows
- - **Caching**: Intelligent result caching with TTL
- - **Error Handling**: Graceful degradation, never crashes your app
+ - **Caching**: Intelligent result caching with TTL and idempotency key support
+ - **Circuit Breakers**: Prevent cascading failures with automatic fault detection
+ - **Error Handling**: Machine-readable error codes with structured details
  - **Observability**: Structured logging, metrics, request tracing
  - **Safety**: Subprocess isolation for untrusted code
+ - **Type Safety**: Pydantic validation with LLM-friendly argument coercion
+ - **Tool Discovery**: Formal schema export (OpenAI, Anthropic, MCP formats)

  ### It's About Stacks

@@ -91,11 +94,13 @@ CHUK Tool Processor uses a **composable stack architecture**:
  │ tool calls

  ┌─────────────────────────────────┐
- │ Caching Wrapper │ ← Cache expensive results
+ │ Caching Wrapper │ ← Cache expensive results (idempotency keys)
  ├─────────────────────────────────┤
  │ Rate Limiting Wrapper │ ← Prevent API abuse
  ├─────────────────────────────────┤
- │ Retry Wrapper │ ← Handle transient failures
+ │ Retry Wrapper │ ← Handle transient failures (exponential backoff)
+ ├─────────────────────────────────┤
+ │ Circuit Breaker Wrapper │ ← Prevent cascading failures (CLOSED/OPEN/HALF_OPEN)
  ├─────────────────────────────────┤
  │ Execution Strategy │ ← How to run tools
  │ • InProcess (fast) │
@@ -639,6 +644,192 @@ processor = ToolProcessor(
  )
  ```

+ ### Advanced Production Features
+
+ Beyond basic configuration, CHUK Tool Processor includes several advanced features for production environments:
+
+ #### Circuit Breaker Pattern
+
+ Prevent cascading failures by automatically opening circuits for failing tools:
+
+ ```python
+ from chuk_tool_processor.core.processor import ToolProcessor
+
+ processor = ToolProcessor(
+     enable_circuit_breaker=True,
+     circuit_breaker_threshold=5,   # Open after 5 failures
+     circuit_breaker_timeout=60.0,  # Try recovery after 60s
+ )
+
+ # Circuit states: CLOSED → OPEN → HALF_OPEN → CLOSED
+ # - CLOSED: Normal operation
+ # - OPEN: Blocking requests (too many failures)
+ # - HALF_OPEN: Testing recovery with limited requests
+ ```
+
+ **How it works:**
+ 1. Tool fails repeatedly (hits threshold)
+ 2. Circuit opens → requests blocked immediately
+ 3. After timeout, circuit enters HALF_OPEN
+ 4. If test requests succeed → circuit closes
+ 5. If test requests fail → back to OPEN
+
+ **Benefits:**
+ - Prevents wasting resources on failing services
+ - Fast-fail for better UX
+ - Automatic recovery detection
+
+ #### Idempotency Keys
+
+ Automatically deduplicate LLM tool calls using SHA256-based keys:
+
+ ```python
+ from chuk_tool_processor.models.tool_call import ToolCall
+
+ # Idempotency keys are auto-generated
+ call1 = ToolCall(tool="search", arguments={"query": "Python"})
+ call2 = ToolCall(tool="search", arguments={"query": "Python"})
+
+ # Same arguments = same idempotency key
+ assert call1.idempotency_key == call2.idempotency_key
+
+ # Used automatically by caching layer
+ processor = ToolProcessor(enable_caching=True)
+ results1 = await processor.execute([call1])  # Executes
+ results2 = await processor.execute([call2])  # Cache hit!
+ ```
+
+ **Benefits:**
+ - Prevents duplicate executions from LLM retries
+ - Deterministic cache keys
+ - No manual key management needed
+
+ #### Tool Schema Export
+
+ Export tool definitions to multiple formats for LLM prompting:
+
+ ```python
+ from chuk_tool_processor.models.tool_spec import ToolSpec, ToolCapability
+ from chuk_tool_processor.models.validated_tool import ValidatedTool
+
+ @register_tool(name="weather")
+ class WeatherTool(ValidatedTool):
+     """Get current weather for a location."""
+
+     class Arguments(BaseModel):
+         location: str = Field(..., description="City name")
+
+     class Result(BaseModel):
+         temperature: float
+         conditions: str
+
+ # Generate tool spec
+ spec = ToolSpec.from_validated_tool(WeatherTool)
+
+ # Export to different formats
+ openai_format = spec.to_openai()        # For OpenAI function calling
+ anthropic_format = spec.to_anthropic()  # For Claude tools
+ mcp_format = spec.to_mcp()              # For MCP servers
+
+ # Example OpenAI format:
+ # {
+ #     "type": "function",
+ #     "function": {
+ #         "name": "weather",
+ #         "description": "Get current weather for a location.",
+ #         "parameters": {...}  # JSON Schema
+ #     }
+ # }
+ ```
+
+ **Use cases:**
+ - Generate tool definitions for LLM system prompts
+ - Documentation generation
+ - API contract validation
+ - Cross-platform tool sharing
+
+ #### Machine-Readable Error Codes
+
+ Structured error handling with error codes for programmatic responses:
+
+ ```python
+ from chuk_tool_processor.core.exceptions import (
+     ErrorCode,
+     ToolNotFoundError,
+     ToolTimeoutError,
+     ToolCircuitOpenError,
+ )
+
+ try:
+     results = await processor.process(llm_output)
+ except ToolNotFoundError as e:
+     if e.code == ErrorCode.TOOL_NOT_FOUND:
+         # Suggest available tools to LLM
+         available = e.details.get("available_tools", [])
+         print(f"Try one of: {available}")
+ except ToolTimeoutError as e:
+     if e.code == ErrorCode.TOOL_TIMEOUT:
+         # Inform LLM to use faster alternative
+         timeout = e.details["timeout"]
+         print(f"Tool timed out after {timeout}s")
+ except ToolCircuitOpenError as e:
+     if e.code == ErrorCode.TOOL_CIRCUIT_OPEN:
+         # Tell LLM this service is temporarily down
+         reset_time = e.details.get("reset_timeout")
+         print(f"Service unavailable, retry in {reset_time}s")
+
+ # All errors include .to_dict() for logging
+ error_dict = e.to_dict()
+ # {
+ #     "error": "ToolCircuitOpenError",
+ #     "code": "TOOL_CIRCUIT_OPEN",
+ #     "message": "Tool 'api_tool' circuit breaker is open...",
+ #     "details": {"tool_name": "api_tool", "failure_count": 5, ...}
+ # }
+ ```
+
+ **Available error codes:**
+ - `TOOL_NOT_FOUND` - Tool doesn't exist in registry
+ - `TOOL_EXECUTION_FAILED` - Tool execution error
+ - `TOOL_TIMEOUT` - Tool exceeded timeout
+ - `TOOL_CIRCUIT_OPEN` - Circuit breaker is open
+ - `TOOL_RATE_LIMITED` - Rate limit exceeded
+ - `TOOL_VALIDATION_ERROR` - Argument validation failed
+ - `MCP_CONNECTION_FAILED` - MCP server unreachable
+ - Plus 11 more for comprehensive error handling
+
+ #### LLM-Friendly Argument Coercion
+
+ Automatically coerce LLM outputs to correct types:
+
+ ```python
+ from chuk_tool_processor.models.validated_tool import ValidatedTool
+
+ class SearchTool(ValidatedTool):
+     class Arguments(BaseModel):
+         query: str
+         limit: int = 10
+         category: str = "all"
+
+ # Pydantic config for LLM outputs:
+ # - str_strip_whitespace=True → Remove accidental whitespace
+ # - extra="ignore" → Ignore unknown fields
+ # - use_enum_values=True → Convert enums to values
+ # - coerce_numbers_to_str=False → Keep type strictness
+
+ # LLM outputs often have quirks:
+ llm_output = {
+     "query": " Python tutorials ",  # Extra whitespace
+     "limit": "5",                   # String instead of int
+     "unknown_field": "ignored"      # Extra field
+ }
+
+ # ValidatedTool automatically coerces and validates
+ tool = SearchTool()
+ result = await tool.execute(**llm_output)
+ # ✅ Works! Whitespace stripped, "5" → 5, extra field ignored
+ ```
+
  ## Advanced Topics

  ### Using Subprocess Strategy