edda_framework-0.1.0-py3-none-any.whl

edda/retry.py ADDED
@@ -0,0 +1,207 @@
+ """
+ Retry policy module for Edda framework.
+
+ This module provides retry configuration and metadata tracking for activities.
+ Inspired by Restate's retry mechanism and Temporal's retry policies.
+ """
+
+ import time
+ from dataclasses import dataclass, field
+ from typing import Any
+
+
+ @dataclass
+ class RetryPolicy:
+     """
+     Retry policy configuration for activities.
+
+     Inspired by Restate's retry mechanism with Edda-specific optimizations.
+
+     Attributes:
+         initial_interval: First retry delay in seconds
+         backoff_coefficient: Exponential backoff multiplier
+         max_interval: Maximum retry delay in seconds (caps exponential growth)
+         max_attempts: Maximum retry attempts (None = infinite, use with caution)
+         max_duration: Maximum total retry duration in seconds (None = infinite)
+         retryable_error_types: Tuple of exception types to retry
+         non_retryable_error_types: Tuple of exception types to never retry
+
+     Example:
+         # Default policy (5 attempts, exponential backoff)
+         policy = RetryPolicy()
+
+         # Custom policy
+         policy = RetryPolicy(
+             initial_interval=0.5,
+             backoff_coefficient=1.5,
+             max_attempts=10,
+             max_duration=120.0,
+         )
+
+         # Infinite retry (Restate-style, use with caution)
+         policy = RetryPolicy(max_attempts=None, max_duration=None)
+     """
+
+     # Backoff parameters
+     initial_interval: float = 1.0  # seconds
+     backoff_coefficient: float = 2.0  # exponential multiplier
+     max_interval: float = 60.0  # seconds (cap exponential growth)
+
+     # Retry limits
+     max_attempts: int | None = 5  # None = infinite (Restate-style)
+     max_duration: float | None = 300.0  # seconds (5 minutes), None = infinite
+
+     # Exception filtering
+     retryable_error_types: tuple[type[Exception], ...] = (Exception,)
+     non_retryable_error_types: tuple[type[Exception], ...] = ()
+
+     def is_retryable(self, error: Exception) -> bool:
+         """
+         Determine if an error is retryable.
+
+         Priority:
+             1. TerminalError -> always non-retryable
+             2. non_retryable_error_types -> non-retryable
+             3. retryable_error_types -> retryable
+             4. Default: non-retryable (safe default)
+
+         Args:
+             error: Exception to check
+
+         Returns:
+             True if error should be retried, False otherwise
+         """
+         # Import here to avoid circular dependency
+         from edda.exceptions import TerminalError
+
+         # TerminalError always stops retry
+         if isinstance(error, TerminalError):
+             return False
+
+         # Check explicit non-retryable types
+         if self.non_retryable_error_types and isinstance(error, self.non_retryable_error_types):
+             return False
+
+         # Check explicit retryable types (default: non-retryable)
+         return bool(self.retryable_error_types and isinstance(error, self.retryable_error_types))
+
+     def calculate_delay(self, attempt: int) -> float:
+         """
+         Calculate backoff delay for given attempt number.
+
+         Formula: delay = initial_interval * (backoff_coefficient ** (attempt - 1))
+         Capped at max_interval to prevent excessive delays.
+
+         Args:
+             attempt: Current attempt number (1-indexed)
+
+         Returns:
+             Delay in seconds (exponential backoff, capped at max_interval)
+
+         Example:
+             # Default policy: initial=1.0, coefficient=2.0, max=60.0
+             # Attempt 1: 1.0s
+             # Attempt 2: 2.0s
+             # Attempt 3: 4.0s
+             # Attempt 4: 8.0s
+             # Attempt 5: 16.0s
+             # Attempt 6: 32.0s
+             # Attempt 7: 60.0s (capped)
+             # Attempt 8: 60.0s (capped)
+         """
+         delay = self.initial_interval * (self.backoff_coefficient ** (attempt - 1))
+         return min(delay, self.max_interval)
+
+
+ @dataclass
+ class RetryMetadata:
+     """
+     Track retry attempts for observability.
+
+     This metadata is stored in workflow history for debugging and monitoring.
+
+     Attributes:
+         total_attempts: Total number of attempts made
+         total_duration_ms: Total time spent retrying (milliseconds)
+         exhausted: Whether max retries were reached
+         errors: List of error information for each attempt
+         last_error: Information about the last error encountered
+     """
+
+     total_attempts: int = 0
+     total_duration_ms: int = 0
+     exhausted: bool = False
+     errors: list[dict[str, Any]] = field(default_factory=list)
+     last_error: dict[str, Any] | None = None
+
+     def add_attempt(self, attempt: int, error: Exception) -> None:
+         """
+         Record a failed attempt.
+
+         Args:
+             attempt: Attempt number (1-indexed)
+             error: Exception that caused the failure
+         """
+         self.total_attempts = attempt
+         error_info = {
+             "attempt": attempt,
+             "error_type": type(error).__name__,
+             "message": str(error),
+             "timestamp_ms": int(time.time() * 1000),
+         }
+         self.errors.append(error_info)
+         self.last_error = {
+             "error_type": type(error).__name__,
+             "message": str(error),
+         }
+
+     def to_dict(self) -> dict[str, Any]:
+         """
+         Convert to JSON-serializable dict for storage.
+
+         Returns:
+             Dictionary representation of retry metadata
+         """
+         return {
+             "total_attempts": self.total_attempts,
+             "total_duration_ms": self.total_duration_ms,
+             "exhausted": self.exhausted,
+             "errors": self.errors,
+             "last_error": self.last_error,
+         }
+
+
+ # Default retry policy
+ DEFAULT_RETRY_POLICY = RetryPolicy(
+     initial_interval=1.0,  # Start with 1 second delay
+     backoff_coefficient=2.0,  # Standard exponential backoff
+     max_interval=60.0,  # Cap at 60 seconds
+     max_attempts=5,  # Balance between resilience and fail-fast
+     max_duration=300.0,  # 5 minutes total (prevents runaway retry)
+ )
+
+
+ # Preset policies for common scenarios
+ AGGRESSIVE_RETRY = RetryPolicy(
+     initial_interval=0.1,  # Fast retries for low-latency services
+     backoff_coefficient=1.5,  # Slower exponential growth
+     max_interval=10.0,  # Short max delay
+     max_attempts=10,  # More attempts
+     max_duration=60.0,  # 1 minute total
+ )
+
+ CONSERVATIVE_RETRY = RetryPolicy(
+     initial_interval=5.0,  # Wait longer between attempts
+     backoff_coefficient=2.0,  # Standard exponential
+     max_interval=300.0,  # Up to 5 minutes between retries
+     max_attempts=3,  # Fewer attempts (fail faster)
+     max_duration=900.0,  # 15 minutes total
+ )
+
+ INFINITE_RETRY = RetryPolicy(
+     initial_interval=1.0,
+     backoff_coefficient=2.0,
+     max_interval=60.0,
+     max_attempts=None,  # Infinite attempts (Restate-style)
+     max_duration=None,  # Infinite duration (use with caution)
+ )
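
To make the retry flow concrete, here is a minimal driver loop. It is not part of the package: run_with_retries and its wiring are hypothetical, and the framework's own activity executor may differ, but it composes is_retryable(), calculate_delay(), and RetryMetadata exactly as documented above.

    import time

    from edda.retry import DEFAULT_RETRY_POLICY, RetryMetadata

    def run_with_retries(activity, *args):
        """Hypothetical driver: retry `activity` under DEFAULT_RETRY_POLICY."""
        policy = DEFAULT_RETRY_POLICY
        metadata = RetryMetadata()
        started = time.time()
        attempt = 1  # calculate_delay() expects 1-indexed attempts
        while True:
            try:
                return activity(*args)
            except Exception as error:
                metadata.add_attempt(attempt, error)
                elapsed = time.time() - started
                out_of_attempts = policy.max_attempts is not None and attempt >= policy.max_attempts
                out_of_time = policy.max_duration is not None and elapsed >= policy.max_duration
                if not policy.is_retryable(error) or out_of_attempts or out_of_time:
                    metadata.exhausted = out_of_attempts or out_of_time
                    metadata.total_duration_ms = int(elapsed * 1000)
                    raise  # metadata.to_dict() would be persisted to workflow history here
                time.sleep(policy.calculate_delay(attempt))
                attempt += 1
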
edda/serialization/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """Serialization layer for Edda framework."""
+
+ from edda.serialization.base import SerializerProtocol
+ from edda.serialization.json import JSONSerializer
+
+ __all__ = [
+     "SerializerProtocol",
+     "JSONSerializer",
+ ]
edda/serialization/base.py ADDED
@@ -0,0 +1,83 @@
+ """
+ Base serialization protocol for Edda framework.
+
+ This module defines the SerializerProtocol that all serializers must implement.
+ """
+
+ from typing import Any, Protocol, runtime_checkable
+
+
+ @runtime_checkable
+ class SerializerProtocol(Protocol):
+     """
+     Protocol for serialization implementations.
+
+     Serializers are used to encode/decode CloudEvent data payloads.
+     Edda supports JSON serialization.
+     """
+
+     @property
+     def content_type(self) -> str:
+         """
+         Get the Content-Type header value for this serializer.
+
+         Returns:
+             Content-Type string (e.g., "application/json")
+         """
+         ...
+
+     def serialize(self, data: Any) -> bytes:
+         """
+         Serialize data to bytes.
+
+         Args:
+             data: Data to serialize (typically a dict for JSON)
+
+         Returns:
+             Serialized bytes
+
+         Raises:
+             ValueError: If data cannot be serialized
+         """
+         ...
+
+     def deserialize(self, data: bytes, message_type: type[Any] | None = None) -> Any:
+         """
+         Deserialize bytes to data.
+
+         Args:
+             data: Serialized bytes
+             message_type: Optional message type (unused for JSON serializer)
+
+         Returns:
+             Deserialized data (typically a dict for JSON)
+
+         Raises:
+             ValueError: If data cannot be deserialized
+         """
+         ...
+
+     def to_dict(self, data: Any) -> dict[str, Any]:
+         """
+         Convert data to dictionary (for storage).
+
+         Args:
+             data: Data to convert
+
+         Returns:
+             Dictionary representation
+         """
+         ...
+
+     def from_dict(self, data: dict[str, Any], message_type: type[Any] | None = None) -> Any:
+         """
+         Convert dictionary to data (from storage).
+
+         Args:
+             data: Dictionary representation
+             message_type: Optional message type (unused for JSON serializer)
+
+         Returns:
+             Reconstructed data
+         """
+         ...
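
Because the protocol is decorated with @runtime_checkable, conformance is structural: any class providing these five members passes an isinstance() check, no inheritance required. A sketch with a hypothetical PickleSerializer (illustrative only; it is not part of Edda, and pickle is unsafe for untrusted input):

    import pickle
    from typing import Any

    from edda.serialization.base import SerializerProtocol

    class PickleSerializer:
        """Hypothetical alternative serializer, shown only to illustrate the protocol."""

        @property
        def content_type(self) -> str:
            return "application/octet-stream"

        def serialize(self, data: Any) -> bytes:
            return pickle.dumps(data)

        def deserialize(self, data: bytes, message_type: type[Any] | None = None) -> Any:
            return pickle.loads(data)

        def to_dict(self, data: Any) -> dict[str, Any]:
            return data if isinstance(data, dict) else {"value": data}

        def from_dict(self, data: dict[str, Any], message_type: type[Any] | None = None) -> Any:
            return data

    # Structural typing: no base class, yet isinstance works via @runtime_checkable
    assert isinstance(PickleSerializer(), SerializerProtocol)
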
edda/serialization/json.py ADDED
@@ -0,0 +1,102 @@
+ """
+ JSON serialization implementation for Edda framework.
+
+ This is the default serializer, using Python's standard library json module.
+ """
+
+ import json
+ from typing import Any
+
+
+ class JSONSerializer:
+     """
+     JSON serializer implementation.
+
+     Uses Python's standard library json module for serialization.
+     This is the default and recommended serializer for most use cases.
+     """
+
+     @property
+     def content_type(self) -> str:
+         """Get Content-Type header."""
+         return "application/json"
+
+     def serialize(self, data: Any) -> bytes:
+         """
+         Serialize data to JSON bytes.
+
+         Args:
+             data: Data to serialize (must be JSON-serializable)
+
+         Returns:
+             UTF-8 encoded JSON bytes
+
+         Raises:
+             ValueError: If data is not JSON-serializable
+         """
+         try:
+             json_str = json.dumps(data, ensure_ascii=False, sort_keys=True)
+             return json_str.encode("utf-8")
+         except (TypeError, ValueError) as e:
+             raise ValueError(f"Failed to serialize data to JSON: {e}") from e
+
+     def deserialize(self, data: bytes, _message_type: type[Any] | None = None) -> Any:
+         """
+         Deserialize JSON bytes to data.
+
+         Args:
+             data: UTF-8 encoded JSON bytes
+             _message_type: Ignored for JSON serializer
+
+         Returns:
+             Deserialized Python data (dict, list, etc.)
+
+         Raises:
+             ValueError: If data is not valid JSON
+         """
+         try:
+             json_str = data.decode("utf-8")
+             return json.loads(json_str)
+         except (UnicodeDecodeError, json.JSONDecodeError) as e:
+             raise ValueError(f"Failed to deserialize JSON data: {e}") from e
+
+     def to_dict(self, data: Any) -> dict[str, Any]:
+         """
+         Convert data to dictionary.
+
+         For JSON serializer, this is typically a no-op if data is already a dict.
+
+         Args:
+             data: Data to convert
+
+         Returns:
+             Dictionary representation
+         """
+         if isinstance(data, dict):
+             return data
+         elif isinstance(data, str):
+             # Try to parse as JSON
+             try:
+                 result = json.loads(data)
+                 if isinstance(result, dict):
+                     return result
+             except json.JSONDecodeError:
+                 pass
+
+         # Wrap in dict if not already
+         return {"value": data}
+
+     def from_dict(self, data: dict[str, Any], _message_type: type[Any] | None = None) -> Any:
+         """
+         Convert dictionary to data.
+
+         For JSON serializer, this is typically a no-op.
+
+         Args:
+             data: Dictionary representation
+             _message_type: Ignored for JSON serializer
+
+         Returns:
+             Data (usually just returns the dict)
+         """
+         return data
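
A short round trip using only the methods defined above. Note how sort_keys=True makes the output byte-stable (useful when payloads are hashed or deduplicated), and how to_dict() passes dicts through, parses JSON strings, and wraps anything else:

    from edda.serialization import JSONSerializer

    s = JSONSerializer()

    raw = s.serialize({"b": 2, "a": 1})       # b'{"a": 1, "b": 2}' (keys sorted)
    assert s.deserialize(raw) == {"a": 1, "b": 2}

    assert s.to_dict({"x": 1}) == {"x": 1}    # dict: passed through
    assert s.to_dict('{"x": 1}') == {"x": 1}  # JSON string: parsed
    assert s.to_dict(42) == {"value": 42}     # anything else: wrapped
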
edda/storage/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """Storage layer for Edda framework."""
+
+ from edda.storage.protocol import StorageProtocol
+ from edda.storage.sqlalchemy_storage import SQLAlchemyStorage
+
+ __all__ = [
+     "StorageProtocol",
+     "SQLAlchemyStorage",
+ ]
edda/storage/models.py ADDED
@@ -0,0 +1,194 @@
+ """
+ SQLite database schema for Edda framework.
+
+ This module defines the table schemas for storing workflow instances,
+ execution history, compensations, event subscriptions, and outbox events.
+ """
+
+ # SQL schema for workflow definitions (source code storage)
+ WORKFLOW_DEFINITIONS_TABLE = """
+ CREATE TABLE IF NOT EXISTS workflow_definitions (
+     workflow_name TEXT NOT NULL,
+     source_hash TEXT NOT NULL,
+     source_code TEXT NOT NULL,
+     created_at TEXT NOT NULL DEFAULT (datetime('now')),
+     PRIMARY KEY (workflow_name, source_hash)
+ );
+ """
+
+ # Indexes for workflow definitions
+ WORKFLOW_DEFINITIONS_INDEXES = [
+     "CREATE INDEX IF NOT EXISTS idx_definitions_name ON workflow_definitions(workflow_name);",
+     "CREATE INDEX IF NOT EXISTS idx_definitions_hash ON workflow_definitions(source_hash);",
+ ]
+
+ # SQL schema for workflow instances table with distributed locking support
+ WORKFLOW_INSTANCES_TABLE = """
+ CREATE TABLE IF NOT EXISTS workflow_instances (
+     instance_id TEXT PRIMARY KEY,
+     workflow_name TEXT NOT NULL,
+     source_hash TEXT NOT NULL,
+     owner_service TEXT NOT NULL,
+     status TEXT NOT NULL DEFAULT 'running',
+     current_activity_id TEXT,
+     started_at TEXT NOT NULL DEFAULT (datetime('now')),
+     updated_at TEXT NOT NULL DEFAULT (datetime('now')),
+     input_data TEXT NOT NULL,
+     output_data TEXT,
+     locked_by TEXT,
+     locked_at TEXT,
+     lock_timeout_seconds INTEGER,
+     CONSTRAINT valid_status CHECK (
+         status IN ('running', 'completed', 'failed', 'waiting_for_event', 'waiting_for_timer', 'compensating', 'cancelled')
+     ),
+     FOREIGN KEY (workflow_name, source_hash) REFERENCES workflow_definitions(workflow_name, source_hash)
+ );
+ """
+
+ # Indexes for workflow instances
+ WORKFLOW_INSTANCES_INDEXES = [
+     "CREATE INDEX IF NOT EXISTS idx_instances_status ON workflow_instances(status);",
+     "CREATE INDEX IF NOT EXISTS idx_instances_workflow ON workflow_instances(workflow_name);",
+     "CREATE INDEX IF NOT EXISTS idx_instances_owner ON workflow_instances(owner_service);",
+     "CREATE INDEX IF NOT EXISTS idx_instances_locked ON workflow_instances(locked_by, locked_at);",
+     "CREATE INDEX IF NOT EXISTS idx_instances_updated ON workflow_instances(updated_at);",
+     "CREATE INDEX IF NOT EXISTS idx_instances_hash ON workflow_instances(source_hash);",
+ ]
+
+ # SQL schema for workflow execution history (for deterministic replay)
+ WORKFLOW_HISTORY_TABLE = """
+ CREATE TABLE IF NOT EXISTS workflow_history (
+     id INTEGER PRIMARY KEY AUTOINCREMENT,
+     instance_id TEXT NOT NULL,
+     activity_id TEXT NOT NULL,
+     event_type TEXT NOT NULL,
+     event_data TEXT NOT NULL,
+     created_at TEXT NOT NULL DEFAULT (datetime('now')),
+     FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE,
+     CONSTRAINT unique_instance_activity UNIQUE (instance_id, activity_id)
+ );
+ """
+
+ # Indexes for workflow history
+ WORKFLOW_HISTORY_INDEXES = [
+     "CREATE INDEX IF NOT EXISTS idx_history_instance ON workflow_history(instance_id, activity_id);",
+     "CREATE INDEX IF NOT EXISTS idx_history_created ON workflow_history(created_at);",
+ ]
+
+ # SQL schema for compensation transactions (LIFO stack for Saga pattern)
+ WORKFLOW_COMPENSATIONS_TABLE = """
+ CREATE TABLE IF NOT EXISTS workflow_compensations (
+     id INTEGER PRIMARY KEY AUTOINCREMENT,
+     instance_id TEXT NOT NULL,
+     activity_id TEXT NOT NULL,
+     activity_name TEXT NOT NULL,
+     args TEXT NOT NULL,
+     created_at TEXT NOT NULL DEFAULT (datetime('now')),
+     FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE
+ );
+ """
+
+ # Indexes for workflow compensations
+ WORKFLOW_COMPENSATIONS_INDEXES = [
+     "CREATE INDEX IF NOT EXISTS idx_compensations_instance ON workflow_compensations(instance_id, created_at DESC);",
+ ]
+
+ # SQL schema for event subscriptions (for wait_event)
+ WORKFLOW_EVENT_SUBSCRIPTIONS_TABLE = """
+ CREATE TABLE IF NOT EXISTS workflow_event_subscriptions (
+     id INTEGER PRIMARY KEY AUTOINCREMENT,
+     instance_id TEXT NOT NULL,
+     event_type TEXT NOT NULL,
+     activity_id TEXT,
+     timeout_at TEXT,
+     created_at TEXT NOT NULL DEFAULT (datetime('now')),
+     FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE,
+     CONSTRAINT unique_instance_event UNIQUE (instance_id, event_type)
+ );
+ """
+
+ # Indexes for event subscriptions
+ WORKFLOW_EVENT_SUBSCRIPTIONS_INDEXES = [
+     "CREATE INDEX IF NOT EXISTS idx_subscriptions_event ON workflow_event_subscriptions(event_type);",
+     "CREATE INDEX IF NOT EXISTS idx_subscriptions_timeout ON workflow_event_subscriptions(timeout_at);",
+     "CREATE INDEX IF NOT EXISTS idx_subscriptions_instance ON workflow_event_subscriptions(instance_id);",
+ ]
+
+ # SQL schema for timer subscriptions (for wait_timer)
+ WORKFLOW_TIMER_SUBSCRIPTIONS_TABLE = """
+ CREATE TABLE IF NOT EXISTS workflow_timer_subscriptions (
+     id INTEGER PRIMARY KEY AUTOINCREMENT,
+     instance_id TEXT NOT NULL,
+     timer_id TEXT NOT NULL,
+     expires_at TEXT NOT NULL,
+     activity_id TEXT,
+     created_at TEXT NOT NULL DEFAULT (datetime('now')),
+     FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE,
+     CONSTRAINT unique_instance_timer UNIQUE (instance_id, timer_id)
+ );
+ """
+
+ # Indexes for timer subscriptions
+ WORKFLOW_TIMER_SUBSCRIPTIONS_INDEXES = [
+     "CREATE INDEX IF NOT EXISTS idx_timer_subscriptions_expires ON workflow_timer_subscriptions(expires_at);",
+     "CREATE INDEX IF NOT EXISTS idx_timer_subscriptions_instance ON workflow_timer_subscriptions(instance_id);",
+ ]
+
+ # SQL schema for transactional outbox pattern
+ OUTBOX_EVENTS_TABLE = """
+ CREATE TABLE IF NOT EXISTS outbox_events (
+     event_id TEXT PRIMARY KEY,
+     event_type TEXT NOT NULL,
+     event_source TEXT NOT NULL,
+     event_data TEXT NOT NULL,
+     content_type TEXT NOT NULL DEFAULT 'application/json',
+     created_at TEXT NOT NULL DEFAULT (datetime('now')),
+     published_at TEXT,
+     status TEXT NOT NULL DEFAULT 'pending',
+     retry_count INTEGER DEFAULT 0,
+     last_error TEXT,
+     CONSTRAINT valid_outbox_status CHECK (status IN ('pending', 'processing', 'published', 'failed', 'invalid', 'expired'))
+ );
+ """
+
+ # SQL schema for schema version tracking
+ SCHEMA_VERSION_TABLE = """
+ CREATE TABLE IF NOT EXISTS schema_version (
+     version INTEGER PRIMARY KEY,
+     applied_at TEXT NOT NULL DEFAULT (datetime('now')),
+     description TEXT NOT NULL
+ );
+ """
+
+ # Indexes for outbox events
+ OUTBOX_EVENTS_INDEXES = [
+     "CREATE INDEX IF NOT EXISTS idx_outbox_status ON outbox_events(status, created_at);",
+     "CREATE INDEX IF NOT EXISTS idx_outbox_retry ON outbox_events(status, retry_count);",
+     "CREATE INDEX IF NOT EXISTS idx_outbox_published ON outbox_events(published_at);",
+ ]
+
+ # Current schema version
+ CURRENT_SCHEMA_VERSION = 1
+
+ # All table creation statements
+ ALL_TABLES = [
+     SCHEMA_VERSION_TABLE,
+     WORKFLOW_DEFINITIONS_TABLE,
+     WORKFLOW_INSTANCES_TABLE,
+     WORKFLOW_HISTORY_TABLE,
+     WORKFLOW_COMPENSATIONS_TABLE,
+     WORKFLOW_EVENT_SUBSCRIPTIONS_TABLE,
+     WORKFLOW_TIMER_SUBSCRIPTIONS_TABLE,
+     OUTBOX_EVENTS_TABLE,
+ ]
+
+ # All index creation statements
+ ALL_INDEXES = (
+     WORKFLOW_DEFINITIONS_INDEXES
+     + WORKFLOW_INSTANCES_INDEXES
+     + WORKFLOW_HISTORY_INDEXES
+     + WORKFLOW_COMPENSATIONS_INDEXES
+     + WORKFLOW_EVENT_SUBSCRIPTIONS_INDEXES
+     + WORKFLOW_TIMER_SUBSCRIPTIONS_INDEXES
+     + OUTBOX_EVENTS_INDEXES
+ )
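
The DDL above is plain SQLite, so the natural bootstrap order is: tables (schema_version is first in ALL_TABLES), then indexes, then record the schema version. A minimal sketch using the standard sqlite3 module; the package itself exposes SQLAlchemyStorage, so this wiring is illustrative only:

    import sqlite3

    from edda.storage.models import ALL_INDEXES, ALL_TABLES, CURRENT_SCHEMA_VERSION

    conn = sqlite3.connect("edda.db")
    conn.execute("PRAGMA foreign_keys = ON")  # SQLite disables FK enforcement by default
    for ddl in ALL_TABLES + ALL_INDEXES:      # each entry is a single CREATE statement
        conn.execute(ddl)
    conn.execute(
        "INSERT OR IGNORE INTO schema_version (version, description) VALUES (?, ?)",
        (CURRENT_SCHEMA_VERSION, "initial schema"),
    )
    conn.commit()
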