@devvit/protos 0.12.0 → 0.12.1-next-2025-08-13-20-47-05-c6474cff6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (17)
  1. package/meta.min.json +8076 -0
  2. package/package.json +4 -4
  3. package/protos.min.js +2 -0
  4. package/protos.min.js.map +7 -0
  5. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/callback/v1/callback_service.proto +92 -0
  6. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/event/v1/event.proto +52 -0
  7. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/experimental/v1/experimental.proto +71 -0
  8. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/interop/analytics/v1/analytics.proto +12 -0
  9. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/interop/generic/v1/generic.proto +12 -0
  10. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/loadbalancing/v1/loadbalancing.proto +43 -0
  11. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/metadata/v1/metadata.proto +20 -0
  12. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/operation/v1/operation.proto +148 -0
  13. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/publication/v1/publication_service.proto +56 -0
  14. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/scheduling/v1/offsets.proto +17 -0
  15. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/scheduling/v1/scheduling.proto +25 -0
  16. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/testing/v1/testing.proto +72 -0
  17. package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/workerpool/v1/workerpool.proto +213 -0
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/callback/v1/callback_service.proto
@@ -0,0 +1,92 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.callback.v1;
+
+ import "google/protobuf/any.proto";
+ import "google/protobuf/duration.proto";
+ import "reddit/coreplatform/cloudroutines/event/v1/event.proto";
+ import "reddit/coreplatform/events/cloudevent/v1/cloudevent.proto";
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/callbackpb/v1;callbackpb";
+
+ // RPC Service for working with Cloudroutines callbacks
+ service CallbackService {
+   // Invoke makes an RPC call to the user's registered callback in their application
+   rpc Invoke(InvokeRequest) returns (InvokeResponse) {
+     option idempotency_level = IDEMPOTENT;
+   }
+
+   // TODO(marco.ferrer):12/11/23 we may want to add an RPC that the cloudroutines server can
+   // call when a consumer is in trouble or didn't like the result returned by the callback server;
+   // the SDK could then record metrics, change application health, or deregister the callback.
+ }
+
+ // Request message containing the event to be processed by the user's registered callback
+ message InvokeRequest {
+   // CloudEvent compliant event payload
+   reddit.coreplatform.events.cloudevent.v1.CloudEvent event = 1;
+   // Name of the callback function to be invoked
+   string callback_name = 2;
+ }
+
+ // Response message containing the user's registered callback response after processing an event
+ message InvokeResponse {
+   OperationResult result = 1;
+ }
+
+ // OperationResult represents the result of a Cloudroutines operation, currently only a publication result
+ message OperationResult {
+   oneof result {
+     Publication publication = 1;
+   }
+
+   // Publication represents a result of a Cloudroutines operation, consisting of a protobuf message
+   // and the message route of that protobuf message
+   message Publication {
+     google.protobuf.Any message = 1;
+     reddit.coreplatform.cloudroutines.event.v1.MessageRoute message_route = 2;
+   }
+ }
+
+ // A finite set of policies which inform the Cloudroutines server on how to react to an error returned by the SDK server.
+ message FailurePolicy {
+   // Undefined is the default error policy attached to all status errors when no policy was explicitly set
+   // by the user. This policy message enables the Cloudroutines server to detect when errors are surfaced
+   // from the client vs the network infrastructure.
+   message Undefined {}
+
+   // RetryAfter is the failure policy that indicates a message should be retried after the specified duration.
+   // This is the default failure policy applied if no failure policy is found on the callback error.
+   message RetryAfter {
+     google.protobuf.Duration duration = 1;
+   }
+
+   // BlockExecution is the failure policy that does a best-effort attempt at blocking further execution
+   // from the current message. Manual intervention is required.
+   message BlockExecution {
+     string reason = 1;
+   }
+
+   // Skip is the failure policy that indicates the current message can be dropped.
+   message Skip {
+     string reason = 1;
+   }
+
+   // DeadLetterQueue is the failure policy that adds the current message to a DLQ. The message routing
+   // can be set; otherwise defaults will be used based on the message.
+   message DeadLetterQueue {
+     string reason = 1;
+
+     // Message route allows a user to configure the destination for the DLQ message. If no route is defined,
+     // the SDK will infer the default route based on the message's protobuf fullname, e.g. `dlq_{proto_full_name}`.
+     reddit.coreplatform.cloudroutines.event.v1.MessageRoute message_route = 2;
+   }
+
+   oneof policy {
+     Undefined undefined = 1;
+     RetryAfter retry_after = 2;
+     BlockExecution block_execution = 3;
+     Skip skip = 4;
+     DeadLetterQueue dead_letter_queue = 5;
+   }
+ }
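The FailurePolicy messages above are designed to travel back to the Cloudroutines server alongside an error returned by the SDK's callback handler. Below is a minimal Go sketch of one way that could look, assuming standard protoc-gen-go output under the declared go_package (package `callbackpb`) and the usual gRPC status-details mechanism; the helper name and error message are hypothetical, not part of this package.

```go
package example

import (
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/durationpb"

	// Assumed import path, taken from the go_package option above.
	callbackpb "github.snooguts.net/reddit/cloudroutines/go/callbackpb/v1"
)

// retryLater returns an error whose status details carry a RetryAfter
// FailurePolicy, asking for redelivery of the current message in 30 seconds.
func retryLater() error {
	policy := &callbackpb.FailurePolicy{
		// Oneof wrapper name as protoc-gen-go would generate it for the
		// `retry_after` field (the trailing underscore avoids a clash with
		// the nested RetryAfter message type).
		Policy: &callbackpb.FailurePolicy_RetryAfter_{
			RetryAfter: &callbackpb.FailurePolicy_RetryAfter{
				Duration: durationpb.New(30 * time.Second),
			},
		},
	}
	st, err := status.New(codes.Unavailable, "downstream dependency unavailable").WithDetails(policy)
	if err != nil {
		return err
	}
	return st.Err()
}
```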
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/event/v1/event.proto
@@ -0,0 +1,52 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.event.v1;
+
+ import "google/protobuf/descriptor.proto";
+ import "reddit/coreplatform/cloudroutines/workerpool/v1/workerpool.proto";
+ import "validate/validate.proto";
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/eventpb/v1;eventpb";
+
+ extend google.protobuf.MessageOptions {
+   EventMessageMetadata message = 78110999;
+ }
+
+ extend google.protobuf.FieldOptions {
+   EventFieldMetadata field = 78110999;
+ }
+
+ message EventMessageMetadata {
+   string default_topic = 1;
+   repeated reddit.coreplatform.cloudroutines.workerpool.v1.WorkerPool.Source publication_source_whitelist = 2;
+ }
+
+ message EventFieldMetadata {
+   bool sharding_identifier = 1;
+   string sharding_field_path = 2;
+ }
+
+ message MessageRoute {
+   // Cluster URI is optional. Defaults to the same cluster as the original protobuf message.
+   // This will be mapped internally to the correct publisher.
+   optional string cluster_uri = 1 [(validate.rules).string = {
+     // TODO: figure out the regex on :/?.&=[]
+     // pattern: "^[-_a-zA-Z0-9]+$",
+     min_len: 1
+     max_len: 64
+   }];
+
+   // Topic is optional. Defaults to `dlq_{proto_full_name}`.
+   optional string topic = 2 [(validate.rules).string = {
+     pattern: "^[-_a-zA-Z0-9]+$"
+     min_len: 1
+     max_len: 255
+   }];
+
+   // Partitioning value is optional. Defaults to the annotated field on the original protobuf message.
+   optional string partitioning_value = 3 [(validate.rules).string = {
+     pattern: "^[-_.a-zA-Z0-9]+$"
+     min_len: 1
+     max_len: 255
+   }];
+ }
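The two extensions above attach routing metadata to event definitions; ExampleCustomizedEvent in testing.proto later in this diff shows them in use. As a hedged Go sketch of how such metadata could be read back at runtime, assuming protoc-gen-go output under the declared go_packages (the `E_Message` extension variable and both import paths are inferred from protoc-gen-go conventions, not confirmed exports):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"

	// Assumed import paths, taken from the go_package options in this diff.
	eventpb "github.snooguts.net/reddit/cloudroutines/go/eventpb/v1"
	testingpb "github.snooguts.net/reddit/cloudroutines/go/testingpb/v1"
)

func main() {
	// Pull the (event.v1.message) option off ExampleCustomizedEvent's
	// descriptor to discover the default topic it was annotated with.
	opts := (&testingpb.ExampleCustomizedEvent{}).ProtoReflect().Descriptor().Options().(*descriptorpb.MessageOptions)
	md := proto.GetExtension(opts, eventpb.E_Message).(*eventpb.EventMessageMetadata)
	fmt.Println(md.GetDefaultTopic()) // "my_existing_topic"
}
```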
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/experimental/v1/experimental.proto
@@ -0,0 +1,71 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.experimental.v1;
+
+ import "google/protobuf/duration.proto";
+ import "validate/validate.proto";
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/experimental/v1;experimentalpb";
+
+ // Ensures that messages are processed in the strict order they are received.
+ // This guarantees the sequence of message execution but may reduce throughput.
+ // This is the default ordering if not specified, so it is not necessary to set this explicitly.
+ //
+ // *Note: If you're using the Python SDK, then KeyMessageOrdering is the default ordering.
+ message StrictMessageOrdering {}
+
+ // Enables out-of-order message execution.
+ // Messages are batched and processed concurrently, increasing throughput but not guaranteeing order of receipt.
+ message LooseMessageOrdering {}
+
+ // Messages are processed concurrently, similar to LooseMessageOrdering, improving throughput.
+ // However, messages with the same Kafka partitioning key will be processed in the order they arrive.
+ // This allows for controlled parallel processing with ordering guarantees based on specific keys.
+ //
+ // *Note: This is the default ordering if not specified when using the Python SDK.
+ message KeyMessageOrdering {}
+
+ // Wrapper struct for all client alert configurations.
+ message AlertConfig {
+   string sdk_version = 1;
+   optional ConsumerLagAlertConfig consumer_lag_alert_config = 2;
+ }
+
+ // Alert configuration for consumer lag. If the entire struct is left blank, the stated defaults are applied; otherwise, all fields must be set.
+ // Currently, this is based on Burrow's consumer lag metric.
+ message ConsumerLagAlertConfig {
+   // Whether or not the alert is disabled.
+   // Defaults to false.
+   bool disabled = 1;
+
+   // The number of messages the consumer group needs to lag by for the alert to fire.
+   // Defaults to 25000.
+   int64 threshold = 2;
+
+   // How long the alert threshold must be sustained for the alert to fire.
+   // Defaults to 5 minutes. Maximum of 30 minutes.
+   google.protobuf.Duration threshold_duration = 3;
+
+   // Infrared v2 reddit-component name to route the alert to (see https://atlas.snooguts.net/catalog/default/component).
+   // Defaults to an empty value, which will route the alert based on the client namespace.
+   string component_name = 4;
+ }
+
+ message ExperimentalCallbackOptions {
+   // Specifies the delivery order of messages.
+   // Defaults to strict ordering if unset.
+   oneof message_ordering {
+     StrictMessageOrdering strict_ordering = 1;
+     LooseMessageOrdering loose_ordering = 2;
+     KeyMessageOrdering key_ordering = 3;
+   }
+
+   // Wrapper type containing configuration for all client alerts.
+   AlertConfig alert_config = 4;
+
+   // URI representing an arbitrary data source to consume messages from.
+   optional string data_source_uri = 5 [(validate.rules).string = {
+     uri: true
+     max_len: 255
+   }];
+ }
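For orientation, here is a hedged Go sketch showing how these options compose: it opts a callback into key-based ordering and overrides the consumer lag alert thresholds. It assumes protoc-gen-go output under the declared go_package (package `experimentalpb`); the SDK version and component name values are hypothetical placeholders.

```go
package example

import (
	"time"

	"google.golang.org/protobuf/types/known/durationpb"

	// Assumed import path, taken from the go_package option above.
	experimentalpb "github.snooguts.net/reddit/cloudroutines/go/experimental/v1"
)

// keyOrderedOptions builds ExperimentalCallbackOptions that keep per-key
// ordering while processing different keys concurrently, and tunes the
// consumer lag alert away from its stated defaults.
func keyOrderedOptions() *experimentalpb.ExperimentalCallbackOptions {
	return &experimentalpb.ExperimentalCallbackOptions{
		MessageOrdering: &experimentalpb.ExperimentalCallbackOptions_KeyOrdering{
			KeyOrdering: &experimentalpb.KeyMessageOrdering{},
		},
		AlertConfig: &experimentalpb.AlertConfig{
			SdkVersion: "0.0.0-example", // hypothetical value
			ConsumerLagAlertConfig: &experimentalpb.ConsumerLagAlertConfig{
				Threshold:         50000,
				ThresholdDuration: durationpb.New(10 * time.Minute),
				ComponentName:     "my-reddit-component", // hypothetical Infrared v2 component
			},
		},
	}
}
```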
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/interop/analytics/v1/analytics.proto
@@ -0,0 +1,12 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.interop.analytics.v1;
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/interop/analyticspb/v1;analyticspb";
+
+ // A protobuf wrapper for reddit analytics events. Used as a support shim for older, loosely structured
+ // event payloads.
+ message AnalyticsEvent {
+   // The event payload.
+   bytes data = 1;
+ }
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/interop/generic/v1/generic.proto
@@ -0,0 +1,12 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.interop.generic.v1;
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/interop/genericpb/v1;genericpb";
+
+ // A protobuf wrapper for unprocessed messages. Used as a support shim for events containing raw byte payloads.
+ // DO NOT use this without first contacting #core-events in Slack to discuss your use case.
+ message Message {
+   // The event payload.
+   bytes payload = 1;
+ }
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/loadbalancing/v1/loadbalancing.proto
@@ -0,0 +1,43 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.loadbalancing.v1;
+
+ import "google/protobuf/duration.proto";
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/loadbalancingpb/v1;loadbalancingpb";
+
+ // Configuration for slow start throttling performed by the server on new worker instances.
+ message SlowStartConfig {
+   // The duration of the slow start window. This is the amount of time spent ramping traffic. At the start of the window,
+   // traffic is throttled to `min_allow_rate`. At the end of the window, traffic will no longer be throttled.
+   // This value defaults to 15s.
+   google.protobuf.Duration window = 1;
+
+   // The factor used to increase the traffic allowed as the ramp window progresses. An aggression of `1` results in a
+   // linearly increasing allow rate.
+   float ramp_aggression = 2;
+
+   // This value determines how much traffic is allowed to proceed when max throttling is applied, usually at the start of
+   // the window.
+   float min_allow_rate = 3;
+ }
+
+ // This message is attached to an error status when the request failed due to reaching the concurrency limit.
+ message ConcurrencyLimitReached {
+   // We're not adding any fields for now.
+ }
+
+ // This message is attached to an error status when the request failed due to slow start throttling being applied.
+ // When this error is encountered, the client should retry the request at some point in the future. The new request
+ // will either be routed to another address with less throttling or find that the current address's allow rate has increased.
+ message SlowStartThrottle {
+   // The address that is being throttled.
+   string address = 1;
+
+   // How long it has been since the worker server was started and entered the ready state.
+   google.protobuf.Duration worker_age = 2;
+
+   // The percentage of requests that are allowed to proceed for this upstream address. This is a float64
+   // between 0.0 and 1.0.
+   double allow_rate = 3;
+ }
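Since ConcurrencyLimitReached and SlowStartThrottle ride along as status details on failed requests, a client can inspect them to decide whether a retry is worthwhile. A minimal Go sketch, assuming protoc-gen-go output under the declared go_package (package `loadbalancingpb`); the helper name is hypothetical:

```go
package example

import (
	"google.golang.org/grpc/status"

	// Assumed import path, taken from the go_package option above.
	loadbalancingpb "github.snooguts.net/reddit/cloudroutines/go/loadbalancingpb/v1"
)

// isRetryableThrottle reports whether an RPC error carries one of the
// throttling details defined above, in which case retrying later is expected
// to land on a less-throttled address or a higher allow rate.
func isRetryableThrottle(err error) bool {
	st, ok := status.FromError(err)
	if !ok {
		return false
	}
	for _, detail := range st.Details() {
		switch detail.(type) {
		case *loadbalancingpb.SlowStartThrottle, *loadbalancingpb.ConcurrencyLimitReached:
			return true
		}
	}
	return false
}
```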
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/metadata/v1/metadata.proto
@@ -0,0 +1,20 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.metadata.v1;
+
+ import "google/protobuf/descriptor.proto";
+ import "google/protobuf/duration.proto";
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/metadatapb/v1;metadatapb";
+
+ extend google.protobuf.MessageOptions {
+   MessageMetadata cloudroutines = 78010999;
+ }
+
+ message MessageMetadata {
+   bool enabled = 1;
+
+   google.protobuf.Duration retention_period = 2;
+
+   int32 partition_qty = 3;
+ }
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/operation/v1/operation.proto
@@ -0,0 +1,148 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.operation.v1;
+
+ import "google/protobuf/timestamp.proto";
+ import "google/rpc/status.proto";
+ import "validate/validate.proto";
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/operationpb/v1;operationpb";
+
+ // RPC Service for working with Cloudroutines operations
+ service OperationService {
+   // Poll for the status of an operation
+   rpc PollOperationStatuses(PollOperationStatusesRequest) returns (PollOperationStatusesResponse) {
+     option idempotency_level = NO_SIDE_EFFECTS;
+   }
+
+   // Update the status of an operation
+   rpc PublishOperationStatus(PublishOperationStatusRequest) returns (PublishOperationStatusResponse) {
+     option idempotency_level = IDEMPOTENT;
+   }
+
+   // Mark an operation as skippable
+   rpc SkipOperation(SkipOperationRequest) returns (SkipOperationResponse) {
+     option idempotency_level = IDEMPOTENT;
+   }
+ }
+
+ // Request payload to check the statuses of Cloudroutines operations.
+ // This request requires the operation URI, consisting of the fully qualified Cloudroutines callback name and event ID.
+ // If the retention period has elapsed, then requesting a previously valid operation URI will result in
+ // a NOT_FOUND status code being returned.
+ message PollOperationStatusesRequest {
+   // A list of operation URIs (RFC 3986) to request statuses for, each formatted as
+   // `{worker_pool_namespace}:{callback_name}:{eventID}`.
+   repeated string operation_uris = 1 [(validate.rules).repeated = {
+     min_items: 1
+     max_items: 10
+     items: {
+       string: {
+         uri: true
+         min_len: 1
+         max_len: 255
+       }
+     }
+   }];
+ }
+
+ // Response payload to check the statuses of Cloudroutines operations
+ message PollOperationStatusesResponse {
+   // A map of operations by their URI to their statuses
+   map<string, Operation> associated_operations = 2;
+ }
+
+ // Request payload to update the status of a Cloudroutines operation.
+ // This request requires the operation URI, consisting of the fully qualified Cloudroutines callback name and event ID.
+ message PublishOperationStatusRequest {
+   // The operation URI (RFC 3986), formatted as `{worker_pool_namespace}:{callback_name}:{eventID}`.
+   string operation_uri = 1 [(validate.rules).string = {
+     uri: true
+     min_len: 1
+     max_len: 255
+   }];
+
+   // The status to update the operation to; it must be a valid status (not UNKNOWN). If the operation on the request
+   // is in an initial phase, any provided status value on the request will be ignored and default to UNKNOWN.
+   // Terminal statuses (any status that is not UNKNOWN) cannot be overwritten.
+   google.rpc.Status status = 3;
+
+   // The phase corresponding to the status update for the operation.
+   Phase phase = 4 [(validate.rules).enum = {
+     not_in: [0]
+   }];
+ }
+
+ // Response payload to update the status of a Cloudroutines operation
+ message PublishOperationStatusResponse {}
+
+ // Request payload to mark an operation as skippable
+ message SkipOperationRequest {
+   // The operation URI (RFC 3986), formatted as `{worker_pool_namespace}:{callback_name}:{eventID}`.
+   string operation_uri = 1 [(validate.rules).string = {
+     uri: true
+     min_len: 1
+     max_len: 255
+   }];
+ }
+
+ // Response payload to mark an operation as skippable
+ message SkipOperationResponse {}
+
+ // Phase represents the individual states a Cloudroutines operation can be in throughout its lifecycle.
+ enum Phase {
+   // No status was specified or set.
+   PHASE_UNSPECIFIED = 0;
+
+   // An operation is acknowledged by the system but not processed yet. The initial state.
+   PHASE_PENDING = 1;
+
+   // An operation has started processing within the system. Used only for long-running operations.
+   PHASE_IN_PROGRESS = 2;
+
+   // An operation has been successfully processed or has ended up in an unrecoverable error state.
+   PHASE_COMPLETED = 3;
+ }
+
+ // An Operation represents the Cloudroutines operation state, including its current status.
+ // Due to the nature of Cloudroutines, these are eventually consistent values.
+ message Operation {
+   // The operation URI, formatted as `{worker_pool_namespace}:{callback_name}:{eventID}`.
+   // TODO: fix regex
+   string uri = 1 [(validate.rules).string = {
+     // pattern: "^[-_.a-zA-Z0-9]+$",
+     min_len: 1
+     max_len: 255
+   }];
+
+   // The phase of the operation.
+   Phase phase = 2;
+
+   // Operations that complete successfully will have a status of `OK`. Any other status code indicates failure.
+   // Status will only be populated when the phase is completed. If an operation produces any metadata or results,
+   // they will be available as additional details on the status.
+   google.rpc.Status status = 3;
+
+   // Time when the operation expires from both the event queue and status store.
+   google.protobuf.Timestamp expires_at = 4;
+ }
+
+ // Used internally within Cloudroutines to asynchronously store the given operation
+ message StoreOperationStatus {
+   // The event ID, a unique identifier for an event.
+   string event_id = 1 [(validate.rules).string = {
+     min_len: 1
+     max_len: 255
+   }];
+
+   // The fully qualified Cloudroutines callback name (worker pool namespace + callback name) that processed the operation.
+   string fq_callback_name = 2 [(validate.rules).string = {
+     ignore_empty: true
+     // pattern: "^[-_.a-zA-Z0-9]+$",
+     min_len: 1
+     max_len: 255
+   }];
+
+   // The status to update the operation to; it must be a valid status (not UNKNOWN).
+   google.rpc.Status status = 3;
+ }
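For orientation, a hedged Go sketch of polling a single operation over the service above. It assumes protoc-gen-go and protoc-gen-go-grpc stubs under the declared go_package (package `operationpb`); the helper name and the example URI are hypothetical illustrations of the `{worker_pool_namespace}:{callback_name}:{eventID}` format.

```go
package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	// Assumed import path, taken from the go_package option above.
	operationpb "github.snooguts.net/reddit/cloudroutines/go/operationpb/v1"
)

// pollOnce asks for the status of one operation and reports whether it has
// reached the completed phase.
func pollOnce(ctx context.Context, conn *grpc.ClientConn, operationURI string) (bool, error) {
	client := operationpb.NewOperationServiceClient(conn)
	resp, err := client.PollOperationStatuses(ctx, &operationpb.PollOperationStatusesRequest{
		// e.g. "my-worker-pool:my_callback:0123456789abcdef" (hypothetical)
		OperationUris: []string{operationURI},
	})
	if err != nil {
		return false, err // NOT_FOUND once the retention period has elapsed
	}
	op, ok := resp.GetAssociatedOperations()[operationURI]
	if !ok {
		return false, fmt.Errorf("no status returned for %s", operationURI)
	}
	// Status is only populated once the phase is completed; code 0 (OK) means success.
	return op.GetPhase() == operationpb.Phase_PHASE_COMPLETED, nil
}
```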
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/publication/v1/publication_service.proto
@@ -0,0 +1,56 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.publication.v1;
+
+ import "reddit/coreplatform/events/cloudevent/v1/cloudevent.proto";
+ import "validate/validate.proto";
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/publicationpb/v1;publicationpb";
+
+ service PublicationService {
+   // PublishEvent makes an RPC call to the Cloudroutines server to publish the given event to the given topic
+   rpc PublishEvent(PublishEventRequest) returns (PublishEventResponse) {
+     option idempotency_level = IDEMPOTENT;
+   }
+ }
+
+ // Request message containing the event to be published by the user's registered callback.
+ // The server performs checks to ensure the request parameters are valid:
+ // the event type and topic are compatible, and a key is provided if the topic is ordered.
+ message PublishEventRequest {
+   // CloudEvent compliant event payload
+   reddit.coreplatform.events.cloudevent.v1.CloudEvent event = 1 [(validate.rules).message.required = true];
+
+   // Topic to publish the given event to. Typically, this is the fully qualified message name of the event.
+   string topic = 2 [(validate.rules).string = {
+     pattern: "^[-_.a-zA-Z0-9]+$"
+     min_len: 1
+     max_len: 255
+   }];
+
+   // Value to use as the partition key (used for ordering) when publishing the given event.
+   // Must be set to guarantee ordering within a partition; if no ordering is necessary, pass the event ID.
+   string partition_key = 3 [(validate.rules).string = {
+     pattern: "^[-_.a-zA-Z0-9]+$"
+     min_len: 1
+     max_len: 255
+   }];
+ }
+
+ // Response message containing the published event details
+ message PublishEventResponse {
+   // Event detail information
+   EventDetails event_details = 1;
+ }
+
+ // Message containing the published event details
+ message EventDetails {
+   // The event ID, a unique identifier of the event published
+   string event_id = 1;
+
+   // The topic the event was published to
+   string topic = 2;
+
+   // The partition key the event was published to
+   string partition_key = 3;
+ }
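A hedged Go sketch of the publish round trip, assuming protoc-gen-go and protoc-gen-go-grpc stubs under the declared go_package (package `publicationpb`). The request is taken as a parameter because building the CloudEvent payload depends on the cloudevent.proto types, which are outside this diff; the helper name is hypothetical.

```go
package example

import (
	"context"
	"log"

	// Assumed import path, taken from the go_package option above.
	publicationpb "github.snooguts.net/reddit/cloudroutines/go/publicationpb/v1"
)

// publishAndLog publishes an already-built request. Per the validation rules
// above, Topic and PartitionKey must match ^[-_.a-zA-Z0-9]+$ and be 1-255
// characters; Topic is typically the event's fully qualified message name, and
// PartitionKey can fall back to the event ID when no ordering is required.
func publishAndLog(ctx context.Context, client publicationpb.PublicationServiceClient, req *publicationpb.PublishEventRequest) error {
	resp, err := client.PublishEvent(ctx, req)
	if err != nil {
		return err
	}
	details := resp.GetEventDetails()
	log.Printf("published event %s to topic %s (partition key %s)",
		details.GetEventId(), details.GetTopic(), details.GetPartitionKey())
	return nil
}
```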
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/scheduling/v1/offsets.proto
@@ -0,0 +1,17 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.scheduling.v1;
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/schedulingpb/v1;schedulingpb";
+
+ message DataSourceOffsetRange {
+   message KafkaTopicOffsetRange {
+     string topic = 1;
+     map<int32, int64> start_offset_by_partition = 2;
+     map<int32, int64> end_offset_by_partition = 3;
+   }
+
+   oneof offset_range {
+     KafkaTopicOffsetRange kafka_topic_offset_range = 1;
+   }
+ }
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/scheduling/v1/scheduling.proto
@@ -0,0 +1,25 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.scheduling.v1;
+
+ import "reddit/coreplatform/events/cloudevent/v1/cloudevent.proto";
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/schedulingpb/v1;schedulingpb";
+
+ service SchedulerService {
+   rpc ScheduleTask(ScheduleTaskRequest) returns (ScheduleTaskResponse) {
+     option idempotency_level = IDEMPOTENT;
+   }
+ }
+
+ message TaskReference {
+   string id = 1;
+ }
+
+ message ScheduleTaskRequest {
+   reddit.coreplatform.events.cloudevent.v1.CloudEvent event = 1;
+ }
+
+ message ScheduleTaskResponse {
+   TaskReference task_reference = 1;
+ }
package/schema/.snootobuf/remoteSrcs/reddit/coreplatform/cloudroutines/testing/v1/testing.proto
@@ -0,0 +1,72 @@
+ syntax = "proto3";
+
+ package reddit.coreplatform.cloudroutines.testing.v1;
+
+ import "reddit/coreplatform/cloudroutines/event/v1/event.proto";
+
+ option go_package = "github.snooguts.net/reddit/cloudroutines/go/testingpb/v1;testingpb";
+
+ // Request message containing the event to be processed by the user's registered event handler
+ message ExampleEvent {
+   string user_id = 1 [(reddit.coreplatform.cloudroutines.event.v1.field) = {sharding_identifier: true}];
+
+   string subreddit_id = 2;
+ }
+
+ message ExampleTask {
+   string user_id = 1 [(reddit.coreplatform.cloudroutines.event.v1.field) = {sharding_identifier: true}];
+
+   string greeting = 2;
+ }
+
+ message ExampleCustomizedEvent {
+   option (reddit.coreplatform.cloudroutines.event.v1.message) = {
+     default_topic: "my_existing_topic"
+     publication_source_whitelist: [
+       {
+         deployment_reference: {
+           name: "reddit-service-cloudroutines-dummy-client"
+           namespace: "reddit-service-cloudroutines"
+         }
+       }
+     ]
+   };
+
+   string subreddit_id = 1 [(reddit.coreplatform.cloudroutines.event.v1.field) = {sharding_identifier: true}];
+ }
+
+ message ExampleCustomizedNestedEvent {
+   message Relation {
+     string relation_id = 1;
+     RelationDetails details = 2;
+   }
+
+   message RelationDetails {
+     string thing1_id = 1; // this is the sharding identifier
+     string thing2_id = 2;
+     int64 int64_field = 3;
+     repeated string repeated_string_field = 4;
+     float float_field = 5;
+   }
+
+   string event_id = 1;
+
+   Relation relation = 2 [(reddit.coreplatform.cloudroutines.event.v1.field) = {
+     sharding_identifier: true
+     sharding_field_path: "details.thing1_id"
+   }];
+
+   int64 int64_field = 3;
+   repeated string repeated_string_field = 4;
+   float float_field = 5;
+ }
+
+ message ExampleDLQEvent {
+   string user_id = 1 [(reddit.coreplatform.cloudroutines.event.v1.field) = {sharding_identifier: true}];
+
+   string subreddit_id = 2;
+ }
+
+ message ExampleResult {
+   string greeting = 1;
+ }