grpc 0.13.0 → 0.13.1.pre1

Potentially problematic release.

Files changed (155)
  1. checksums.yaml +4 -4
  2. data/.yardopts +1 -0
  3. data/Makefile +1114 -937
  4. data/include/grpc/census.h +71 -89
  5. data/include/grpc/compression.h +7 -7
  6. data/include/grpc/grpc.h +65 -68
  7. data/include/grpc/grpc_security.h +38 -38
  8. data/include/grpc/impl/codegen/alloc.h +7 -7
  9. data/include/grpc/impl/codegen/byte_buffer.h +13 -13
  10. data/include/grpc/impl/codegen/grpc_types.h +7 -2
  11. data/include/grpc/impl/codegen/log.h +5 -5
  12. data/include/grpc/impl/codegen/port_platform.h +14 -6
  13. data/include/grpc/impl/codegen/slice.h +15 -15
  14. data/include/grpc/impl/codegen/slice_buffer.h +17 -17
  15. data/include/grpc/impl/codegen/sync.h +26 -22
  16. data/include/grpc/impl/codegen/time.h +22 -24
  17. data/include/grpc/support/avl.h +9 -8
  18. data/include/grpc/support/cmdline.h +12 -12
  19. data/include/grpc/support/cpu.h +2 -2
  20. data/include/grpc/support/histogram.h +22 -22
  21. data/include/grpc/support/host_port.h +2 -2
  22. data/include/grpc/support/log_win32.h +1 -1
  23. data/include/grpc/support/string_util.h +2 -2
  24. data/include/grpc/support/subprocess.h +5 -5
  25. data/include/grpc/support/thd.h +9 -9
  26. data/include/grpc/support/useful.h +3 -1
  27. data/src/core/census/context.c +64 -85
  28. data/src/core/census/grpc_filter.c +2 -2
  29. data/src/core/census/mlog.c +600 -0
  30. data/src/core/census/mlog.h +95 -0
  31. data/src/core/channel/channel_args.c +67 -6
  32. data/src/core/channel/channel_args.h +7 -1
  33. data/src/core/channel/client_channel.c +26 -36
  34. data/src/core/channel/client_uchannel.c +1 -1
  35. data/src/core/channel/http_client_filter.c +2 -2
  36. data/src/core/channel/http_server_filter.c +2 -2
  37. data/src/core/channel/subchannel_call_holder.c +5 -7
  38. data/src/core/client_config/connector.c +3 -2
  39. data/src/core/client_config/connector.h +2 -2
  40. data/src/core/client_config/lb_policies/load_balancer_api.c +163 -0
  41. data/src/core/client_config/lb_policies/load_balancer_api.h +85 -0
  42. data/src/core/client_config/lb_policies/pick_first.c +10 -11
  43. data/src/core/client_config/lb_policies/round_robin.c +7 -8
  44. data/src/core/client_config/lb_policy.c +3 -3
  45. data/src/core/client_config/lb_policy.h +3 -2
  46. data/src/core/client_config/subchannel.c +51 -21
  47. data/src/core/client_config/subchannel.h +15 -6
  48. data/src/core/client_config/subchannel_index.c +261 -0
  49. data/src/core/client_config/subchannel_index.h +77 -0
  50. data/src/core/compression/{algorithm.c → compression_algorithm.c} +0 -0
  51. data/src/core/httpcli/httpcli.c +13 -11
  52. data/src/core/httpcli/httpcli.h +3 -2
  53. data/src/core/httpcli/httpcli_security_connector.c +7 -7
  54. data/src/core/iomgr/fd_posix.c +4 -2
  55. data/src/core/iomgr/iocp_windows.c +10 -6
  56. data/src/core/iomgr/iocp_windows.h +9 -2
  57. data/src/core/iomgr/iomgr.c +18 -2
  58. data/src/core/iomgr/iomgr_internal.h +5 -1
  59. data/src/core/iomgr/pollset.h +9 -10
  60. data/src/core/iomgr/pollset_multipoller_with_epoll.c +1 -0
  61. data/src/core/iomgr/pollset_multipoller_with_poll_posix.c +10 -5
  62. data/src/core/iomgr/pollset_posix.c +30 -35
  63. data/src/core/iomgr/pollset_posix.h +10 -6
  64. data/src/core/iomgr/pollset_set.h +3 -9
  65. data/src/core/iomgr/pollset_set_posix.c +23 -3
  66. data/src/core/iomgr/pollset_set_posix.h +2 -18
  67. data/src/core/iomgr/pollset_set_windows.c +3 -3
  68. data/src/core/iomgr/pollset_set_windows.h +2 -2
  69. data/src/core/iomgr/pollset_windows.c +24 -21
  70. data/src/core/iomgr/pollset_windows.h +1 -5
  71. data/src/core/iomgr/tcp_client_posix.c +7 -5
  72. data/src/core/iomgr/tcp_posix.c +4 -2
  73. data/src/core/iomgr/tcp_server_windows.c +1 -2
  74. data/src/core/iomgr/timer.c +2 -3
  75. data/src/core/iomgr/timer.h +21 -1
  76. data/src/core/iomgr/timer_heap.c +10 -12
  77. data/src/core/iomgr/udp_server.c +5 -4
  78. data/src/core/iomgr/udp_server.h +1 -0
  79. data/src/core/iomgr/workqueue_posix.c +1 -0
  80. data/src/core/iomgr/workqueue_posix.h +3 -1
  81. data/src/core/proto/grpc/lb/v0/load_balancer.pb.c +119 -0
  82. data/src/core/proto/grpc/lb/v0/load_balancer.pb.h +182 -0
  83. data/src/core/security/{base64.c → b64.c} +1 -1
  84. data/src/core/security/{base64.h → b64.h} +1 -1
  85. data/src/core/security/client_auth_filter.c +0 -1
  86. data/src/core/security/credentials.c +12 -5
  87. data/src/core/security/credentials.h +3 -3
  88. data/src/core/security/google_default_credentials.c +24 -19
  89. data/src/core/security/handshake.c +15 -7
  90. data/src/core/security/handshake.h +2 -1
  91. data/src/core/security/json_token.c +1 -1
  92. data/src/core/security/jwt_verifier.c +1 -1
  93. data/src/core/security/security_connector.c +84 -64
  94. data/src/core/security/security_connector.h +42 -22
  95. data/src/core/security/security_context.c +8 -3
  96. data/src/core/security/server_auth_filter.c +2 -2
  97. data/src/core/security/server_secure_chttp2.c +7 -7
  98. data/src/core/support/avl.c +2 -2
  99. data/src/core/support/env_linux.c +17 -0
  100. data/src/core/support/{file.c → load_file.c} +2 -2
  101. data/src/core/support/{file.h → load_file.h} +4 -12
  102. data/src/core/support/sync.c +6 -1
  103. data/src/core/support/time_posix.c +1 -1
  104. data/src/core/{iomgr/timer_internal.h → support/tmpfile.h} +17 -23
  105. data/src/core/support/{file_posix.c → tmpfile_posix.c} +2 -2
  106. data/src/core/support/{file_win32.c → tmpfile_win32.c} +2 -2
  107. data/src/core/surface/alarm.c +3 -2
  108. data/src/core/surface/call.c +102 -52
  109. data/src/core/surface/channel_create.c +1 -1
  110. data/src/core/surface/completion_queue.c +73 -41
  111. data/src/core/surface/init.c +4 -0
  112. data/src/core/surface/lame_client.c +1 -2
  113. data/src/core/surface/secure_channel_create.c +6 -7
  114. data/src/core/surface/server.c +13 -5
  115. data/src/core/surface/validate_metadata.c +1 -1
  116. data/src/core/surface/version.c +1 -1
  117. data/src/core/transport/chttp2/internal.h +22 -10
  118. data/src/core/transport/chttp2/parsing.c +3 -3
  119. data/src/core/transport/chttp2/stream_lists.c +39 -21
  120. data/src/core/transport/chttp2/writing.c +19 -28
  121. data/src/core/transport/chttp2_transport.c +80 -37
  122. data/src/core/transport/metadata.c +8 -0
  123. data/src/core/transport/static_metadata.c +17 -17
  124. data/src/core/transport/static_metadata.h +3 -3
  125. data/src/core/transport/transport.c +2 -1
  126. data/src/core/transport/transport.h +12 -5
  127. data/src/ruby/ext/grpc/extconf.rb +1 -0
  128. data/src/ruby/ext/grpc/rb_call.c +6 -0
  129. data/src/ruby/ext/grpc/rb_call_credentials.c +12 -14
  130. data/src/ruby/ext/grpc/rb_channel.c +8 -14
  131. data/src/ruby/ext/grpc/rb_channel_credentials.c +11 -12
  132. data/src/ruby/ext/grpc/rb_grpc.c +19 -18
  133. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +4 -0
  134. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +8 -2
  135. data/src/ruby/lib/grpc/core/time_consts.rb +2 -2
  136. data/src/ruby/lib/grpc/errors.rb +2 -2
  137. data/src/ruby/lib/grpc/generic/rpc_server.rb +58 -39
  138. data/src/ruby/lib/grpc/version.rb +1 -1
  139. data/src/ruby/pb/README.md +2 -2
  140. data/src/ruby/pb/generate_proto_ruby.sh +2 -2
  141. data/src/ruby/pb/grpc/health/checker.rb +11 -11
  142. data/src/ruby/pb/grpc/health/v1/health.rb +28 -0
  143. data/src/ruby/pb/grpc/health/{v1alpha → v1}/health_services.rb +4 -4
  144. data/src/ruby/spec/client_server_spec.rb +2 -1
  145. data/src/ruby/spec/generic/rpc_server_spec.rb +3 -22
  146. data/src/ruby/spec/pb/health/checker_spec.rb +22 -36
  147. data/third_party/nanopb/pb.h +547 -0
  148. data/third_party/nanopb/pb_common.c +97 -0
  149. data/third_party/nanopb/pb_common.h +42 -0
  150. data/third_party/nanopb/pb_decode.c +1319 -0
  151. data/third_party/nanopb/pb_decode.h +149 -0
  152. data/third_party/nanopb/pb_encode.c +690 -0
  153. data/third_party/nanopb/pb_encode.h +154 -0
  154. metadata +32 -16
  155. data/src/ruby/pb/grpc/health/v1alpha/health.rb +0 -29

data/include/grpc/support/log_win32.h +1 -1
@@ -42,7 +42,7 @@ extern "C" {
  * formatted error message, corresponding to the error messageid.
  * Use in conjunction with GetLastError() et al.
  */
-GPR_API char *gpr_format_message(int messageid);
+GPRAPI char *gpr_format_message(int messageid);
 
 #ifdef __cplusplus
 }
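
The renamed GPRAPI export macro does not change how the function is called. A minimal sketch of the call pattern (illustrative only, not part of the diff; the helper name is an assumption) on Windows:

#include <grpc/support/alloc.h>
#include <grpc/support/log_win32.h>
#include <windows.h>

/* Illustrative only: turn the calling thread's last Win32 error into text. */
static void report_last_error(void) {
  char *message = gpr_format_message((int)GetLastError());
  /* ... hand the message to whatever logging sink is in use ... */
  gpr_free(message); /* the string is allocated with gpr_malloc */
}
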

data/include/grpc/support/string_util.h +2 -2
@@ -42,7 +42,7 @@ extern "C" {
 
 /* Returns a copy of src that can be passed to gpr_free().
    If allocation fails or if src is NULL, returns NULL. */
-GPR_API char *gpr_strdup(const char *src);
+GPRAPI char *gpr_strdup(const char *src);
 
 /* printf to a newly-allocated string. The set of supported formats may vary
    between platforms.
@@ -52,7 +52,7 @@ GPR_API char *gpr_strdup(const char *src);
 
    On error, returns -1 and sets *strp to NULL. If the format string is bad,
    the result is undefined. */
-GPR_API int gpr_asprintf(char **strp, const char *format, ...);
+GPRAPI int gpr_asprintf(char **strp, const char *format, ...);
 
 #ifdef __cplusplus
 }
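
Both helpers return strings that the caller releases with gpr_free(). A minimal usage sketch (illustrative only, not part of the diff; the helper name and format string are assumptions):

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>

/* Illustrative only: build "host:port", falling back to a plain heap copy. */
static char *describe_endpoint(const char *host, int port) {
  char *out = NULL;
  /* gpr_asprintf returns -1 and sets out to NULL on error. */
  if (gpr_asprintf(&out, "%s:%d", host, port) < 0) {
    return gpr_strdup("unknown");
  }
  return out; /* caller frees with gpr_free() */
}
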

data/include/grpc/support/subprocess.h +5 -5
@@ -43,14 +43,14 @@ extern "C" {
 typedef struct gpr_subprocess gpr_subprocess;
 
 /* .exe on windows, empty on unices */
-GPR_API const char *gpr_subprocess_binary_extension();
+GPRAPI const char *gpr_subprocess_binary_extension();
 
-GPR_API gpr_subprocess *gpr_subprocess_create(int argc, const char **argv);
+GPRAPI gpr_subprocess *gpr_subprocess_create(int argc, const char **argv);
 /* if subprocess has not been joined, kill it */
-GPR_API void gpr_subprocess_destroy(gpr_subprocess *p);
+GPRAPI void gpr_subprocess_destroy(gpr_subprocess *p);
 /* returns exit status; can be called at most once */
-GPR_API int gpr_subprocess_join(gpr_subprocess *p);
-GPR_API void gpr_subprocess_interrupt(gpr_subprocess *p);
+GPRAPI int gpr_subprocess_join(gpr_subprocess *p);
+GPRAPI void gpr_subprocess_interrupt(gpr_subprocess *p);
 
 #ifdef __cplusplus
 }  // extern "C"
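
A minimal sketch of the renamed subprocess API in use (illustrative only, not part of the diff; the child command is an assumption):

#include <grpc/support/subprocess.h>

/* Illustrative only: spawn a child, wait for it, and clean up. */
static int run_child(void) {
  const char *argv[] = {"/bin/echo", "hello"};
  gpr_subprocess *p = gpr_subprocess_create(2, argv);
  if (p == NULL) return -1;
  int status = gpr_subprocess_join(p); /* exit status; call at most once */
  gpr_subprocess_destroy(p);           /* kills the child if it was never joined */
  return status;
}
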

data/include/grpc/support/thd.h +9 -9
@@ -59,30 +59,30 @@ typedef struct {
    in *t, and return true. If there are insufficient resources, return false.
    If options==NULL, default options are used.
    The thread is immediately runnable, and exits when (*thd_body)() returns. */
-GPR_API int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
-                        const gpr_thd_options *options);
+GPRAPI int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg,
+                       const gpr_thd_options *options);
 
 /* Return a gpr_thd_options struct with all fields set to defaults. */
-GPR_API gpr_thd_options gpr_thd_options_default(void);
+GPRAPI gpr_thd_options gpr_thd_options_default(void);
 
 /* Set the thread to become detached on startup - this is the default. */
-GPR_API void gpr_thd_options_set_detached(gpr_thd_options *options);
+GPRAPI void gpr_thd_options_set_detached(gpr_thd_options *options);
 
 /* Set the thread to become joinable - mutually exclusive with detached. */
-GPR_API void gpr_thd_options_set_joinable(gpr_thd_options *options);
+GPRAPI void gpr_thd_options_set_joinable(gpr_thd_options *options);
 
 /* Returns non-zero if the option detached is set. */
-GPR_API int gpr_thd_options_is_detached(const gpr_thd_options *options);
+GPRAPI int gpr_thd_options_is_detached(const gpr_thd_options *options);
 
 /* Returns non-zero if the option joinable is set. */
-GPR_API int gpr_thd_options_is_joinable(const gpr_thd_options *options);
+GPRAPI int gpr_thd_options_is_joinable(const gpr_thd_options *options);
 
 /* Returns the identifier of the current thread. */
-GPR_API gpr_thd_id gpr_thd_currentid(void);
+GPRAPI gpr_thd_id gpr_thd_currentid(void);
 
 /* Blocks until the specified thread properly terminates.
    Calling this on a detached thread has unpredictable results. */
-GPR_API void gpr_thd_join(gpr_thd_id t);
+GPRAPI void gpr_thd_join(gpr_thd_id t);
 
 #ifdef __cplusplus
 }
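
The declarations above keep their existing semantics; only the export macro changes. A minimal sketch of a joinable thread using these functions (illustrative only, not part of the diff; the worker body is an assumption):

#include <grpc/support/thd.h>

static void worker(void *arg) { (void)arg; /* do some work */ }

/* Illustrative only: start a joinable thread and wait for it to exit. */
static void spawn_and_join(void) {
  gpr_thd_options options = gpr_thd_options_default();
  gpr_thd_options_set_joinable(&options);
  gpr_thd_id tid;
  if (gpr_thd_new(&tid, worker, NULL, &options)) {
    gpr_thd_join(tid); /* only meaningful for joinable threads */
  }
}
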

data/include/grpc/support/useful.h +3 -1
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -72,4 +72,6 @@
                   0x0f0f0f0f) % \
    255)
 
+#define GPR_ICMP(a, b) ((a) < (b) ? -1 : ((a) > (b) ? 1 : 0))
+
 #endif /* GRPC_SUPPORT_USEFUL_H */
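
GPR_ICMP is a new three-way integer comparison helper: it evaluates to -1, 0 or 1 depending on whether a is less than, equal to, or greater than b. A minimal sketch (illustrative only, not part of the diff; the qsort comparator is an assumption):

#include <grpc/support/useful.h>
#include <stdlib.h>

/* Illustrative only: GPR_ICMP as a qsort-style comparator. */
static int compare_ints(const void *a, const void *b) {
  int lhs = *(const int *)a;
  int rhs = *(const int *)b;
  return GPR_ICMP(lhs, rhs);
}

static void sort_ints(int *values, size_t n) {
  qsort(values, n, sizeof(int), compare_ints);
}
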

data/src/core/census/context.c +64 -85
@@ -60,10 +60,10 @@
 // limit of 255 for both CENSUS_MAX_TAG_KV_LEN and CENSUS_MAX_PROPAGATED_TAGS.
 // * Keep all tag information (keys/values/flags) in a single memory buffer,
 //   that can be directly copied to the wire.
-// * Binary tags share the same structure as, but are encoded separately from,
-//   non-binary tags. This is primarily because non-binary tags are far more
-//   likely to be repeated across multiple RPC calls, so are more efficiently
-//   cached and compressed in any metadata schemes.
+
+// min and max valid chars in tag keys and values. All printable ASCII is OK.
+#define MIN_VALID_TAG_CHAR 32   // ' '
+#define MAX_VALID_TAG_CHAR 126  // '~'
 
 // Structure representing a set of tags. Essentially a count of number of tags
 // present, and pointer to a chunk of memory that contains the per-tag details.
@@ -77,7 +77,7 @@ struct tag_set {
   char *kvm;  // key/value memory. Consists of repeated entries of:
               //   Offset  Size  Description
               //     0      1    Key length, including trailing 0. (K)
-              //     1      1    Value length. (V)
+              //     1      1    Value length, including trailing 0 (V)
               //     2      1    Flags
               //     3      K    Key bytes
               //     3 + K  V    Value bytes
@@ -108,19 +108,36 @@ struct raw_tag {
 #define CENSUS_TAG_DELETED CENSUS_TAG_RESERVED
 #define CENSUS_TAG_IS_DELETED(flags) (flags & CENSUS_TAG_DELETED)
 
-// Primary (external) representation of a context. Composed of 3 underlying
-// tag_set structs, one for each of the binary/printable propagated tags, and
-// one for everything else. This is to efficiently support tag
-// encoding/decoding.
+// Primary representation of a context. Composed of 2 underlying tag_set
+// structs, one each for propagated and local (non-propagated) tags. This is
+// to efficiently support tag encoding/decoding.
+// TODO(aveitch): need to add tracing id's/structure.
 struct census_context {
-  struct tag_set tags[3];
+  struct tag_set tags[2];
   census_context_status status;
 };
 
 // Indices into the tags member of census_context
 #define PROPAGATED_TAGS 0
-#define PROPAGATED_BINARY_TAGS 1
-#define LOCAL_TAGS 2
+#define LOCAL_TAGS 1
+
+// Validate (check all characters are in range and size is less than limit) a
+// key or value string. Returns 0 if the string is invalid, or the length
+// (including terminator) if valid.
+static size_t validate_tag(const char *kv) {
+  size_t len = 1;
+  char ch;
+  while ((ch = *kv++) != 0) {
+    if (ch < MIN_VALID_TAG_CHAR || ch > MAX_VALID_TAG_CHAR) {
+      return 0;
+    }
+    len++;
+  }
+  if (len > CENSUS_MAX_TAG_KV_LEN) {
+    return 0;
+  }
+  return len;
+}
 
 // Extract a raw tag given a pointer (raw) to the tag header. Allow for some
 // extra bytes in the tag header (see encode/decode functions for usage: this
@@ -166,9 +183,7 @@ static bool context_delete_tag(census_context *context, const census_tag *tag,
                                size_t key_len) {
   return (
       tag_set_delete_tag(&context->tags[LOCAL_TAGS], tag->key, key_len) ||
-      tag_set_delete_tag(&context->tags[PROPAGATED_TAGS], tag->key, key_len) ||
-      tag_set_delete_tag(&context->tags[PROPAGATED_BINARY_TAGS], tag->key,
-                         key_len));
+      tag_set_delete_tag(&context->tags[PROPAGATED_TAGS], tag->key, key_len));
 }
 
 // Add a tag to a tag_set. Return true on success, false if the tag could
@@ -176,11 +191,11 @@ static bool context_delete_tag(census_context *context, const census_tag *tag,
 // not be called if the tag may already exist (in a non-deleted state) in
 // the tag_set, as that would result in two tags with the same key.
 static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
-                            size_t key_len) {
+                            size_t key_len, size_t value_len) {
   if (tags->ntags == CENSUS_MAX_PROPAGATED_TAGS) {
     return false;
   }
-  const size_t tag_size = key_len + tag->value_len + TAG_HEADER_SIZE;
+  const size_t tag_size = key_len + value_len + TAG_HEADER_SIZE;
   if (tags->kvm_used + tag_size > tags->kvm_size) {
     // allocate new memory if needed
     tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE;
@@ -191,13 +206,12 @@ static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
   }
   char *kvp = tags->kvm + tags->kvm_used;
   *kvp++ = (char)key_len;
-  *kvp++ = (char)tag->value_len;
+  *kvp++ = (char)value_len;
   // ensure reserved flags are not used.
-  *kvp++ = (char)(tag->flags & (CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS |
-                                CENSUS_TAG_BINARY));
+  *kvp++ = (char)(tag->flags & (CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS));
   memcpy(kvp, tag->key, key_len);
   kvp += key_len;
-  memcpy(kvp, tag->value, tag->value_len);
+  memcpy(kvp, tag->value, value_len);
   tags->kvm_used += tag_size;
   tags->ntags++;
   tags->ntags_alloc++;
@@ -207,30 +221,20 @@ static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
 // Add/modify/delete a tag to/in a context. Caller must validate that tag key
 // etc. are valid.
 static void context_modify_tag(census_context *context, const census_tag *tag,
-                               size_t key_len) {
+                               size_t key_len, size_t value_len) {
   // First delete the tag if it is already present.
   bool deleted = context_delete_tag(context, tag, key_len);
-  // Determine if we need to add it back.
-  bool call_add = tag->value != NULL && tag->value_len != 0;
   bool added = false;
-  if (call_add) {
-    if (CENSUS_TAG_IS_PROPAGATED(tag->flags)) {
-      if (CENSUS_TAG_IS_BINARY(tag->flags)) {
-        added = tag_set_add_tag(&context->tags[PROPAGATED_BINARY_TAGS], tag,
-                                key_len);
-      } else {
-        added = tag_set_add_tag(&context->tags[PROPAGATED_TAGS], tag, key_len);
-      }
-    } else {
-      added = tag_set_add_tag(&context->tags[LOCAL_TAGS], tag, key_len);
-    }
+  if (CENSUS_TAG_IS_PROPAGATED(tag->flags)) {
+    added = tag_set_add_tag(&context->tags[PROPAGATED_TAGS], tag, key_len,
+                            value_len);
+  } else {
+    added =
+        tag_set_add_tag(&context->tags[LOCAL_TAGS], tag, key_len, value_len);
   }
+
   if (deleted) {
-    if (call_add) {
-      context->status.n_modified_tags++;
-    } else {
-      context->status.n_deleted_tags++;
-    }
+    context->status.n_modified_tags++;
   } else {
     if (added) {
       context->status.n_added_tags++;
@@ -292,8 +296,6 @@ census_context *census_context_create(const census_context *base,
     memset(context, 0, sizeof(census_context));
   } else {
     tag_set_copy(&context->tags[PROPAGATED_TAGS], &base->tags[PROPAGATED_TAGS]);
-    tag_set_copy(&context->tags[PROPAGATED_BINARY_TAGS],
-                 &base->tags[PROPAGATED_BINARY_TAGS]);
     tag_set_copy(&context->tags[LOCAL_TAGS], &base->tags[LOCAL_TAGS]);
     memset(&context->status, 0, sizeof(context->status));
   }
@@ -301,22 +303,29 @@ census_context *census_context_create(const census_context *base,
   // the context to add/replace/delete as required.
   for (int i = 0; i < ntags; i++) {
     const census_tag *tag = &tags[i];
-    size_t key_len = strlen(tag->key) + 1;
-    // ignore the tag if it is too long/short.
-    if (key_len != 1 && key_len <= CENSUS_MAX_TAG_KV_LEN &&
-        tag->value_len <= CENSUS_MAX_TAG_KV_LEN) {
-      context_modify_tag(context, tag, key_len);
-    } else {
+    size_t key_len = validate_tag(tag->key);
+    // ignore the tag if it is invalid or too short.
+    if (key_len <= 1) {
       context->status.n_invalid_tags++;
+    } else {
+      if (tag->value != NULL) {
+        size_t value_len = validate_tag(tag->value);
+        if (value_len != 0) {
+          context_modify_tag(context, tag, key_len, value_len);
+        } else {
+          context->status.n_invalid_tags++;
+        }
+      } else {
+        if (context_delete_tag(context, tag, key_len)) {
+          context->status.n_deleted_tags++;
+        }
+      }
     }
   }
   // Remove any deleted tags, update status if needed, and return.
   tag_set_flatten(&context->tags[PROPAGATED_TAGS]);
-  tag_set_flatten(&context->tags[PROPAGATED_BINARY_TAGS]);
   tag_set_flatten(&context->tags[LOCAL_TAGS]);
   context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
-  context->status.n_propagated_binary_tags =
-      context->tags[PROPAGATED_BINARY_TAGS].ntags;
   context->status.n_local_tags = context->tags[LOCAL_TAGS].ntags;
   if (status) {
     *status = &context->status;
@@ -331,7 +340,6 @@ const census_context_status *census_context_get_status(
 
 void census_context_destroy(census_context *context) {
   gpr_free(context->tags[PROPAGATED_TAGS].kvm);
-  gpr_free(context->tags[PROPAGATED_BINARY_TAGS].kvm);
   gpr_free(context->tags[LOCAL_TAGS].kvm);
   gpr_free(context);
 }
@@ -343,9 +351,6 @@ void census_context_initialize_iterator(const census_context *context,
   if (context->tags[PROPAGATED_TAGS].ntags != 0) {
     iterator->base = PROPAGATED_TAGS;
     iterator->kvm = context->tags[PROPAGATED_TAGS].kvm;
-  } else if (context->tags[PROPAGATED_BINARY_TAGS].ntags != 0) {
-    iterator->base = PROPAGATED_BINARY_TAGS;
-    iterator->kvm = context->tags[PROPAGATED_BINARY_TAGS].kvm;
   } else if (context->tags[LOCAL_TAGS].ntags != 0) {
     iterator->base = LOCAL_TAGS;
     iterator->kvm = context->tags[LOCAL_TAGS].kvm;
@@ -363,7 +368,6 @@ int census_context_next_tag(census_context_iterator *iterator,
   iterator->kvm = decode_tag(&raw, iterator->kvm, 0);
   tag->key = raw.key;
   tag->value = raw.value;
-  tag->value_len = raw.value_len;
   tag->flags = raw.flags;
   if (++iterator->index == iterator->context->tags[iterator->base].ntags) {
     do {
@@ -388,7 +392,6 @@ static bool tag_set_get_tag(const struct tag_set *tags, const char *key,
     if (key_len == raw.key_len && memcmp(raw.key, key, key_len) == 0) {
       tag->key = raw.key;
       tag->value = raw.value;
-      tag->value_len = raw.value_len;
       tag->flags = raw.flags;
       return true;
     }
@@ -403,8 +406,6 @@ int census_context_get_tag(const census_context *context, const char *key,
     return 0;
   }
   if (tag_set_get_tag(&context->tags[PROPAGATED_TAGS], key, key_len, tag) ||
-      tag_set_get_tag(&context->tags[PROPAGATED_BINARY_TAGS], key, key_len,
-                      tag) ||
       tag_set_get_tag(&context->tags[LOCAL_TAGS], key, key_len, tag)) {
     return 1;
   }
@@ -447,21 +448,9 @@ static size_t tag_set_encode(const struct tag_set *tags, char *buffer,
   return ENCODED_HEADER_SIZE + tags->kvm_used;
 }
 
-char *census_context_encode(const census_context *context, char *buffer,
-                            size_t buf_size, size_t *print_buf_size,
-                            size_t *bin_buf_size) {
-  *print_buf_size =
-      tag_set_encode(&context->tags[PROPAGATED_TAGS], buffer, buf_size);
-  if (*print_buf_size == 0) {
-    return NULL;
-  }
-  char *b_buffer = buffer + *print_buf_size;
-  *bin_buf_size = tag_set_encode(&context->tags[PROPAGATED_BINARY_TAGS],
-                                 b_buffer, buf_size - *print_buf_size);
-  if (*bin_buf_size == 0) {
-    return NULL;
-  }
-  return b_buffer;
+size_t census_context_encode(const census_context *context, char *buffer,
+                             size_t buf_size) {
+  return tag_set_encode(&context->tags[PROPAGATED_TAGS], buffer, buf_size);
 }
 
 // Decode a tag set.
@@ -506,8 +495,7 @@ static void tag_set_decode(struct tag_set *tags, const char *buffer,
   }
 }
 
-census_context *census_context_decode(const char *buffer, size_t size,
-                                      const char *bin_buffer, size_t bin_size) {
+census_context *census_context_decode(const char *buffer, size_t size) {
   census_context *context = gpr_malloc(sizeof(census_context));
   memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set));
   if (buffer == NULL) {
@@ -515,16 +503,7 @@ census_context *census_context_decode(const char *buffer, size_t size,
   } else {
     tag_set_decode(&context->tags[PROPAGATED_TAGS], buffer, size);
   }
-  if (bin_buffer == NULL) {
-    memset(&context->tags[PROPAGATED_BINARY_TAGS], 0, sizeof(struct tag_set));
-  } else {
-    tag_set_decode(&context->tags[PROPAGATED_BINARY_TAGS], bin_buffer,
-                   bin_size);
-  }
   memset(&context->status, 0, sizeof(context->status));
   context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
-  context->status.n_propagated_binary_tags =
-      context->tags[PROPAGATED_BINARY_TAGS].ntags;
-  // TODO(aveitch): check that BINARY flag is correct for each type.
   return context;
 }
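
Taken together, these hunks drop the separate binary tag set: a census_context now carries only propagated and local tag sets, tag keys and values are validated as printable NUL-terminated strings, a NULL value deletes a tag, and encode/decode operate on a single buffer. A rough usage sketch (illustrative only, not part of the diff; it assumes the census_tag fields key/value/flags and the census_context_create/encode/decode declarations from the matching census.h in this release):

#include <grpc/census.h>

/* Illustrative only: create a context with one propagated tag and round-trip
   it through the new single-buffer encode/decode. */
static void tag_roundtrip(void) {
  census_tag tag;
  tag.key = "method";
  tag.value = "ListBooks";
  tag.flags = CENSUS_TAG_PROPAGATE;

  const census_context_status *status;
  census_context *ctx = census_context_create(NULL, &tag, 1, &status);

  char buffer[512];
  size_t used = census_context_encode(ctx, buffer, sizeof(buffer));
  if (used != 0) {
    census_context *decoded = census_context_decode(buffer, used);
    census_context_destroy(decoded);
  }
  census_context_destroy(ctx);
}
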

data/src/core/census/grpc_filter.c +2 -2
@@ -107,8 +107,8 @@ static void server_mutate_op(grpc_call_element *elem,
   if (op->recv_initial_metadata) {
     /* substitute our callback for the op callback */
     calld->recv_initial_metadata = op->recv_initial_metadata;
-    calld->on_done_recv = op->on_complete;
-    op->on_complete = &calld->finish_recv;
+    calld->on_done_recv = op->recv_initial_metadata_ready;
+    op->recv_initial_metadata_ready = &calld->finish_recv;
   }
 }
 
data/src/core/census/mlog.c +600 -0
@@ -0,0 +1,600 @@
+/*
+ *
+ * Copyright 2015-2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Implements an efficient in-memory log, optimized for multiple writers and
+// a single reader. Available log space is divided up in blocks of
+// CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the following
+// three data structures:
+// - Free blocks (free_block_list)
+// - Blocks with unread data (dirty_block_list)
+// - Blocks currently attached to cores (core_local_blocks[])
+//
+// census_log_start_write() moves a block from core_local_blocks[] to the end of
+// dirty_block_list when block:
+// - is out-of-space OR
+// - has an incomplete record (an incomplete record occurs when a thread calls
+//   census_log_start_write() and is context-switched before calling
+//   census_log_end_write()
+// So, blocks in dirty_block_list are ordered, from oldest to newest, by the
+// time when block is detached from the core.
+//
+// census_log_read_next() first iterates over dirty_block_list and then
+// core_local_blocks[]. It moves completely read blocks from dirty_block_list
+// to free_block_list. Blocks in core_local_blocks[] are not freed, even when
+// completely read.
+//
+// If the log is configured to discard old records and free_block_list is empty,
+// census_log_start_write() iterates over dirty_block_list to allocate a
+// new block. It moves the oldest available block (no pending read/write) to
+// core_local_blocks[].
+//
+// core_local_block_struct is used to implement a map from core id to the block
+// associated with that core. This mapping is advisory. It is possible that the
+// block returned by this mapping is no longer associated with that core. This
+// mapping is updated, lazily, by census_log_start_write().
+//
+// Locking in block struct:
+//
+// Exclusive g_log.lock must be held before calling any functions operating on
+// block structs except census_log_start_write() and census_log_end_write().
+//
+// Writes to a block are serialized via writer_lock. census_log_start_write()
+// acquires this lock and census_log_end_write() releases it. On failure to
+// acquire the lock, writer allocates a new block for the current core and
+// updates core_local_block accordingly.
+//
+// Simultaneous read and write access is allowed. Readers can safely read up to
+// committed bytes (bytes_committed).
+//
+// reader_lock protects the block, currently being read, from getting recycled.
+// start_read() acquires reader_lock and end_read() releases the lock.
+//
+// Read/write access to a block is disabled via try_disable_access(). It returns
+// with both writer_lock and reader_lock held. These locks are subsequently
+// released by enable_access() to enable access to the block.
+//
+// A note on naming: Most function/struct names are prepended by cl_
+// (shorthand for census_log). Further, functions that manipulate structures
+// include the name of the structure, which will be passed as the first
+// argument. E.g. cl_block_initialize() will initialize a cl_block.
+
+#include "src/core/census/mlog.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/cpu.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
+#include <stdbool.h>
+#include <string.h>
+
+// End of platform specific code
+
+typedef struct census_log_block_list_struct {
+  struct census_log_block_list_struct* next;
+  struct census_log_block_list_struct* prev;
+  struct census_log_block* block;
+} cl_block_list_struct;
+
+typedef struct census_log_block {
+  // Pointer to underlying buffer.
+  char* buffer;
+  gpr_atm writer_lock;
+  gpr_atm reader_lock;
+  // Keeps completely written bytes. Declared atomic because accessed
+  // simultaneously by reader and writer.
+  gpr_atm bytes_committed;
+  // Bytes already read.
+  size_t bytes_read;
+  // Links for list.
+  cl_block_list_struct link;
+  // We want this structure to be cacheline aligned. We assume the following
+  // sizes for the various parts on 32/64bit systems:
+  //   type                 32b size    64b size
+  //   char*                   4           8
+  //   3x gpr_atm             12          24
+  //   size_t                  4           8
+  //   cl_block_list_struct   12          24
+  //   TOTAL                  32          64
+  //
+  // Depending on the size of our cacheline and the architecture, we
+  // selectively add char buffering to this structure. The size is checked
+  // via assert in census_log_initialize().
+#if defined(GPR_ARCH_64)
+#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
+#else
+#if defined(GPR_ARCH_32)
+#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
+#else
+#error "Unknown architecture"
+#endif
+#endif
+#if CL_BLOCK_PAD_SIZE > 0
+  char padding[CL_BLOCK_PAD_SIZE];
+#endif
+} cl_block;
+
+// A list of cl_blocks, doubly-linked through cl_block::link.
+typedef struct census_log_block_list {
+  int32_t count;            // Number of items in list.
+  cl_block_list_struct ht;  // head/tail of linked list.
+} cl_block_list;
+
+// Cacheline aligned block pointers to avoid false sharing. Block pointer must
+// be initialized via set_block(), before calling other functions
+typedef struct census_log_core_local_block {
+  gpr_atm block;
+// Ensure cachline alignment: we assume sizeof(gpr_atm) == 4 or 8
+#if defined(GPR_ARCH_64)
+#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
+#else
+#if defined(GPR_ARCH_32)
+#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
+#else
+#error "Unknown architecture"
+#endif
+#endif
+#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
+  char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
+#endif
+} cl_core_local_block;
+
+struct census_log {
+  int discard_old_records;
+  // Number of cores (aka hardware-contexts)
+  unsigned num_cores;
+  // number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log
+  uint32_t num_blocks;
+  cl_block* blocks;                        // Block metadata.
+  cl_core_local_block* core_local_blocks;  // Keeps core to block mappings.
+  gpr_mu lock;
+  int initialized;  // has log been initialized?
+  // Keeps the state of the reader iterator. A value of 0 indicates that
+  // iterator has reached the end. census_log_init_reader() resets the value
+  // to num_core to restart iteration.
+  uint32_t read_iterator_state;
+  // Points to the block being read. If non-NULL, the block is locked for
+  // reading(block_being_read_->reader_lock is held).
+  cl_block* block_being_read;
+  char* buffer;
+  cl_block_list free_block_list;
+  cl_block_list dirty_block_list;
+  gpr_atm out_of_space_count;
+};
+
+// Single internal log.
+static struct census_log g_log;
+
+// Functions that operate on an atomic memory location used as a lock.
+
+// Returns non-zero if lock is acquired.
+static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); }
+
+static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); }
+
+// Functions that operate on cl_core_local_block's.
+
+static void cl_core_local_block_set_block(cl_core_local_block* clb,
+                                          cl_block* block) {
+  gpr_atm_rel_store(&clb->block, (gpr_atm)block);
+}
+
+static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) {
+  return (cl_block*)gpr_atm_acq_load(&clb->block);
+}
+
+// Functions that operate on cl_block_list_struct's.
+
+static void cl_block_list_struct_initialize(cl_block_list_struct* bls,
+                                            cl_block* block) {
+  bls->next = bls->prev = bls;
+  bls->block = block;
+}
+
+// Functions that operate on cl_block_list's.
+
+static void cl_block_list_initialize(cl_block_list* list) {
+  list->count = 0;
+  cl_block_list_struct_initialize(&list->ht, NULL);
+}
+
+// Returns head of *this, or NULL if empty.
+static cl_block* cl_block_list_head(cl_block_list* list) {
+  return list->ht.next->block;
+}
+
+// Insert element *e after *pos.
+static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos,
+                                 cl_block_list_struct* e) {
+  list->count++;
+  e->next = pos->next;
+  e->prev = pos;
+  e->next->prev = e;
+  e->prev->next = e;
+}
+
+// Insert block at the head of the list
+static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) {
+  cl_block_list_insert(list, &list->ht, &block->link);
+}
+
+// Insert block at the tail of the list.
+static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) {
+  cl_block_list_insert(list, list->ht.prev, &block->link);
+}
+
+// Removes block *b. Requires *b be in the list.
+static void cl_block_list_remove(cl_block_list* list, cl_block* b) {
+  list->count--;
+  b->link.next->prev = b->link.prev;
+  b->link.prev->next = b->link.next;
+}
+
+// Functions that operate on cl_block's
+
+static void cl_block_initialize(cl_block* block, char* buffer) {
+  block->buffer = buffer;
+  gpr_atm_rel_store(&block->writer_lock, 0);
+  gpr_atm_rel_store(&block->reader_lock, 0);
+  gpr_atm_rel_store(&block->bytes_committed, 0);
+  block->bytes_read = 0;
+  cl_block_list_struct_initialize(&block->link, block);
+}
+
+// Guards against exposing partially written buffer to the reader.
+static void cl_block_set_bytes_committed(cl_block* block,
+                                         size_t bytes_committed) {
+  gpr_atm_rel_store(&block->bytes_committed, (gpr_atm)bytes_committed);
+}
+
+static size_t cl_block_get_bytes_committed(cl_block* block) {
+  return (size_t)gpr_atm_acq_load(&block->bytes_committed);
+}
+
+// Tries to disable future read/write access to this block. Succeeds if:
+// - no in-progress write AND
+// - no in-progress read AND
+// - 'discard_data' set to true OR no unread data
+// On success, clears the block state and returns with writer_lock_ and
+// reader_lock_ held. These locks are released by a subsequent
+// cl_block_access_enable() call.
+static bool cl_block_try_disable_access(cl_block* block, int discard_data) {
+  if (!cl_try_lock(&block->writer_lock)) {
+    return false;
+  }
+  if (!cl_try_lock(&block->reader_lock)) {
+    cl_unlock(&block->writer_lock);
+    return false;
+  }
+  if (!discard_data &&
+      (block->bytes_read != cl_block_get_bytes_committed(block))) {
+    cl_unlock(&block->reader_lock);
+    cl_unlock(&block->writer_lock);
+    return false;
+  }
+  cl_block_set_bytes_committed(block, 0);
+  block->bytes_read = 0;
+  return true;
+}
+
+static void cl_block_enable_access(cl_block* block) {
+  cl_unlock(&block->reader_lock);
+  cl_unlock(&block->writer_lock);
+}
+
+// Returns with writer_lock held.
+static void* cl_block_start_write(cl_block* block, size_t size) {
+  if (!cl_try_lock(&block->writer_lock)) {
+    return NULL;
+  }
+  size_t bytes_committed = cl_block_get_bytes_committed(block);
+  if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
+    cl_unlock(&block->writer_lock);
+    return NULL;
+  }
+  return block->buffer + bytes_committed;
+}
+
+// Releases writer_lock and increments committed bytes by 'bytes_written'.
+// 'bytes_written' must be <= 'size' specified in the corresponding
+// StartWrite() call. This function is thread-safe.
+static void cl_block_end_write(cl_block* block, size_t bytes_written) {
+  cl_block_set_bytes_committed(
+      block, cl_block_get_bytes_committed(block) + bytes_written);
+  cl_unlock(&block->writer_lock);
+}
+
+// Returns a pointer to the first unread byte in buffer. The number of bytes
+// available are returned in 'bytes_available'. Acquires reader lock that is
+// released by a subsequent cl_block_end_read() call. Returns NULL if:
+// - read in progress
+// - no data available
+static void* cl_block_start_read(cl_block* block, size_t* bytes_available) {
+  if (!cl_try_lock(&block->reader_lock)) {
+    return NULL;
+  }
+  // bytes_committed may change from under us. Use bytes_available to update
+  // bytes_read below.
+  size_t bytes_committed = cl_block_get_bytes_committed(block);
+  GPR_ASSERT(bytes_committed >= block->bytes_read);
+  *bytes_available = bytes_committed - block->bytes_read;
+  if (*bytes_available == 0) {
+    cl_unlock(&block->reader_lock);
+    return NULL;
+  }
+  void* record = block->buffer + block->bytes_read;
+  block->bytes_read += *bytes_available;
+  return record;
+}
+
+static void cl_block_end_read(cl_block* block) {
+  cl_unlock(&block->reader_lock);
+}
+
+// Internal functions operating on g_log
+
+// Allocates a new free block (or recycles an available dirty block if log is
+// configured to discard old records). Returns NULL if out-of-space.
+static cl_block* cl_allocate_block(void) {
+  cl_block* block = cl_block_list_head(&g_log.free_block_list);
+  if (block != NULL) {
+    cl_block_list_remove(&g_log.free_block_list, block);
+    return block;
+  }
+  if (!g_log.discard_old_records) {
+    // No free block and log is configured to keep old records.
+    return NULL;
+  }
+  // Recycle dirty block. Start from the oldest.
+  for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
+       block = block->link.next->block) {
+    if (cl_block_try_disable_access(block, 1 /* discard data */)) {
+      cl_block_list_remove(&g_log.dirty_block_list, block);
+      return block;
+    }
+  }
+  return NULL;
+}
+
+// Allocates a new block and updates core id => block mapping. 'old_block'
+// points to the block that the caller thinks is attached to
+// 'core_id'. 'old_block' may be NULL. Returns true if:
+// - allocated a new block OR
+// - 'core_id' => 'old_block' mapping changed (another thread allocated a
+//   block before lock was acquired).
+static bool cl_allocate_core_local_block(uint32_t core_id,
+                                         cl_block* old_block) {
+  // Now that we have the lock, check if core-local mapping has changed.
+  cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id];
+  cl_block* block = cl_core_local_block_get_block(core_local_block);
+  if ((block != NULL) && (block != old_block)) {
+    return true;
+  }
+  if (block != NULL) {
+    cl_core_local_block_set_block(core_local_block, NULL);
+    cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
+  }
+  block = cl_allocate_block();
+  if (block == NULL) {
+    return false;
+  }
+  cl_core_local_block_set_block(core_local_block, block);
+  cl_block_enable_access(block);
+  return true;
+}
+
+static cl_block* cl_get_block(void* record) {
+  uintptr_t p = (uintptr_t)((char*)record - g_log.buffer);
+  uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
+  return &g_log.blocks[index];
+}
+
+// Gets the next block to read and tries to free 'prev' block (if not NULL).
+// Returns NULL if reached the end.
+static cl_block* cl_next_block_to_read(cl_block* prev) {
+  cl_block* block = NULL;
+  if (g_log.read_iterator_state == g_log.num_cores) {
+    // We are traversing dirty list; find the next dirty block.
+    if (prev != NULL) {
+      // Try to free the previous block if there is no unread data. This
+      // block
+      // may have unread data if previously incomplete record completed
+      // between
+      // read_next() calls.
+      block = prev->link.next->block;
+      if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
+        cl_block_list_remove(&g_log.dirty_block_list, prev);
+        cl_block_list_insert_at_head(&g_log.free_block_list, prev);
+      }
+    } else {
+      block = cl_block_list_head(&g_log.dirty_block_list);
+    }
+    if (block != NULL) {
+      return block;
+    }
+    // We are done with the dirty list; moving on to core-local blocks.
+  }
+  while (g_log.read_iterator_state > 0) {
+    g_log.read_iterator_state--;
+    block = cl_core_local_block_get_block(
+        &g_log.core_local_blocks[g_log.read_iterator_state]);
+    if (block != NULL) {
+      return block;
+    }
+  }
+  return NULL;
+}
+
+#define CL_LOG_2_MB 20  // 2^20 = 1MB
+
+// External functions: primary stats_log interface
+void census_log_initialize(size_t size_in_mb, int discard_old_records) {
+  // Check cacheline alignment.
+  GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
+  GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
+  GPR_ASSERT(!g_log.initialized);
+  g_log.discard_old_records = discard_old_records;
+  g_log.num_cores = gpr_cpu_num_cores();
+  // Ensure that we will not get any overflow in calaculating num_blocks
+  GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE);
+  GPR_ASSERT(size_in_mb < 1000);
+  // Ensure at least 2x as many blocks as there are cores.
+  g_log.num_blocks =
+      (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >>
+                                                 CENSUS_LOG_2_MAX_RECORD_SIZE);
+  gpr_mu_init(&g_log.lock);
+  g_log.read_iterator_state = 0;
+  g_log.block_being_read = NULL;
+  g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
+      g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
+  memset(g_log.core_local_blocks, 0,
+         g_log.num_cores * sizeof(cl_core_local_block));
+  g_log.blocks = (cl_block*)gpr_malloc_aligned(
+      g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
+  memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
+  g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
+  memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
+  cl_block_list_initialize(&g_log.free_block_list);
+  cl_block_list_initialize(&g_log.dirty_block_list);
+  for (uint32_t i = 0; i < g_log.num_blocks; ++i) {
+    cl_block* block = g_log.blocks + i;
+    cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i));
+    cl_block_try_disable_access(block, 1 /* discard data */);
+    cl_block_list_insert_at_tail(&g_log.free_block_list, block);
+  }
+  gpr_atm_rel_store(&g_log.out_of_space_count, 0);
+  g_log.initialized = 1;
+}
+
+void census_log_shutdown(void) {
+  GPR_ASSERT(g_log.initialized);
+  gpr_mu_destroy(&g_log.lock);
+  gpr_free_aligned(g_log.core_local_blocks);
+  g_log.core_local_blocks = NULL;
+  gpr_free_aligned(g_log.blocks);
+  g_log.blocks = NULL;
+  gpr_free(g_log.buffer);
+  g_log.buffer = NULL;
+  g_log.initialized = 0;
+}
+
+void* census_log_start_write(size_t size) {
+  // Used to bound number of times block allocation is attempted.
+  GPR_ASSERT(size > 0);
+  GPR_ASSERT(g_log.initialized);
+  if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
+    return NULL;
+  }
+  uint32_t attempts_remaining = g_log.num_blocks;
+  uint32_t core_id = gpr_cpu_current_cpu();
+  do {
+    void* record = NULL;
+    cl_block* block =
+        cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
+    if (block && (record = cl_block_start_write(block, size))) {
+      return record;
+    }
+    // Need to allocate a new block. We are here if:
+    // - No block associated with the core OR
+    // - Write in-progress on the block OR
+    // - block is out of space
+    gpr_mu_lock(&g_log.lock);
+    bool allocated = cl_allocate_core_local_block(core_id, block);
+    gpr_mu_unlock(&g_log.lock);
+    if (!allocated) {
+      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
+      return NULL;
+    }
+  } while (attempts_remaining--);
+  // Give up.
+  gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
+  return NULL;
+}
+
+void census_log_end_write(void* record, size_t bytes_written) {
+  GPR_ASSERT(g_log.initialized);
+  cl_block_end_write(cl_get_block(record), bytes_written);
+}
+
+void census_log_init_reader(void) {
+  GPR_ASSERT(g_log.initialized);
+  gpr_mu_lock(&g_log.lock);
+  // If a block is locked for reading unlock it.
+  if (g_log.block_being_read != NULL) {
+    cl_block_end_read(g_log.block_being_read);
+    g_log.block_being_read = NULL;
+  }
+  g_log.read_iterator_state = g_log.num_cores;
+  gpr_mu_unlock(&g_log.lock);
+}
+
+const void* census_log_read_next(size_t* bytes_available) {
+  GPR_ASSERT(g_log.initialized);
+  gpr_mu_lock(&g_log.lock);
+  if (g_log.block_being_read != NULL) {
+    cl_block_end_read(g_log.block_being_read);
+  }
+  do {
+    g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
+    if (g_log.block_being_read != NULL) {
+      void* record =
+          cl_block_start_read(g_log.block_being_read, bytes_available);
+      if (record != NULL) {
+        gpr_mu_unlock(&g_log.lock);
+        return record;
+      }
+    }
+  } while (g_log.block_being_read != NULL);
+  gpr_mu_unlock(&g_log.lock);
+  return NULL;
+}
+
+size_t census_log_remaining_space(void) {
+  GPR_ASSERT(g_log.initialized);
+  size_t space = 0;
+  gpr_mu_lock(&g_log.lock);
+  if (g_log.discard_old_records) {
+    // Remaining space is not meaningful; just return the entire log space.
+    space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
+  } else {
+    GPR_ASSERT(g_log.free_block_list.count >= 0);
+    space = (size_t)g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
+  }
+  gpr_mu_unlock(&g_log.lock);
+  return space;
+}
+
+int64_t census_log_out_of_space_count(void) {
+  GPR_ASSERT(g_log.initialized);
+  return gpr_atm_acq_load(&g_log.out_of_space_count);
+}
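
The new mlog module implements the in-memory census log described in its header comment above. Based only on the functions added in this file, a rough end-to-end sketch of the public API (illustrative only, not part of the diff):

#include <string.h>
#include "src/core/census/mlog.h"

/* Illustrative only: write one record, then drain the log from a reader. */
static void mlog_smoke_test(void) {
  census_log_initialize(1 /* size in MB */, 1 /* discard old records */);

  void *slot = census_log_start_write(64);
  if (slot != NULL) {
    memset(slot, 0, 64);
    census_log_end_write(slot, 64); /* commit exactly the bytes written */
  }

  census_log_init_reader();
  size_t available;
  for (const void *record = census_log_read_next(&available); record != NULL;
       record = census_log_read_next(&available)) {
    /* 'available' committed bytes starting at 'record' are safe to read. */
  }

  census_log_shutdown();
}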