kanayago 0.1.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +12 -0
  3. data/.ruby-version +1 -0
  4. data/README.md +20 -29
  5. data/Rakefile +43 -96
  6. data/ext/kanayago/extconf.rb +6 -0
  7. data/ext/kanayago/id.h +12 -5
  8. data/ext/kanayago/id_table.h +15 -0
  9. data/ext/kanayago/include/ruby/st.h +199 -0
  10. data/ext/kanayago/internal/array.h +3 -0
  11. data/ext/kanayago/internal/basic_operators.h +1 -0
  12. data/ext/kanayago/internal/bignum.h +1 -0
  13. data/ext/kanayago/internal/bits.h +82 -0
  14. data/ext/kanayago/internal/encoding.h +4 -1
  15. data/ext/kanayago/internal/error.h +33 -0
  16. data/ext/kanayago/internal/fixnum.h +1 -0
  17. data/ext/kanayago/internal/gc.h +47 -11
  18. data/ext/kanayago/internal/hash.h +3 -0
  19. data/ext/kanayago/internal/imemo.h +93 -32
  20. data/ext/kanayago/internal/io.h +30 -7
  21. data/ext/kanayago/internal/namespace.h +81 -0
  22. data/ext/kanayago/internal/numeric.h +1 -0
  23. data/ext/kanayago/internal/parse.h +17 -3
  24. data/ext/kanayago/internal/re.h +7 -2
  25. data/ext/kanayago/internal/sanitizers.h +88 -39
  26. data/ext/kanayago/internal/set_table.h +70 -0
  27. data/ext/kanayago/internal/string.h +33 -16
  28. data/ext/kanayago/internal/symbol.h +4 -3
  29. data/ext/kanayago/internal/thread.h +42 -9
  30. data/ext/kanayago/internal/variable.h +13 -11
  31. data/ext/kanayago/internal/vm.h +4 -5
  32. data/ext/kanayago/internal.h +0 -3
  33. data/ext/kanayago/kanayago.c +554 -235
  34. data/ext/kanayago/kanayago.h +5 -0
  35. data/ext/kanayago/literal_node.c +343 -0
  36. data/ext/kanayago/literal_node.h +30 -0
  37. data/ext/kanayago/method.h +18 -2
  38. data/ext/kanayago/node.c +7 -1
  39. data/ext/kanayago/node.h +14 -3
  40. data/ext/kanayago/parse.c +7602 -7156
  41. data/ext/kanayago/parse.h +39 -39
  42. data/ext/kanayago/parser_st.c +2 -1
  43. data/ext/kanayago/pattern_node.c +78 -0
  44. data/ext/kanayago/pattern_node.h +13 -0
  45. data/ext/kanayago/ruby_atomic.h +43 -0
  46. data/ext/kanayago/ruby_parser.c +7 -35
  47. data/ext/kanayago/rubyparser.h +83 -80
  48. data/ext/kanayago/scope_node.c +34 -0
  49. data/ext/kanayago/scope_node.h +8 -0
  50. data/ext/kanayago/shape.h +321 -111
  51. data/ext/kanayago/st.c +905 -21
  52. data/ext/kanayago/statement_node.c +795 -0
  53. data/ext/kanayago/statement_node.h +66 -0
  54. data/ext/kanayago/string_node.c +192 -0
  55. data/ext/kanayago/string_node.h +19 -0
  56. data/ext/kanayago/symbol.h +2 -9
  57. data/ext/kanayago/thread_pthread.h +10 -3
  58. data/ext/kanayago/universal_parser.c +1 -20
  59. data/ext/kanayago/variable_node.c +72 -0
  60. data/ext/kanayago/variable_node.h +12 -0
  61. data/ext/kanayago/vm_core.h +205 -71
  62. data/lib/kanayago/literal_node.rb +87 -0
  63. data/lib/kanayago/pattern_node.rb +19 -0
  64. data/lib/kanayago/statement_node.rb +222 -0
  65. data/lib/kanayago/string_node.rb +43 -0
  66. data/lib/kanayago/variable_node.rb +23 -0
  67. data/lib/kanayago/version.rb +1 -1
  68. data/lib/kanayago.rb +22 -0
  69. data/patch/3.4/copy_target.rb +78 -0
  70. data/patch/3.4/kanayago.patch +162 -0
  71. data/patch/head/copy_target.rb +84 -0
  72. data/patch/head/kanayago.patch +162 -0
  73. data/sample/minitest_generator.rb +266 -0
  74. data/sample/test_generator.rb +272 -0
  75. data/typeprof.conf.json +9 -0
  76. metadata +32 -4
  77. data/ext/kanayago/parse.tmp.y +0 -16145
data/ext/kanayago/vm_core.h

@@ -56,7 +56,8 @@
 #define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
 
 #if VM_CHECK_MODE > 0
-#define VM_ASSERT(/*expr, */...) RUBY_ASSERT_WHEN(VM_CHECK_MODE > 0, __VA_ARGS__)
+#define VM_ASSERT(expr, ...) \
+    RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
 #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
 #define RUBY_ASSERT_CRITICAL_SECTION
 #define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
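
Note: the rewritten VM_ASSERT stringizes the asserted expression and accepts an optional printf-style message through RBIMPL_VA_OPT_ARGS. A minimal sketch of the two call forms, assuming an invented counter `cnt` (illustrative only, not code from this diff):

```c
/* Illustrative only; `cnt` is invented for the example. */
VM_ASSERT(cnt > 0);                /* failure report shows the text "cnt > 0" */
VM_ASSERT(cnt > 0, "cnt=%u", cnt); /* appends a formatted diagnostic */
```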
@@ -69,7 +70,30 @@
 #define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
 
 #if defined(RUBY_ASSERT_CRITICAL_SECTION)
-// TODO add documentation
+/*
+ # Critical Section Assertions
+
+ These assertions are used to ensure that context switching does not occur between two points in the code. In theory,
+ such code should already be protected by a mutex, but these assertions are used to ensure that the mutex is held.
+
+ The specific case where it can be useful is where a mutex is held further up the call stack, and the code in question
+ may not directly hold the mutex. In this case, the critical section assertions can be used to ensure that the mutex is
+ held by someone else.
+
+ These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
+ is set.
+
+ ## Example Usage
+
+ ```c
+ RUBY_ASSERT_CRITICAL_SECTION_ENTER();
+ // ... some code which does not invoke rb_vm_check_ints() ...
+ RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
+ ```
+
+ If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
+ `RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
+ */
 extern int ruby_assert_critical_section_entered;
 #define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
 #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)

@@ -94,8 +118,10 @@ extern int ruby_assert_critical_section_entered;
 #include "internal.h"
 #include "internal/array.h"
 #include "internal/basic_operators.h"
+#include "internal/namespace.h"
 #include "internal/sanitizers.h"
 #include "internal/serial.h"
+#include "internal/set_table.h"
 #include "internal/vm.h"
 #include "method.h"
 #include "node.h"

@@ -229,14 +255,14 @@ union ic_serial_entry {
     VALUE data[2];
 };
 
+#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
+
 // imemo_constcache
 struct iseq_inline_constant_cache_entry {
     VALUE flags;
 
-    VALUE value;              // v0
-    VALUE _unused1;           // v1
-    VALUE _unused2;           // v2
-    const rb_cref_t *ic_cref; // v3
+    VALUE value;
+    const rb_cref_t *ic_cref;
 };
 STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
               (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +

@@ -260,7 +286,7 @@ struct iseq_inline_constant_cache {
 };
 
 struct iseq_inline_iv_cache_entry {
-    uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
+    uint64_t value; // dest_shape_id in former half, attr_index in latter half
     ID iv_set_name;
 };
 
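Note: `value` moves from pointer-sized to a fixed 64 bits, with the destination shape id in the upper half and the attribute index in the lower half. A hedged sketch of how such a split could be packed and unpacked (the helper names and the exact 32/32 split are assumptions for illustration, not APIs from this diff):

```c
#include <stdint.h>

typedef uint32_t shape_id_t; /* assumed width for the illustration */

/* Pack dest_shape_id into the upper 32 bits, attr_index into the lower 32. */
static inline uint64_t
iv_cache_pack(shape_id_t dest_shape_id, uint32_t attr_index)
{
    return ((uint64_t)dest_shape_id << 32) | attr_index;
}

static inline shape_id_t
iv_cache_shape_id(uint64_t value)
{
    return (shape_id_t)(value >> 32); /* upper half */
}

static inline uint32_t
iv_cache_attr_index(uint64_t value)
{
    return (uint32_t)value;           /* lower half */
}
```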
@@ -339,8 +365,6 @@ pathobj_realpath(VALUE pathobj)
 }
 
 /* Forward declarations */
-struct rb_rjit_unit;
-
 typedef uintptr_t iseq_bits_t;
 
 #define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)

@@ -369,6 +393,8 @@ enum rb_builtin_attr {
     BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
     // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
     BUILTIN_ATTR_INLINE_BLOCK = 0x04,
+    // The iseq acts like a C method in backtraces.
+    BUILTIN_ATTR_C_TRACE = 0x08,
 };
 
 typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);

@@ -469,6 +495,12 @@ struct rb_iseq_constant_body {
 
     const ID *local_table; /* must free */
 
+    enum lvar_state {
+        lvar_uninitialized,
+        lvar_initialized,
+        lvar_reassigned,
+    } *lvar_states;
+
     /* catch table */
     struct iseq_catch_table *catch_table;
 

@@ -508,31 +540,28 @@ struct rb_iseq_constant_body {
 
     const rb_iseq_t *mandatory_only_iseq;
 
-#if USE_RJIT || USE_YJIT
+#if USE_YJIT || USE_ZJIT
     // Function pointer for JIT code on jit_exec()
     rb_jit_func_t jit_entry;
     // Number of calls on jit_exec()
    long unsigned jit_entry_calls;
-#endif
-
-#if USE_YJIT
     // Function pointer for JIT code on jit_exec_exception()
     rb_jit_func_t jit_exception;
     // Number of calls on jit_exec_exception()
     long unsigned jit_exception_calls;
 #endif
 
-#if USE_RJIT
-    // RJIT stores some data on each iseq.
-    VALUE rjit_blocks;
-#endif
-
 #if USE_YJIT
     // YJIT stores some data on each iseq.
     void *yjit_payload;
     // Used to estimate how frequently this ISEQ gets called
     uint64_t yjit_calls_at_interv;
 #endif
+
+#if USE_ZJIT
+    // ZJIT stores some data on each iseq.
+    void *zjit_payload;
+#endif
 };
 
 /* T_IMEMO/iseq */

@@ -578,6 +607,12 @@ rb_iseq_check(const rb_iseq_t *iseq)
     return iseq;
 }
 
+static inline bool
+rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
+{
+    return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
+}
+
 static inline const rb_iseq_t *
 def_iseq_ptr(rb_method_definition_t *def)
 {
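
Note: `rb_iseq_attr_p` tests that every bit of `attr` is set in the iseq body's `builtin_attrs`. A hypothetical caller, e.g. backtrace code deciding how to present a frame with the new BUILTIN_ATTR_C_TRACE attribute (sketch only; the function below is invented):

```c
/* Invented helper: label an iseq for a backtrace renderer. */
static const char *
frame_label_kind(const rb_iseq_t *iseq)
{
    return rb_iseq_attr_p(iseq, BUILTIN_ATTR_C_TRACE) ? "cfunc-like" : "ruby";
}
```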
@@ -651,12 +686,15 @@ typedef struct rb_vm_struct {
         bool terminate_waiting;
 
 #ifndef RUBY_THREAD_PTHREAD_H
+        // win32
         bool barrier_waiting;
         unsigned int barrier_cnt;
-        rb_nativethread_cond_t barrier_cond;
+        rb_nativethread_cond_t barrier_complete_cond;
+        rb_nativethread_cond_t barrier_release_cond;
 #endif
     } sync;
 
+#ifdef RUBY_THREAD_PTHREAD_H
     // ractor scheduling
     struct {
         rb_nativethread_lock_t lock;

@@ -690,7 +728,10 @@ typedef struct rb_vm_struct {
         bool barrier_waiting;
         unsigned int barrier_waiting_cnt;
         unsigned int barrier_serial;
+        struct rb_ractor_struct *barrier_ractor;
+        unsigned int barrier_lock_rec;
     } sched;
+#endif
     } ractor;
 
 #ifdef USE_SIGALTSTACK

@@ -698,7 +739,6 @@ typedef struct rb_vm_struct {
 #endif
 
     rb_serial_t fork_gen;
-    struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
 
     /* set in single-threaded processes only: */
     volatile int ubf_async_safe;

@@ -713,18 +753,11 @@ typedef struct rb_vm_struct {
     struct global_object_list *global_object_list;
     const VALUE special_exceptions[ruby_special_error_count];
 
+    /* namespace */
+    rb_namespace_t *root_namespace;
+    rb_namespace_t *main_namespace;
+
     /* load */
-    VALUE top_self;
-    VALUE load_path;
-    VALUE load_path_snapshot;
-    VALUE load_path_check_cache;
-    VALUE expanded_load_path;
-    VALUE loaded_features;
-    VALUE loaded_features_snapshot;
-    VALUE loaded_features_realpaths;
-    VALUE loaded_features_realpath_map;
-    struct st_table *loaded_features_index;
-    struct st_table *loading_table;
     // For running the init function of statically linked
     // extensions when they are loaded
     struct st_table *static_ext_inits;

@@ -747,25 +780,30 @@ typedef struct rb_vm_struct {
     VALUE coverages, me2counter;
     int coverage_mode;
 
-    struct rb_objspace *objspace;
+    struct {
+        struct rb_objspace *objspace;
+        struct gc_mark_func_data_struct {
+            void *data;
+            void (*mark_func)(VALUE v, void *data);
+        } *mark_func_data;
+    } gc;
 
     rb_at_exit_list *at_exit;
 
-    st_table *frozen_strings;
-
     const struct rb_builtin_function *builtin_function_table;
 
     st_table *ci_table;
     struct rb_id_table *negative_cme_table;
     st_table *overloaded_cme_table; // cme -> overloaded_cme
-    st_table *unused_block_warning_table;
-    bool unused_block_warning_strict;
+    set_table *unused_block_warning_table;
+    set_table *cc_refinement_table;
 
     // This id table contains a mapping from ID to ICs. It does this with ID
     // keys and nested st_tables as values. The nested tables have ICs as keys
     // and Qtrue as values. It is used when inline constant caches need to be
     // invalidated or ISEQs are being freed.
     struct rb_id_table *constant_cache;
+    ID inserting_constant_cache_id;
 
 #ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
 #define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
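
Note: grouping `objspace` with `mark_func_data` suggests the usual GC callback pattern: install a temporary mark function, run a marking traversal, and observe each VALUE the collector would mark. A minimal sketch under that assumption (the traversal entry point `gc_mark_roots_of` is invented; the field names match the struct above):

```c
struct count_ctx { size_t n; };

static void
count_one(VALUE obj, void *data)
{
    (void)obj;
    ((struct count_ctx *)data)->n++; /* called once per marked object */
}

static size_t
count_marked(rb_vm_t *vm, VALUE root)
{
    struct count_ctx ctx = { 0 };
    struct gc_mark_func_data_struct mfd = { &ctx, count_one };
    struct gc_mark_func_data_struct *prev = vm->gc.mark_func_data;

    vm->gc.mark_func_data = &mfd;   /* divert marking into the callback */
    gc_mark_roots_of(root);         /* invented driver for this sketch */
    vm->gc.mark_func_data = prev;   /* restore previous state */
    return ctx.n;
}
```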
@@ -783,9 +821,10 @@ typedef struct rb_vm_struct {
         size_t fiber_vm_stack_size;
         size_t fiber_machine_stack_size;
     } default_params;
-
 } rb_vm_t;
 
+extern bool ruby_vm_during_cleanup;
+
 /* default values */
 
 #define RUBY_VM_SIZE_ALIGN 4096

@@ -1023,6 +1062,11 @@ struct rb_execution_context_struct {
 
     VALUE private_const_reference;
 
+    struct {
+        VALUE obj;
+        VALUE fields_obj;
+    } gen_fields_cache;
+
     /* for GC */
     struct {
        VALUE *stack_start;

@@ -1066,7 +1110,7 @@ typedef struct rb_ractor_struct rb_ractor_t;
 struct rb_native_thread;
 
 typedef struct rb_thread_struct {
-    struct ccan_list_node lt_node; // managed by a ractor
+    struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
     VALUE self;
     rb_ractor_t *ractor;
     rb_vm_t *vm;

@@ -1117,6 +1161,7 @@ typedef struct rb_thread_struct {
     struct rb_unblock_callback unblock;
     VALUE locking_mutex;
     struct rb_mutex_struct *keeping_mutexes;
+    struct ccan_list_head interrupt_exec_tasks;
 
     struct rb_waiting_list *join_list;
 

@@ -1139,9 +1184,6 @@ typedef struct rb_thread_struct {
         thread_invoke_type_func
     } invoke_type;
 
-    /* statistics data for profiler */
-    VALUE stat_insn_usage;
-
     /* fiber */
     rb_fiber_t *root_fiber;
 

@@ -1233,8 +1275,8 @@ typedef struct {
 
 RUBY_SYMBOL_EXPORT_BEGIN
 VALUE rb_proc_isolate(VALUE self);
-VALUE rb_proc_isolate_bang(VALUE self);
-VALUE rb_proc_ractor_make_shareable(VALUE self);
+VALUE rb_proc_isolate_bang(VALUE self, VALUE replace_self);
+VALUE rb_proc_ractor_make_shareable(VALUE proc, VALUE replace_self);
 RUBY_SYMBOL_EXPORT_END
 
 typedef struct {

@@ -1273,6 +1315,7 @@ enum vm_opt_newarray_send_type {
     VM_OPT_NEWARRAY_SEND_HASH = 3,
     VM_OPT_NEWARRAY_SEND_PACK = 4,
     VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
+    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
 };
 
 enum vm_special_object_type {

@@ -1316,11 +1359,11 @@ typedef rb_control_frame_t *
 
 enum vm_frame_env_flags {
     /* Frame/Environment flag bits:
-     *   MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
+     *   MMMM MMMM MMMM MMMM ___F FFFF FFFE EEEX (LSB)
      *
      * X   : tag for GC marking (It seems as Fixnum)
     * EEE : 4 bits Env flags
-     * FF..: 7 bits Frame flags
+     * FF..: 8 bits Frame flags
      * MM..: 15 bits frame magic (to check frame corruption)
      */
 

@@ -1343,8 +1386,9 @@ enum vm_frame_env_flags {
     VM_FRAME_FLAG_CFRAME    = 0x0080,
     VM_FRAME_FLAG_LAMBDA    = 0x0100,
     VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
-    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
-    VM_FRAME_FLAG_PASSED    = 0x0800,
+    VM_FRAME_FLAG_CFRAME_KW  = 0x0400,
+    VM_FRAME_FLAG_PASSED     = 0x0800,
+    VM_FRAME_FLAG_NS_REQUIRE = 0x1000,
 
     /* env flag */
     VM_ENV_FLAG_LOCAL       = 0x0002,

@@ -1388,12 +1432,31 @@ VM_ENV_FLAGS(const VALUE *ep, long flag)
     return flags & flag;
 }
 
+static inline unsigned long
+VM_ENV_FLAGS_UNCHECKED(const VALUE *ep, long flag)
+{
+    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
+    return flags & flag;
+}
+
+static inline unsigned long
+VM_ENV_FRAME_TYPE_P(const VALUE *ep, unsigned long frame_type)
+{
+    return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
+}
+
 static inline unsigned long
 VM_FRAME_TYPE(const rb_control_frame_t *cfp)
 {
     return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
 }
 
+static inline unsigned long
+VM_FRAME_TYPE_UNCHECKED(const rb_control_frame_t *cfp)
+{
+    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
+}
+
 static inline int
 VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
 {

@@ -1412,6 +1475,12 @@ VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
     return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
 }
 
+static inline int
+VM_FRAME_FINISHED_P_UNCHECKED(const rb_control_frame_t *cfp)
+{
+    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
+}
+
 static inline int
 VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
 {

@@ -1437,12 +1506,30 @@ VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
     return cframe_p;
 }
 
+static inline int
+VM_FRAME_CFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
+{
+    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
+}
+
 static inline int
 VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
 {
     return !VM_FRAME_CFRAME_P(cfp);
 }
 
+static inline int
+VM_FRAME_RUBYFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
+{
+    return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
+}
+
+static inline int
+VM_FRAME_NS_REQUIRE_P(const rb_control_frame_t *cfp)
+{
+    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_NS_REQUIRE) != 0;
+}
+
 #define RUBYVM_CFUNC_FRAME_P(cfp) \
   (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
 
@@ -1455,20 +1542,57 @@ VM_ENV_LOCAL_P(const VALUE *ep)
     return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
 }
 
+static inline int
+VM_ENV_LOCAL_P_UNCHECKED(const VALUE *ep)
+{
+    return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
+}
+
+static inline const VALUE *
+VM_ENV_PREV_EP_UNCHECKED(const VALUE *ep)
+{
+    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
+}
+
 static inline const VALUE *
 VM_ENV_PREV_EP(const VALUE *ep)
 {
     VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
-    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
+    return VM_ENV_PREV_EP_UNCHECKED(ep);
+}
+
+static inline bool
+VM_ENV_NAMESPACED_P(const VALUE *ep)
+{
+    return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
 }
 
 static inline VALUE
 VM_ENV_BLOCK_HANDLER(const VALUE *ep)
 {
+    if (VM_ENV_NAMESPACED_P(ep)) {
+        VM_ASSERT(VM_ENV_LOCAL_P(ep));
+        return VM_BLOCK_HANDLER_NONE;
+    }
+
     VM_ASSERT(VM_ENV_LOCAL_P(ep));
     return ep[VM_ENV_DATA_INDEX_SPECVAL];
 }
 
+static inline const rb_namespace_t *
+VM_ENV_NAMESPACE(const VALUE *ep)
+{
+    VM_ASSERT(VM_ENV_NAMESPACED_P(ep));
+    VM_ASSERT(VM_ENV_LOCAL_P(ep));
+    return (const rb_namespace_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
+}
+
+static inline const rb_namespace_t *
+VM_ENV_NAMESPACE_UNCHECKED(const VALUE *ep)
+{
+    return (const rb_namespace_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
+}
+
 #if VM_CHECK_MODE > 0
 int rb_vm_ep_in_heap_p(const VALUE *ep);
 #endif
@@ -1480,22 +1604,13 @@ VM_ENV_ESCAPED_P(const VALUE *ep)
     return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
 }
 
-#if VM_CHECK_MODE > 0
-static inline int
-vm_assert_env(VALUE obj)
-{
-    VM_ASSERT(imemo_type_p(obj, imemo_env));
-    return 1;
-}
-#endif
-
 RBIMPL_ATTR_NONNULL((1))
 static inline VALUE
 VM_ENV_ENVVAL(const VALUE *ep)
 {
     VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
     VM_ASSERT(VM_ENV_ESCAPED_P(ep));
-    VM_ASSERT(vm_assert_env(envval));
+    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
     return envval;
 }
 

@@ -1798,7 +1913,7 @@ NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int
 
 /* functions about thread/vm execution */
 RUBY_SYMBOL_EXPORT_BEGIN
-VALUE rb_iseq_eval(const rb_iseq_t *iseq);
+VALUE rb_iseq_eval(const rb_iseq_t *iseq, const rb_namespace_t *ns);
 VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
 VALUE rb_iseq_path(const rb_iseq_t *iseq);
 VALUE rb_iseq_realpath(const rb_iseq_t *iseq);

@@ -1827,6 +1942,7 @@ rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_blo
 
 VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
 VALUE rb_vm_env_local_variables(const rb_env_t *env);
+VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
 const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
 const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
 void rb_vm_inc_const_missing_count(void);

@@ -1843,10 +1959,11 @@ void rb_thread_wakeup_timer_thread(int);
 static inline void
 rb_vm_living_threads_init(rb_vm_t *vm)
 {
-    ccan_list_head_init(&vm->waiting_fds);
     ccan_list_head_init(&vm->workqueue);
     ccan_list_head_init(&vm->ractor.set);
+#ifdef RUBY_THREAD_PTHREAD_H
     ccan_list_head_init(&vm->ractor.sched.zombie_threads);
+#endif
 }
 
 typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);

@@ -1856,6 +1973,7 @@ VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t
 int rb_vm_get_sourceline(const rb_control_frame_t *);
 void rb_vm_stack_to_heap(rb_execution_context_t *ec);
 void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
+void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack);
 rb_thread_t * ruby_thread_from_native(void);
 int ruby_thread_set_native(rb_thread_t *th);
 int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);

@@ -1870,9 +1988,10 @@ void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE
 
 void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
 
-void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
+rb_cref_t *rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass);
 
 const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
+const rb_callable_method_entry_t *rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp);
 
 #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
 

@@ -1938,14 +2057,16 @@ rb_ec_vm_ptr(const rb_execution_context_t *ec)
     }
 }
 
+NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
+
 static inline rb_execution_context_t *
 rb_current_execution_context(bool expect_ec)
 {
 #ifdef RB_THREAD_LOCAL_SPECIFIER
-  #ifdef __APPLE__
-    rb_execution_context_t *ec = rb_current_ec();
+  #ifdef RB_THREAD_CURRENT_EC_NOINLINE
+    rb_execution_context_t * volatile ec = rb_current_ec();
   #else
-    rb_execution_context_t *ec = ruby_current_ec;
+    rb_execution_context_t * volatile ec = ruby_current_ec;
   #endif
 
     /* On the shared objects, `__tls_get_addr()` is used to access the TLS

@@ -1962,7 +2083,7 @@ rb_current_execution_context(bool expect_ec)
      */
     VM_ASSERT(ec == rb_current_ec_noinline());
 #else
-    rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
+    rb_execution_context_t * volatile ec = native_tls_get(ruby_current_ec_key);
 #endif
     VM_ASSERT(!expect_ec || ec != NULL);
     return ec;

@@ -2011,12 +2132,21 @@ void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                                unsigned int recorded_lock_rec,
                                unsigned int current_lock_rec);
 
+/* This technically is a data race, as it's checked without the lock, however we
+ * check against a value only our own thread will write. */
+NO_SANITIZE("thread", static inline bool
+vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
+{
+    VM_ASSERT(cr == GET_RACTOR());
+    return vm->ractor.sync.lock_owner == cr;
+}
+
 static inline unsigned int
 rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
 {
     rb_vm_t *vm = rb_ec_vm_ptr(ec);
 
-    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
+    if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
         return 0;
     }
     else {

@@ -2043,8 +2173,12 @@ enum {
 #define RUBY_VM_SET_TRAP_INTERRUPT(ec)      ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
 #define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
 #define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
-#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
-                                 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
+
+static inline bool
+RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
+{
+    return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
+}
 
 static inline bool
 RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
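
Note: turning RUBY_VM_INTERRUPTED into an inline function lets the read go through ATOMIC_LOAD_RELAXED: other threads set bits in `interrupt_flag` concurrently, and the checker only needs an eventually-visible snapshot, not an ordering guarantee. A self-contained C11 analogue of the same idea (a standalone sketch, not Ruby's macros):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* A relaxed load is enough: we only ask "is any unmasked bit set yet?",
 * and a momentarily stale answer is corrected on the next poll. */
static bool
interrupted(_Atomic uint32_t *flag, uint32_t mask, uint32_t interesting)
{
    uint32_t f = atomic_load_explicit(flag, memory_order_relaxed);
    return (f & ~mask & interesting) != 0;
}
```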
@@ -2057,7 +2191,7 @@ RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
         RUBY_VM_SET_TIMER_INTERRUPT(ec);
     }
 #endif
-    return ec->interrupt_flag & ~(ec)->interrupt_mask;
+    return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
 }
 
 VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);

@@ -2091,7 +2225,7 @@ rb_vm_check_ints(rb_execution_context_t *ec)
     VM_ASSERT(ruby_assert_critical_section_entered == 0);
 #endif
 
-    VM_ASSERT(ec == GET_EC());
+    VM_ASSERT(ec == rb_current_ec_noinline());
 
     if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
         rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);

@@ -2118,7 +2252,7 @@ struct rb_trace_arg_struct {
 };
 
 void rb_hook_list_mark(rb_hook_list_t *hooks);
-void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
+void rb_hook_list_mark_and_move(rb_hook_list_t *hooks);
 void rb_hook_list_free(rb_hook_list_t *hooks);
 void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
 void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);