slidge-whatsapp 0.3.0b0__cp313-cp313-manylinux_2_36_aarch64.whl → 0.3.4__cp313-cp313-manylinux_2_36_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of slidge-whatsapp might be problematic.

Files changed (167)
  1. slidge_whatsapp/contact.py +2 -0
  2. slidge_whatsapp/event.go +72 -22
  3. slidge_whatsapp/generated/_whatsapp.cpython-313-aarch64-linux-gnu.h +206 -206
  4. slidge_whatsapp/generated/_whatsapp.cpython-313-aarch64-linux-gnu.so +0 -0
  5. slidge_whatsapp/generated/build.py +166 -166
  6. slidge_whatsapp/generated/go.py +1 -1
  7. slidge_whatsapp/generated/whatsapp.c +1557 -1557
  8. slidge_whatsapp/generated/whatsapp.go +1227 -1227
  9. slidge_whatsapp/generated/whatsapp.py +1382 -1382
  10. slidge_whatsapp/generated/whatsapp_go.h +206 -206
  11. slidge_whatsapp/go.mod +11 -11
  12. slidge_whatsapp/go.sum +26 -26
  13. slidge_whatsapp/session.go +4 -4
  14. slidge_whatsapp/vendor/github.com/ebitengine/purego/README.md +21 -5
  15. slidge_whatsapp/vendor/github.com/ebitengine/purego/abi_loong64.h +60 -0
  16. slidge_whatsapp/vendor/github.com/ebitengine/purego/cgo.go +1 -1
  17. slidge_whatsapp/vendor/github.com/ebitengine/purego/dlerror.go +1 -1
  18. slidge_whatsapp/vendor/github.com/ebitengine/purego/dlfcn.go +1 -1
  19. slidge_whatsapp/vendor/github.com/ebitengine/purego/dlfcn_netbsd.go +15 -0
  20. slidge_whatsapp/vendor/github.com/ebitengine/purego/dlfcn_nocgo_netbsd.go +9 -0
  21. slidge_whatsapp/vendor/github.com/ebitengine/purego/dlfcn_stubs.s +1 -1
  22. slidge_whatsapp/vendor/github.com/ebitengine/purego/func.go +113 -60
  23. slidge_whatsapp/vendor/github.com/ebitengine/purego/gen.go +6 -0
  24. slidge_whatsapp/vendor/github.com/ebitengine/purego/go_runtime.go +1 -1
  25. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go +2 -2
  26. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go +2 -2
  27. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_loong64.h +60 -0
  28. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_loong64.s +40 -0
  29. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go +1 -1
  30. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go +1 -1
  31. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go +1 -1
  32. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_loong64.go +92 -0
  33. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/go_netbsd.go +106 -0
  34. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go +1 -1
  35. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go +1 -1
  36. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go +1 -1
  37. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go +1 -1
  38. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go +4 -0
  39. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go +4 -0
  40. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go +4 -0
  41. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_netbsd.go +26 -0
  42. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/netbsd.go +23 -0
  43. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go +1 -1
  44. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go +11 -1
  45. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go +1 -0
  46. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go +1 -0
  47. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go +1 -0
  48. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_netbsd.go +30 -0
  49. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_loong64.s +71 -0
  50. slidge_whatsapp/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s +5 -1
  51. slidge_whatsapp/vendor/github.com/ebitengine/purego/nocgo.go +1 -1
  52. slidge_whatsapp/vendor/github.com/ebitengine/purego/struct_amd64.go +8 -4
  53. slidge_whatsapp/vendor/github.com/ebitengine/purego/struct_arm64.go +16 -6
  54. slidge_whatsapp/vendor/github.com/ebitengine/purego/struct_loong64.go +190 -0
  55. slidge_whatsapp/vendor/github.com/ebitengine/purego/struct_other.go +6 -2
  56. slidge_whatsapp/vendor/github.com/ebitengine/purego/sys_amd64.s +1 -1
  57. slidge_whatsapp/vendor/github.com/ebitengine/purego/sys_arm64.s +1 -1
  58. slidge_whatsapp/vendor/github.com/ebitengine/purego/sys_loong64.s +96 -0
  59. slidge_whatsapp/vendor/github.com/ebitengine/purego/sys_unix_arm64.s +1 -1
  60. slidge_whatsapp/vendor/github.com/ebitengine/purego/sys_unix_loong64.s +75 -0
  61. slidge_whatsapp/vendor/github.com/ebitengine/purego/syscall.go +6 -3
  62. slidge_whatsapp/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go +3 -3
  63. slidge_whatsapp/vendor/github.com/ebitengine/purego/syscall_sysv.go +13 -10
  64. slidge_whatsapp/vendor/github.com/ebitengine/purego/syscall_windows.go +1 -1
  65. slidge_whatsapp/vendor/github.com/ebitengine/purego/zcallback_amd64.s +2002 -2002
  66. slidge_whatsapp/vendor/github.com/ebitengine/purego/zcallback_arm64.s +4002 -4002
  67. slidge_whatsapp/vendor/github.com/ebitengine/purego/zcallback_loong64.s +4014 -0
  68. slidge_whatsapp/vendor/go.mau.fi/libsignal/session/SessionCipher.go +7 -2
  69. slidge_whatsapp/vendor/go.mau.fi/util/dbutil/log.go +1 -0
  70. slidge_whatsapp/vendor/go.mau.fi/util/dbutil/module.go +119 -0
  71. slidge_whatsapp/vendor/go.mau.fi/util/dbutil/upgradetable.go +3 -34
  72. slidge_whatsapp/vendor/go.mau.fi/util/exbytes/string.go +20 -0
  73. slidge_whatsapp/vendor/go.mau.fi/util/exbytes/writer.go +78 -0
  74. slidge_whatsapp/vendor/go.mau.fi/util/exslices/cast.go +42 -0
  75. slidge_whatsapp/vendor/go.mau.fi/util/exslices/chunk.go +28 -0
  76. slidge_whatsapp/vendor/go.mau.fi/util/exslices/deduplicate.go +67 -0
  77. slidge_whatsapp/vendor/go.mau.fi/util/exslices/diff.go +63 -0
  78. slidge_whatsapp/vendor/go.mau.fi/util/exsync/event.go +15 -1
  79. slidge_whatsapp/vendor/go.mau.fi/util/exsync/syncmap.go +48 -7
  80. slidge_whatsapp/vendor/go.mau.fi/util/exsync/syncset.go +13 -0
  81. slidge_whatsapp/vendor/go.mau.fi/util/jsontime/helpers.go +16 -5
  82. slidge_whatsapp/vendor/go.mau.fi/util/jsontime/integer.go +27 -12
  83. slidge_whatsapp/vendor/go.mau.fi/util/random/string.go +47 -7
  84. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/appstate/decode.go +1 -0
  85. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/appstate/encode.go +60 -15
  86. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/appstate/hash.go +1 -0
  87. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/appstate.go +20 -2
  88. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/armadillomessage.go +2 -2
  89. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/call.go +6 -0
  90. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/errors.go +1 -0
  91. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/group.go +63 -42
  92. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/internals.go +31 -15
  93. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/message.go +77 -26
  94. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/msgsecret.go +23 -0
  95. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/notification.go +5 -1
  96. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/pair.go +22 -23
  97. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/prekeys.go +21 -0
  98. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waAICommon/WAAICommon.pb.go +7747 -0
  99. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/{waBotMetadata/WABotMetadata.proto → waAICommon/WAAICommon.proto} +269 -9
  100. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waDeviceCapabilities/WAProtobufsDeviceCapabilities.pb.go +128 -14
  101. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waDeviceCapabilities/WAProtobufsDeviceCapabilities.proto +10 -0
  102. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waE2E/WAWebProtobufsE2E.pb.go +8953 -10087
  103. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waE2E/WAWebProtobufsE2E.proto +216 -330
  104. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waHistorySync/WAWebProtobufsHistorySync.pb.go +11 -2
  105. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waHistorySync/WAWebProtobufsHistorySync.proto +1 -0
  106. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waStatusAttributions/WAStatusAttributions.pb.go +226 -83
  107. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waStatusAttributions/WAStatusAttributions.proto +14 -0
  108. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waSyncAction/WASyncAction.pb.go +709 -449
  109. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waSyncAction/WASyncAction.proto +24 -0
  110. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waWa6/WAWebProtobufsWa6.pb.go +78 -24
  111. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waWa6/WAWebProtobufsWa6.proto +6 -0
  112. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waWeb/WAWebProtobufsWeb.pb.go +528 -267
  113. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waWeb/WAWebProtobufsWeb.proto +24 -0
  114. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/receipt.go +47 -14
  115. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/request.go +4 -0
  116. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/retry.go +6 -13
  117. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/send.go +130 -62
  118. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/sendfb.go +33 -32
  119. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/store/clientpayload.go +1 -1
  120. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/store/noop.go +16 -0
  121. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/store/sessioncache.go +125 -0
  122. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/store/signal.go +8 -0
  123. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/store/sqlstore/lidmap.go +82 -4
  124. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/store/sqlstore/store.go +135 -55
  125. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/store/sqlstore/upgrades/00-latest-schema.sql +8 -7
  126. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/store/sqlstore/upgrades/11-redacted-phone-contacts.sql +2 -0
  127. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/store/store.go +24 -2
  128. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/types/call.go +6 -5
  129. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/types/jid.go +24 -9
  130. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/types/message.go +7 -1
  131. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/types/user.go +3 -0
  132. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/user.go +43 -3
  133. slidge_whatsapp/vendor/golang.org/x/crypto/curve25519/curve25519.go +7 -4
  134. slidge_whatsapp/vendor/golang.org/x/net/http2/config.go +11 -6
  135. slidge_whatsapp/vendor/golang.org/x/net/http2/config_go125.go +15 -0
  136. slidge_whatsapp/vendor/golang.org/x/net/http2/config_go126.go +15 -0
  137. slidge_whatsapp/vendor/golang.org/x/net/http2/frame.go +24 -1
  138. slidge_whatsapp/vendor/golang.org/x/net/http2/http2.go +0 -1
  139. slidge_whatsapp/vendor/golang.org/x/net/http2/server.go +35 -26
  140. slidge_whatsapp/vendor/golang.org/x/net/http2/transport.go +4 -2
  141. slidge_whatsapp/vendor/golang.org/x/net/http2/writesched.go +2 -0
  142. slidge_whatsapp/vendor/golang.org/x/net/http2/{writesched_priority.go → writesched_priority_rfc7540.go} +52 -52
  143. slidge_whatsapp/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go +209 -0
  144. slidge_whatsapp/vendor/golang.org/x/net/http2/writesched_roundrobin.go +1 -1
  145. slidge_whatsapp/vendor/golang.org/x/net/internal/httpcommon/request.go +2 -2
  146. slidge_whatsapp/vendor/golang.org/x/net/internal/socks/socks.go +1 -1
  147. slidge_whatsapp/vendor/golang.org/x/sys/unix/affinity_linux.go +9 -0
  148. slidge_whatsapp/vendor/golang.org/x/sys/unix/fdset.go +1 -3
  149. slidge_whatsapp/vendor/golang.org/x/sys/unix/ifreq_linux.go +1 -3
  150. slidge_whatsapp/vendor/golang.org/x/sys/unix/mkall.sh +1 -0
  151. slidge_whatsapp/vendor/golang.org/x/sys/unix/syscall_linux.go +1 -3
  152. slidge_whatsapp/vendor/golang.org/x/sys/unix/syscall_netbsd.go +17 -0
  153. slidge_whatsapp/vendor/golang.org/x/sys/windows/syscall_windows.go +2 -0
  154. slidge_whatsapp/vendor/golang.org/x/sys/windows/types_windows.go +16 -0
  155. slidge_whatsapp/vendor/golang.org/x/sys/windows/zsyscall_windows.go +18 -0
  156. slidge_whatsapp/vendor/golang.org/x/text/unicode/bidi/core.go +2 -9
  157. slidge_whatsapp/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +35 -17
  158. slidge_whatsapp/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +14 -0
  159. slidge_whatsapp/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +20 -0
  160. slidge_whatsapp/vendor/google.golang.org/protobuf/internal/version/version.go +1 -1
  161. slidge_whatsapp/vendor/modules.txt +15 -13
  162. {slidge_whatsapp-0.3.0b0.dist-info → slidge_whatsapp-0.3.4.dist-info}/METADATA +4 -3
  163. {slidge_whatsapp-0.3.0b0.dist-info → slidge_whatsapp-0.3.4.dist-info}/RECORD +166 -138
  164. {slidge_whatsapp-0.3.0b0.dist-info → slidge_whatsapp-0.3.4.dist-info}/WHEEL +1 -1
  165. slidge_whatsapp/vendor/go.mau.fi/whatsmeow/proto/waBotMetadata/WABotMetadata.pb.go +0 -5156
  166. {slidge_whatsapp-0.3.0b0.dist-info → slidge_whatsapp-0.3.4.dist-info}/entry_points.txt +0 -0
  167. {slidge_whatsapp-0.3.0b0.dist-info → slidge_whatsapp-0.3.4.dist-info/licenses}/LICENSE +0 -0

slidge_whatsapp/vendor/golang.org/x/net/http2/{writesched_priority.go → writesched_priority_rfc7540.go}
@@ -11,7 +11,7 @@ import (
  )
  
  // RFC 7540, Section 5.3.5: the default weight is 16.
- const priorityDefaultWeight = 15 // 16 = 15 + 1
+ const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
  
  // PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
  type PriorityWriteSchedulerConfig struct {
@@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
          }
      }
  
-     ws := &priorityWriteScheduler{
-         nodes: make(map[uint32]*priorityNode),
+     ws := &priorityWriteSchedulerRFC7540{
+         nodes: make(map[uint32]*priorityNodeRFC7540),
          maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
          maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
          enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
@@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
      return ws
  }
  
- type priorityNodeState int
+ type priorityNodeStateRFC7540 int
  
  const (
-     priorityNodeOpen priorityNodeState = iota
-     priorityNodeClosed
-     priorityNodeIdle
+     priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
+     priorityNodeClosedRFC7540
+     priorityNodeIdleRFC7540
  )
  
- // priorityNode is a node in an HTTP/2 priority tree.
+ // priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
  // Each node is associated with a single stream ID.
  // See RFC 7540, Section 5.3.
- type priorityNode struct {
-     q writeQueue // queue of pending frames to write
-     id uint32 // id of the stream, or 0 for the root of the tree
-     weight uint8 // the actual weight is weight+1, so the value is in [1,256]
-     state priorityNodeState // open | closed | idle
-     bytes int64 // number of bytes written by this node, or 0 if closed
-     subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+ type priorityNodeRFC7540 struct {
+     q writeQueue // queue of pending frames to write
+     id uint32 // id of the stream, or 0 for the root of the tree
+     weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+     state priorityNodeStateRFC7540 // open | closed | idle
+     bytes int64 // number of bytes written by this node, or 0 if closed
+     subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
  
      // These links form the priority tree.
-     parent *priorityNode
-     kids *priorityNode // start of the kids list
-     prev, next *priorityNode // doubly-linked list of siblings
+     parent *priorityNodeRFC7540
+     kids *priorityNodeRFC7540 // start of the kids list
+     prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
  }
  
- func (n *priorityNode) setParent(parent *priorityNode) {
+ func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
      if n == parent {
          panic("setParent to self")
      }
@@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) {
      }
  }
  
- func (n *priorityNode) addBytes(b int64) {
+ func (n *priorityNodeRFC7540) addBytes(b int64) {
      n.bytes += b
      for ; n != nil; n = n.parent {
          n.subtreeBytes += b
@@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) {
  //
  // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
  // if any ancestor p of n is still open (ignoring the root node).
- func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+ func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
      if !n.q.empty() && f(n, openParent) {
          return true
      }
@@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
      // Don't consider the root "open" when updating openParent since
      // we can't send data frames on the root stream (only control frames).
      if n.id != 0 {
-         openParent = openParent || (n.state == priorityNodeOpen)
+         openParent = openParent || (n.state == priorityNodeOpenRFC7540)
      }
  
      // Common case: only one kid or all kids have the same weight.
@@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
          *tmp = append(*tmp, n.kids)
          n.kids.setParent(nil)
      }
-     sort.Sort(sortPriorityNodeSiblings(*tmp))
+     sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
      for i := len(*tmp) - 1; i >= 0; i-- {
          (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
      }
@@ -207,11 +207,11 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
      return false
  }
  
- type sortPriorityNodeSiblings []*priorityNode
+ type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
  
- func (z sortPriorityNodeSiblings) Len() int { return len(z) }
- func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
- func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+ func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
+ func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+ func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
      // Prefer the subtree that has sent fewer bytes relative to its weight.
      // See sections 5.3.2 and 5.3.4.
      wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
@@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool {
      return bi/bk <= wi/wk
  }
  
- type priorityWriteScheduler struct {
+ type priorityWriteSchedulerRFC7540 struct {
      // root is the root of the priority tree, where root.id = 0.
      // The root queues control frames that are not associated with any stream.
-     root priorityNode
+     root priorityNodeRFC7540
  
      // nodes maps stream ids to priority tree nodes.
-     nodes map[uint32]*priorityNode
+     nodes map[uint32]*priorityNodeRFC7540
  
      // maxID is the maximum stream id in nodes.
      maxID uint32
@@ -239,7 +239,7 @@ type priorityWriteScheduler struct {
      // lists of nodes that have been closed or are idle, but are kept in
      // the tree for improved prioritization. When the lengths exceed either
      // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
-     closedNodes, idleNodes []*priorityNode
+     closedNodes, idleNodes []*priorityNodeRFC7540
  
      // From the config.
      maxClosedNodesInTree int
@@ -248,19 +248,19 @@ type priorityWriteScheduler struct {
      enableWriteThrottle bool
  
      // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
-     tmp []*priorityNode
+     tmp []*priorityNodeRFC7540
  
      // pool of empty queues for reuse.
      queuePool writeQueuePool
  }
  
- func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
      // The stream may be currently idle but cannot be opened or closed.
      if curr := ws.nodes[streamID]; curr != nil {
-         if curr.state != priorityNodeIdle {
+         if curr.state != priorityNodeIdleRFC7540 {
              panic(fmt.Sprintf("stream %d already opened", streamID))
          }
-         curr.state = priorityNodeOpen
+         curr.state = priorityNodeOpenRFC7540
          return
      }
@@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
      if parent == nil {
          parent = &ws.root
      }
-     n := &priorityNode{
+     n := &priorityNodeRFC7540{
          q: *ws.queuePool.get(),
          id: streamID,
-         weight: priorityDefaultWeight,
-         state: priorityNodeOpen,
+         weight: priorityDefaultWeightRFC7540,
+         state: priorityNodeOpenRFC7540,
      }
      n.setParent(parent)
      ws.nodes[streamID] = n
@@ -285,19 +285,19 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
      }
  }
  
- func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+ func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
      if streamID == 0 {
          panic("violation of WriteScheduler interface: cannot close stream 0")
      }
      if ws.nodes[streamID] == nil {
          panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
      }
-     if ws.nodes[streamID].state != priorityNodeOpen {
+     if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
          panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
      }
  
      n := ws.nodes[streamID]
-     n.state = priorityNodeClosed
+     n.state = priorityNodeClosedRFC7540
      n.addBytes(-n.bytes)
  
      q := n.q
@@ -310,7 +310,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
      }
  }
  
- func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+ func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
      if streamID == 0 {
          panic("adjustPriority on root")
      }
@@ -324,11 +324,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
              return
          }
          ws.maxID = streamID
-         n = &priorityNode{
+         n = &priorityNodeRFC7540{
              q: *ws.queuePool.get(),
              id: streamID,
-             weight: priorityDefaultWeight,
-             state: priorityNodeIdle,
+             weight: priorityDefaultWeightRFC7540,
+             state: priorityNodeIdleRFC7540,
          }
          n.setParent(&ws.root)
          ws.nodes[streamID] = n
@@ -340,7 +340,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
      parent := ws.nodes[priority.StreamDep]
      if parent == nil {
          n.setParent(&ws.root)
-         n.weight = priorityDefaultWeight
+         n.weight = priorityDefaultWeightRFC7540
          return
      }
  
@@ -381,8 +381,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
      n.weight = priority.Weight
  }
  
- func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
-     var n *priorityNode
+ func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
+     var n *priorityNodeRFC7540
      if wr.isControl() {
          n = &ws.root
      } else {
@@ -401,8 +401,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
      n.q.push(wr)
  }
  
- func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
-     ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+ func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
+     ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
          limit := int32(math.MaxInt32)
          if openParent {
              limit = ws.writeThrottleLimit
@@ -428,7 +428,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
      return wr, ok
  }
  
- func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+ func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
      if maxSize == 0 {
          return
      }
@@ -442,7 +442,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
      *list = append(*list, n)
  }
  
- func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+ func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
      for n.kids != nil {
          n.kids.setParent(n.parent)
      }
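
Context for the rename above: sibling ordering prefers the subtree that has sent fewer bytes relative to its weight (RFC 7540, Sections 5.3.2 and 5.3.4). A minimal standalone sketch of that comparison; the zero-byte special cases are assumed from the surrounding unchanged vendored code (not shown in the hunk), so treat them as illustrative rather than quoted:

    package main

    import "fmt"

    // less mirrors the ordering used by sortPriorityNodeSiblingsRFC7540.Less:
    // prefer the sibling subtree that has sent fewer bytes relative to its weight.
    func less(wi, bi, wk, bk float64) bool {
        if bi == 0 && bk == 0 {
            return wi >= wk // nothing sent yet: heavier sibling first
        }
        if bk == 0 {
            return false
        }
        return bi/bk <= wi/wk
    }

    func main() {
        // Sibling i: weight 16, 1000 bytes sent. Sibling k: weight 8, 900 bytes sent.
        // i has sent fewer bytes per unit of weight (62.5 vs 112.5), so it sorts first.
        fmt.Println(less(16, 1000, 8, 900)) // true
    }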

slidge_whatsapp/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
@@ -0,0 +1,209 @@
+ // Copyright 2025 The Go Authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+ 
+ package http2
+ 
+ import (
+     "fmt"
+     "math"
+ )
+ 
+ type streamMetadata struct {
+     location *writeQueue
+     priority PriorityParam
+ }
+ 
+ type priorityWriteSchedulerRFC9218 struct {
+     // control contains control frames (SETTINGS, PING, etc.).
+     control writeQueue
+ 
+     // heads contain the head of a circular list of streams.
+     // We put these heads within a nested array that represents urgency and
+     // incremental, as defined in
+     // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
+     // 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
+     heads [8][2]*writeQueue
+ 
+     // streams contains a mapping between each stream ID and their metadata, so
+     // we can quickly locate them when needing to, for example, adjust their
+     // priority.
+     streams map[uint32]streamMetadata
+ 
+     // queuePool are empty queues for reuse.
+     queuePool writeQueuePool
+ 
+     // prioritizeIncremental is used to determine whether we should prioritize
+     // incremental streams or not, when urgency is the same in a given Pop()
+     // call.
+     prioritizeIncremental bool
+ }
+ 
+ func newPriorityWriteSchedulerRFC9128() WriteScheduler {
+     ws := &priorityWriteSchedulerRFC9218{
+         streams: make(map[uint32]streamMetadata),
+     }
+     return ws
+ }
+ 
+ func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
+     if ws.streams[streamID].location != nil {
+         panic(fmt.Errorf("stream %d already opened", streamID))
+     }
+     q := ws.queuePool.get()
+     ws.streams[streamID] = streamMetadata{
+         location: q,
+         priority: opt.priority,
+     }
+ 
+     u, i := opt.priority.urgency, opt.priority.incremental
+     if ws.heads[u][i] == nil {
+         ws.heads[u][i] = q
+         q.next = q
+         q.prev = q
+     } else {
+         // Queues are stored in a ring.
+         // Insert the new stream before ws.head, putting it at the end of the list.
+         q.prev = ws.heads[u][i].prev
+         q.next = ws.heads[u][i]
+         q.prev.next = q
+         q.next.prev = q
+     }
+ }
+ 
+ func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
+     metadata := ws.streams[streamID]
+     q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+     if q == nil {
+         return
+     }
+     if q.next == q {
+         // This was the only open stream.
+         ws.heads[u][i] = nil
+     } else {
+         q.prev.next = q.next
+         q.next.prev = q.prev
+         if ws.heads[u][i] == q {
+             ws.heads[u][i] = q.next
+         }
+     }
+     delete(ws.streams, streamID)
+     ws.queuePool.put(q)
+ }
+ 
+ func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
+     metadata := ws.streams[streamID]
+     q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+     if q == nil {
+         return
+     }
+ 
+     // Remove stream from current location.
+     if q.next == q {
+         // This was the only open stream.
+         ws.heads[u][i] = nil
+     } else {
+         q.prev.next = q.next
+         q.next.prev = q.prev
+         if ws.heads[u][i] == q {
+             ws.heads[u][i] = q.next
+         }
+     }
+ 
+     // Insert stream to the new queue.
+     u, i = priority.urgency, priority.incremental
+     if ws.heads[u][i] == nil {
+         ws.heads[u][i] = q
+         q.next = q
+         q.prev = q
+     } else {
+         // Queues are stored in a ring.
+         // Insert the new stream before ws.head, putting it at the end of the list.
+         q.prev = ws.heads[u][i].prev
+         q.next = ws.heads[u][i]
+         q.prev.next = q
+         q.next.prev = q
+     }
+ 
+     // Update the metadata.
+     ws.streams[streamID] = streamMetadata{
+         location: q,
+         priority: priority,
+     }
+ }
+ 
+ func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
+     if wr.isControl() {
+         ws.control.push(wr)
+         return
+     }
+     q := ws.streams[wr.StreamID()].location
+     if q == nil {
+         // This is a closed stream.
+         // wr should not be a HEADERS or DATA frame.
+         // We push the request onto the control queue.
+         if wr.DataSize() > 0 {
+             panic("add DATA on non-open stream")
+         }
+         ws.control.push(wr)
+         return
+     }
+     q.push(wr)
+ }
+ 
+ func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
+     // Control and RST_STREAM frames first.
+     if !ws.control.empty() {
+         return ws.control.shift(), true
+     }
+ 
+     // On the next Pop(), we want to prioritize incremental if we prioritized
+     // non-incremental request of the same urgency this time. Vice-versa.
+     // i.e. when there are incremental and non-incremental requests at the same
+     // priority, we give 50% of our bandwidth to the incremental ones in
+     // aggregate and 50% to the first non-incremental one (since
+     // non-incremental streams do not use round-robin writes).
+     ws.prioritizeIncremental = !ws.prioritizeIncremental
+ 
+     // Always prioritize lowest u (i.e. highest urgency level).
+     for u := range ws.heads {
+         for i := range ws.heads[u] {
+             // When we want to prioritize incremental, we try to pop i=true
+             // first before i=false when u is the same.
+             if ws.prioritizeIncremental {
+                 i = (i + 1) % 2
+             }
+             q := ws.heads[u][i]
+             if q == nil {
+                 continue
+             }
+             for {
+                 if wr, ok := q.consume(math.MaxInt32); ok {
+                     if i == 1 {
+                         // For incremental streams, we update head to q.next so
+                         // we can round-robin between multiple streams that can
+                         // immediately benefit from partial writes.
+                         ws.heads[u][i] = q.next
+                     } else {
+                         // For non-incremental streams, we try to finish one to
+                         // completion rather than doing round-robin. However,
+                         // we update head here so that if q.consume() is !ok
+                         // (e.g. the stream has no more frame to consume), head
+                         // is updated to the next q that has frames to consume
+                         // on future iterations. This way, we do not prioritize
+                         // writing to unavailable stream on next Pop() calls,
+                         // preventing head-of-line blocking.
+                         ws.heads[u][i] = q
+                     }
+                     return wr, true
+                 }
+                 q = q.next
+                 if q == ws.heads[u][i] {
+                     break
+                 }
+             }
+ 
+         }
+     }
+     return FrameWriteRequest{}, false
+ }
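
The new RFC 9218 scheduler keeps one circular doubly linked list of per-stream write queues for each (urgency, incremental) bucket. A self-contained sketch of that ring discipline, using a hypothetical node type in place of the package's writeQueue:

    package main

    import "fmt"

    // node stands in for the scheduler's *writeQueue ring entries.
    type node struct {
        id         uint32
        prev, next *node
    }

    // insert links q at the end of the ring whose head is *head,
    // mirroring how OpenStream files a stream under ws.heads[u][i].
    func insert(head **node, q *node) {
        if *head == nil {
            *head = q
            q.next, q.prev = q, q
            return
        }
        q.prev = (*head).prev
        q.next = *head
        q.prev.next = q
        q.next.prev = q
    }

    // remove unlinks q from the ring, mirroring CloseStream.
    func remove(head **node, q *node) {
        if q.next == q {
            *head = nil
            return
        }
        q.prev.next = q.next
        q.next.prev = q.prev
        if *head == q {
            *head = q.next
        }
    }

    func main() {
        var head *node
        a, b, c := &node{id: 1}, &node{id: 3}, &node{id: 5}
        insert(&head, a)
        insert(&head, b)
        insert(&head, c)
        remove(&head, b)
        for q, first := head, true; first || q != head; q, first = q.next, false {
            fmt.Println(q.id) // prints 1, then 5
        }
    }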

slidge_whatsapp/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct {
  }
  
  // newRoundRobinWriteScheduler constructs a new write scheduler.
- // The round robin scheduler priorizes control frames
+ // The round robin scheduler prioritizes control frames
  // like SETTINGS and PING over DATA frames.
  // When there are no control frames to send, it performs a round-robin
  // selection from the ready streams.

slidge_whatsapp/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -51,7 +51,7 @@ type EncodeHeadersParam struct {
      DefaultUserAgent string
  }
  
- // EncodeHeadersParam is the result of EncodeHeaders.
+ // EncodeHeadersResult is the result of EncodeHeaders.
  type EncodeHeadersResult struct {
      HasBody bool
      HasTrailers bool
@@ -399,7 +399,7 @@ type ServerRequestResult struct {
  
      // If the request should be rejected, this is a short string suitable for passing
      // to the http2 package's CountError function.
-     // It might be a bit odd to return errors this way rather than returing an error,
+     // It might be a bit odd to return errors this way rather than returning an error,
      // but this ensures we don't forget to include a CountError reason.
      InvalidReason string
  }

slidge_whatsapp/vendor/golang.org/x/net/internal/socks/socks.go
@@ -297,7 +297,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter,
          b = append(b, up.Username...)
          b = append(b, byte(len(up.Password)))
          b = append(b, up.Password...)
-         // TODO(mikio): handle IO deadlines and cancelation if
+         // TODO(mikio): handle IO deadlines and cancellation if
          // necessary
          if _, err := rw.Write(b); err != nil {
              return err

slidge_whatsapp/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -41,6 +41,15 @@ func (s *CPUSet) Zero() {
      clear(s[:])
  }
  
+ // Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
+ // will silently ignore any invalid CPU bits in [CPUSet] so this is an
+ // efficient way of resetting the CPU affinity of a process.
+ func (s *CPUSet) Fill() {
+     for i := range s {
+         s[i] = ^cpuMask(0)
+     }
+ }
+ 
  func cpuBitsIndex(cpu int) int {
      return cpu / _NCPUBITS
  }
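
A possible use of the new Fill method, in line with its doc comment: widening a thread's CPU affinity back to every CPU. This is a hedged sketch against the upstream golang.org/x/sys/unix package (Linux only), not code from the wheel:

    //go:build linux

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        var set unix.CPUSet
        // Fill sets every bit; per the doc comment, the kernel ignores bits for
        // CPUs that do not exist, so this resets any previously narrowed mask.
        set.Fill()
        if err := unix.SchedSetaffinity(0, &set); err != nil { // pid 0 = calling thread
            log.Fatal(err)
        }
    }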

slidge_whatsapp/vendor/golang.org/x/sys/unix/fdset.go
@@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool {
  
  // Zero clears the set fds.
  func (fds *FdSet) Zero() {
-     for i := range fds.Bits {
-         fds.Bits[i] = 0
-     }
+     clear(fds.Bits[:])
  }

slidge_whatsapp/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) {
  // clear zeroes the ifreq's union field to prevent trailing garbage data from
  // being sent to the kernel if an ifreq is reused.
  func (ifr *Ifreq) clear() {
-     for i := range ifr.raw.Ifru {
-         ifr.raw.Ifru[i] = 0
-     }
+     clear(ifr.raw.Ifru[:])
  }
  
  // TODO(mdlayher): export as IfreqData? For now we can provide helpers such as

slidge_whatsapp/vendor/golang.org/x/sys/unix/mkall.sh
@@ -49,6 +49,7 @@ esac
  if [[ "$GOOS" = "linux" ]]; then
      # Use the Docker-based build system
      # Files generated through docker (use $cmd so you can Ctl-C the build or run)
+     set -e
      $cmd docker build --tag generate:$GOOS $GOOS
      $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
      exit

slidge_whatsapp/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
      // one. The kernel expects SID to be in network byte order.
      binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
      copy(sa.raw[8:14], sa.Remote)
-     for i := 14; i < 14+IFNAMSIZ; i++ {
-         sa.raw[i] = 0
-     }
+     clear(sa.raw[14 : 14+IFNAMSIZ])
      copy(sa.raw[14:], sa.Dev)
      return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
  }
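
The fdset.go, ifreq_linux.go, and syscall_linux.go hunks above all replace hand-written zeroing loops with the clear builtin (Go 1.21+), which zeroes every element of a slice without changing its length or capacity. A minimal illustration:

    package main

    import "fmt"

    func main() {
        bits := []int64{7, 42, -1}
        clear(bits)                  // zero every element; len and cap are unchanged
        fmt.Println(bits, len(bits)) // [0 0 0] 3
    }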

slidge_whatsapp/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
      return Statvfs1(path, buf, ST_WAIT)
  }
  
+ func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
+     var (
+         _p0 unsafe.Pointer
+         bufsize uintptr
+     )
+     if len(buf) > 0 {
+         _p0 = unsafe.Pointer(&buf[0])
+         bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
+     }
+     r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+     n = int(r0)
+     if e1 != 0 {
+         err = e1
+     }
+     return
+ }
+ 
  /*
   * Exposed directly
   */
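
A hedged usage sketch for the new NetBSD Getvfsstat wrapper. Following getvfsstat(2), calling it with no buffer returns only the number of mounted filesystems, which can then size the real query; ST_WAIT is assumed to be exported alongside Statvfs as shown in the context above:

    //go:build netbsd

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        // With no buffer, getvfsstat(2) only reports how many filesystems are mounted.
        n, err := unix.Getvfsstat(nil, unix.ST_WAIT)
        if err != nil {
            log.Fatal(err)
        }
        mounts := make([]unix.Statvfs_t, n)
        if _, err := unix.Getvfsstat(mounts, unix.ST_WAIT); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%d filesystems mounted\n", len(mounts))
    }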

slidge_whatsapp/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
  //sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
  //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
  //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
+ //sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents
+ //sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer
  //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
  //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
  //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
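
A hedged sketch of how the two newly declared console wrappers might be used from the upstream golang.org/x/sys/windows package: checking for, then discarding, pending console input before prompting the user:

    //go:build windows

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/sys/windows"
    )

    func main() {
        stdin, err := windows.GetStdHandle(windows.STD_INPUT_HANDLE)
        if err != nil {
            log.Fatal(err)
        }
        var pending uint32
        if err := windows.GetNumberOfConsoleInputEvents(stdin, &pending); err != nil {
            log.Fatal(err)
        }
        fmt.Println("pending console input events:", pending)
        // Drop buffered keystrokes and mouse events.
        if err := windows.FlushConsoleInputBuffer(stdin); err != nil {
            log.Fatal(err)
        }
    }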

slidge_whatsapp/vendor/golang.org/x/sys/windows/types_windows.go
@@ -65,6 +65,22 @@ var signals = [...]string{
      15: "terminated",
  }
  
+ // File flags for [os.OpenFile]. The O_ prefix is used to indicate
+ // that these flags are specific to the OpenFile function.
+ const (
+     O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL
+     O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT
+     O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE
+     O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS
+     O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS
+     O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE
+     O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN
+     O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS
+     O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING
+     O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED
+     O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH
+ )
+ 
  const (
      FILE_READ_DATA = 0x00000001
      FILE_READ_ATTRIBUTES = 0x00000080
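
Per the doc comment in the hunk above, the new O_-prefixed constants are meant to be OR'ed into the os.OpenFile flag argument on Windows. A hedged sketch only; it assumes a Go toolchain and x/sys version where os.OpenFile forwards these bits (FILE_FLAG_BACKUP_SEMANTICS is the flag that permits opening a directory handle):

    //go:build windows

    package main

    import (
        "log"
        "os"

        "golang.org/x/sys/windows"
    )

    func main() {
        // Assumption: os.OpenFile passes the extra FILE_FLAG_* bits through to CreateFile.
        f, err := os.OpenFile(`C:\Windows`, os.O_RDONLY|windows.O_FILE_FLAG_BACKUP_SEMANTICS, 0)
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        log.Println("opened:", f.Name())
    }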