dd-trace 4.8.1 → 4.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CONTRIBUTING.md +66 -0
- package/MIGRATING.md +0 -14
- package/package.json +18 -18
- package/packages/datadog-esbuild/index.js +109 -31
- package/packages/datadog-instrumentations/index.js +1 -0
- package/packages/datadog-instrumentations/src/couchbase.js +41 -5
- package/packages/datadog-instrumentations/src/helpers/bundler-register.js +54 -0
- package/packages/datadog-instrumentations/src/helpers/hook.js +1 -4
- package/packages/datadog-instrumentations/src/helpers/hooks.js +4 -1
- package/packages/datadog-instrumentations/src/helpers/register.js +7 -2
- package/packages/datadog-instrumentations/src/jest.js +37 -1
- package/packages/datadog-plugin-aws-sdk/src/base.js +22 -10
- package/packages/datadog-plugin-aws-sdk/src/services/lambda.js +16 -0
- package/packages/datadog-plugin-aws-sdk/src/services/sns.js +18 -0
- package/packages/datadog-plugin-aws-sdk/src/services/sqs.js +23 -0
- package/packages/datadog-plugin-cassandra-driver/src/index.js +1 -1
- package/packages/datadog-plugin-couchbase/src/index.js +32 -30
- package/packages/datadog-plugin-elasticsearch/src/index.js +1 -1
- package/packages/datadog-plugin-fetch/src/index.js +1 -0
- package/packages/datadog-plugin-http/src/client.js +7 -17
- package/packages/datadog-plugin-http/src/server.js +10 -2
- package/packages/datadog-plugin-http2/src/client.js +2 -16
- package/packages/datadog-plugin-http2/src/server.js +10 -1
- package/packages/datadog-plugin-memcached/src/index.js +1 -1
- package/packages/datadog-plugin-mongodb-core/src/index.js +2 -2
- package/packages/datadog-plugin-mysql/src/index.js +1 -1
- package/packages/datadog-plugin-next/src/index.js +2 -2
- package/packages/datadog-plugin-oracledb/src/index.js +1 -1
- package/packages/datadog-plugin-pg/src/index.js +1 -1
- package/packages/datadog-plugin-redis/src/index.js +1 -1
- package/packages/datadog-plugin-tedious/src/index.js +1 -1
- package/packages/dd-trace/src/appsec/iast/analyzers/weak-hash-analyzer.js +11 -7
- package/packages/dd-trace/src/appsec/passport.js +1 -1
- package/packages/dd-trace/src/config.js +7 -5
- package/packages/dd-trace/src/plugin_manager.js +6 -1
- package/packages/dd-trace/src/plugins/index.js +1 -0
- package/packages/dd-trace/src/plugins/tracing.js +16 -9
- package/packages/dd-trace/src/proxy.js +2 -2
- package/packages/dd-trace/src/serverless.js +51 -7
- package/packages/dd-trace/src/service-naming/index.js +7 -11
- package/packages/dd-trace/src/service-naming/schemas/definition.js +4 -4
- package/packages/dd-trace/src/service-naming/schemas/util.js +21 -3
- package/packages/dd-trace/src/service-naming/schemas/v0/messaging.js +20 -8
- package/packages/dd-trace/src/service-naming/schemas/v0/storage.js +33 -23
- package/packages/dd-trace/src/service-naming/schemas/v0/web.js +33 -1
- package/packages/dd-trace/src/service-naming/schemas/v1/messaging.js +14 -2
- package/packages/dd-trace/src/service-naming/schemas/v1/storage.js +12 -9
- package/packages/dd-trace/src/service-naming/schemas/v1/web.js +33 -1
- package/packages/dd-trace/src/dcitm.js +0 -53
package/CONTRIBUTING.md
CHANGED
|
@@ -4,4 +4,70 @@ Please reach out before starting work on any major code changes.
|
|
|
4
4
|
This will ensure we avoid duplicating work, or that your code can't be merged due to a rapidly changing
|
|
5
5
|
base. If you would like support for a module that is not listed, [contact support][1] to share a request.
|
|
6
6
|
|
|
7
|
+
## Keep changes small and incremental
|
|
8
|
+
|
|
9
|
+
Changes should be incremental and understandable. As much as possible, large-scale efforts should be broken up into many PRs over time for better reviewability. If a feature would require more changes to be "complete" it's fine to land partial changes if they are not wired up to anything yet, so long as tests are included which at least prove those parts work in isolation.
|
|
10
|
+
|
|
11
|
+
There are great benefits to taking a measured and iterative approach to improvement. When working on code in fewer places there is far less risk of running into merge conflicts or incompatibilities with other systems. Keeping contributions small makes them easy to review which makes that much quicker to land. Additionally, keeping things small and iterative makes it easier for other teams to review and understand what the code does.
|
|
12
|
+
|
|
13
|
+
## Be descriptive
|
|
14
|
+
|
|
15
|
+
Sometimes code can be self-documenting, but often it can't. That is especially true to someone reviewing code they haven't worked on. Be conscious of writing code in a self-describing way and leave comments anywhere that self-description fails. This goes a long way towards making even complex code coherent to one not already familiar with it.
|
|
16
|
+
|
|
17
|
+
Try to write code in a way that describes the intent when read. For example, verbs can be used for function and method names to communicate that they are used to do some specific action. In doing so it becomes clear when referenced by name elsewhere that it is a function and what the function is meant to do. If a function cannot be described with a simple verb it's probably too complex or does too many things.
|
|
18
|
+
|
|
19
|
+
## Give your code space
|
|
20
|
+
|
|
21
|
+
Very dense code is hard to read. It helps to make use of empty lines to separate logical groupings of statements. Long lines should be split up into multiple lines to make them more readable. Complex objects or arrays should generally be split over several lines. Sometimes it's a good idea to assign a variable only to immediately use it in a call as it can be more descriptive than just using the expression in place. It's not always clear what an argument is for if it doesn't visibly have a name somehow. Remember, lines are free, our time is not.
|
|
22
|
+
|
|
23
|
+
## Avoid large refactors
|
|
24
|
+
|
|
25
|
+
Large refactors should generally be avoided in favour of iterative approaches. For example, rather than rewriting how every plugin works, one might make a special-case plugin that works a bit differently for their particular use-case. If several dozen files need to change to add a feature we've probably done something wrong.
|
|
26
|
+
|
|
27
|
+
Sometimes new patterns or new ideas emerge which would be a substantial improvement over the existing state. It can be tempting to want to go all-in on a new way to do something, but the code churn can be hard to manage. It's best to introduce such new things incrementally and advocate for their adoption gradually through the rest of the codebase. As old systems are gradually phased out, the infrastructure which supports them can be deleted or relegated to lazy-loading only if and when that specific part of the system needs to be used.
|
|
28
|
+
|
|
29
|
+
## Test everything
|
|
30
|
+
|
|
31
|
+
It's very difficult to know if a change is valid unless there are tests to prove it. As an extension of that, it's also difficult to know the _use_ of that code is valid if the way it is integrated is not properly tested. For this reason we generally favour integration tests over unit tests. If an API is expected to be used in different places or in different ways then it should generally include unit tests too for each unique scenario, however great care should be taken to ensure unit tests are actually testing the _logic_ and not just testing the _mocks_. It's a very common mistake to write a unit test that abstracts away the actual use of the interface so much that it doesn't actually test how that interface works in real-world scenarios. Remember to test how it handles failures, how it operates under heavy load, and how it affects the usability of whatever it is meant to do.
|
|
32
|
+
|
|
33
|
+
## Don't forget benchmarks
|
|
34
|
+
|
|
35
|
+
Observability products tend to have quite a bit of their behaviour running in app code hot paths. It's important we extensively benchmark anything we expect to have heavy use to ensure it performs well and we don't cause any significant regressions through future changes. Measuring once at the time of writing is insufficient--a graph with just one data point is not going to tell you much of anything.
|
|
36
|
+
|
|
37
|
+
## Always consider backportability
|
|
38
|
+
|
|
39
|
+
To reduce delta between release lines and make it easier for us to support older versions we try as much as possible to backport every change we can. We should be diligent about keeping breaking changes to a minimum and ensuring we don't use language or runtime features which are too new. This way we can generally be confident that a change can be backported.
|
|
40
|
+
|
|
41
|
+
To reduce the surface area of a breaking change, the breaking aspects could be placed behind a flag which is disabled by default or isolated to a function. In the next major the change would then be just to change the default of the flag or to start or stop calling the isolated function. By isolating the breaking logic it also becomes easier to delete later when it's no longer relevant on any release line.
|
|
42
|
+
|
|
43
|
+
Currently we do not have CI to test PRs for mergeability to past release lines, but we intend to expand our CI to include that in the future. For the time being, it's recommended when developing locally to try to cherry-pick your changes onto the previous vN.x branches to see if the tests pass there too.
|
|
44
|
+
|
|
45
|
+
## Respect semantic versioning
|
|
46
|
+
|
|
47
|
+
This library follows the semantic versioning standard, but there are some subtleties left under-specified so this section is meant to clarify exactly how we interpret the meaning of semver. Additionally, it exists to communicate that we also use semver labels on all PRs to indicate which type of release the change should land in. Outside contributions should be evaluated and a semver label selected by the relevant team.
|
|
48
|
+
|
|
49
|
+
### semver-patch
|
|
50
|
+
|
|
51
|
+
If the change is a bug or security fix, it should be labelled as semver-patch. These changes should generally not alter existing behaviour in any way other than to correct the specific issue.
|
|
52
|
+
|
|
53
|
+
### semver-minor
|
|
54
|
+
|
|
55
|
+
Any addition of new functionality should be labelled as semver-minor and should not change any existing behaviour either in how any existing API works or in changing the contents or value of any existing data being reported except in purely additive cases where all existing data retains its prior state. Such changes may include new configuration options which when used will change behaviour, or may include the addition of new data being captured such as a new instrumentation, but should not impact the current operating design of any existing features.
|
|
56
|
+
|
|
57
|
+
### semver-major
|
|
58
|
+
|
|
59
|
+
In the event that some existing functionality _does_ need to change, as much as possible the non-breaking aspects of that change should be made in a semver-minor PR and the actually breaking aspects should be done via a follow-up PR with only the specific aspects which are breaking. Remember to [always consider backportability](#always-consider-backportability).
|
|
60
|
+
|
|
61
|
+
## Indicate intended release targets
|
|
62
|
+
|
|
63
|
+
When writing major changes we use a series of labels in the form of `dont-land-on-vN.x` where N is the major release line which a PR should not land in. Every PR marked as semver-major should include these tags. These tags allow our [branch-diff](https://github.com/bengl/branch-diff) tooling to work smoothly as we can exclude PRs not intended for the release line we're preparing a release proposal for. The `semver-major` labels on their own are not sufficient as they don't encode any indication of from _which_ releases they are a major change.
|
|
64
|
+
|
|
65
|
+
For outside contributions we will have the relevant team add these labels when they review and determine when they plan to release it.
|
|
66
|
+
|
|
67
|
+
## Ensure all tests are green
|
|
68
|
+
|
|
69
|
+
We follow an all-green policy which means that for any PR to be merged _all_ tests must be passing. If a test is flaky or failing consistently the owner of that test should make it a priority to fix that test and unblock other teams from landing changes. For outside contributors there are currently several tests which will always fail as full CI permission is required. For these PRs our current process is for the relevant team to copy the PR and resubmit it to run tests as a user with full CI permission.
|
|
70
|
+
|
|
71
|
+
Eventually we plan to look into putting these permission-required tests behind a label which team members can add to their PRs at creation to run the full CI and can add to outside contributor PRs to trigger the CI from their own user credentials. If the label is not present there will be another action which checks the label is present. Rather than showing a bunch of confusing failures to new contributors it would just show a single job failure which indicates an additional label is required, and we can name it in a way that makes it clear that it's not the responsibility of the outside contributor to add it. Something like `approve-full-ci` is one possible choice there.
|
|
72
|
+
|
|
7
73
|
[1]: https://docs.datadoghq.com/help
|
package/MIGRATING.md
CHANGED
|
@@ -29,20 +29,6 @@ switching to `jest-circus` to anyone still using `jest-jasmine2`.
|
|
|
29
29
|
|
|
30
30
|
We now support only Next.js 10.2 and up.
|
|
31
31
|
|
|
32
|
-
### W3C headers are now prioritized over Datadog headers
|
|
33
|
-
|
|
34
|
-
As we move towards open standards, we have decided to prioritize W3C Trace
|
|
35
|
-
Context headers over our own vendor-specific headers for context propagation
|
|
36
|
-
across services. For most applications this shouldn't change anything and
|
|
37
|
-
distributed tracing should continue to work seamlessly.
|
|
38
|
-
|
|
39
|
-
In some rare cases it's possible that some of the services involved in a trace
|
|
40
|
-
are not instrumented by Datadog at all which can cause spans within the trace to
|
|
41
|
-
become disconnected. While the data would still be available in the UI, the
|
|
42
|
-
relationship between spans would no longer be visible. This can be addressed by
|
|
43
|
-
restoring the previous behaviour using
|
|
44
|
-
`DD_TRACE_PROPAGATION_STYLE='datadog,tracecontext'`.
|
|
45
|
-
|
|
46
32
|
## 2.0 to 3.0
|
|
47
33
|
|
|
48
34
|
### Node 12 is no longer supported
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "dd-trace",
|
|
3
|
-
"version": "4.
|
|
3
|
+
"version": "4.10.0",
|
|
4
4
|
"description": "Datadog APM tracing client for JavaScript",
|
|
5
5
|
"main": "index.js",
|
|
6
6
|
"typings": "index.d.ts",
|
|
@@ -76,10 +76,10 @@
|
|
|
76
76
|
"@opentelemetry/core": "^1.14.0",
|
|
77
77
|
"crypto-randomuuid": "^1.0.0",
|
|
78
78
|
"diagnostics_channel": "^1.1.0",
|
|
79
|
-
"ignore": "^5.2.
|
|
80
|
-
"import-in-the-middle": "^1.
|
|
79
|
+
"ignore": "^5.2.4",
|
|
80
|
+
"import-in-the-middle": "^1.4.1",
|
|
81
81
|
"int64-buffer": "^0.1.9",
|
|
82
|
-
"ipaddr.js": "^2.0
|
|
82
|
+
"ipaddr.js": "^2.1.0",
|
|
83
83
|
"istanbul-lib-coverage": "3.2.0",
|
|
84
84
|
"koalas": "^1.0.2",
|
|
85
85
|
"limiter": "^1.1.4",
|
|
@@ -91,24 +91,24 @@
|
|
|
91
91
|
"methods": "^1.1.2",
|
|
92
92
|
"module-details-from-path": "^1.0.3",
|
|
93
93
|
"msgpack-lite": "^0.1.26",
|
|
94
|
-
"node-abort-controller": "^3.
|
|
94
|
+
"node-abort-controller": "^3.1.1",
|
|
95
95
|
"opentracing": ">=0.12.1",
|
|
96
96
|
"path-to-regexp": "^0.1.2",
|
|
97
97
|
"protobufjs": "^7.2.4",
|
|
98
|
-
"retry": "^0.
|
|
99
|
-
"semver": "^7.
|
|
98
|
+
"retry": "^0.13.1",
|
|
99
|
+
"semver": "^7.5.4"
|
|
100
100
|
},
|
|
101
101
|
"devDependencies": {
|
|
102
102
|
"@types/node": ">=16",
|
|
103
103
|
"autocannon": "^4.5.2",
|
|
104
104
|
"axios": "^0.21.2",
|
|
105
105
|
"benchmark": "^2.1.4",
|
|
106
|
-
"body-parser": "^1.
|
|
107
|
-
"chai": "^4.
|
|
108
|
-
"chalk": "^3.0
|
|
109
|
-
"checksum": "^0.
|
|
110
|
-
"cli-table3": "^0.
|
|
111
|
-
"dotenv": "
|
|
106
|
+
"body-parser": "^1.20.2",
|
|
107
|
+
"chai": "^4.3.7",
|
|
108
|
+
"chalk": "^5.3.0",
|
|
109
|
+
"checksum": "^1.0.0",
|
|
110
|
+
"cli-table3": "^0.6.3",
|
|
111
|
+
"dotenv": "16.3.1",
|
|
112
112
|
"esbuild": "0.16.12",
|
|
113
113
|
"eslint": "^8.23.0",
|
|
114
114
|
"eslint-config-standard": "^11.0.0-beta.0",
|
|
@@ -118,13 +118,13 @@
|
|
|
118
118
|
"eslint-plugin-node": "^5.2.1",
|
|
119
119
|
"eslint-plugin-promise": "^3.6.0",
|
|
120
120
|
"eslint-plugin-standard": "^3.0.1",
|
|
121
|
-
"express": "^4.
|
|
121
|
+
"express": "^4.18.2",
|
|
122
122
|
"get-port": "^3.2.0",
|
|
123
123
|
"glob": "^7.1.6",
|
|
124
124
|
"graphql": "0.13.2",
|
|
125
125
|
"jszip": "^3.5.0",
|
|
126
126
|
"knex": "^2.4.2",
|
|
127
|
-
"mkdirp": "^0.
|
|
127
|
+
"mkdirp": "^3.0.1",
|
|
128
128
|
"mocha": "8",
|
|
129
129
|
"multer": "^1.4.5-lts.1",
|
|
130
130
|
"nock": "^11.3.3",
|
|
@@ -132,9 +132,9 @@
|
|
|
132
132
|
"pprof-format": "^2.0.7",
|
|
133
133
|
"proxyquire": "^1.8.0",
|
|
134
134
|
"rimraf": "^3.0.0",
|
|
135
|
-
"sinon": "^
|
|
135
|
+
"sinon": "^15.2.0",
|
|
136
136
|
"sinon-chai": "^3.7.0",
|
|
137
|
-
"tap": "^16.3.
|
|
138
|
-
"tape": "^
|
|
137
|
+
"tap": "^16.3.7",
|
|
138
|
+
"tape": "^5.6.5"
|
|
139
139
|
}
|
|
140
140
|
}
|
|
@@ -2,73 +2,105 @@
|
|
|
2
2
|
|
|
3
3
|
/* eslint-disable no-console */
|
|
4
4
|
|
|
5
|
-
const
|
|
6
|
-
|
|
7
|
-
const instrumented = Object.keys(require('../datadog-instrumentations/src/helpers/hooks.js'))
|
|
8
|
-
const rawBuiltins = require('module').builtinModules
|
|
5
|
+
const instrumentations = require('../datadog-instrumentations/src/helpers/instrumentations.js')
|
|
6
|
+
const hooks = require('../datadog-instrumentations/src/helpers/hooks.js')
|
|
9
7
|
|
|
10
8
|
warnIfUnsupported()
|
|
11
9
|
|
|
10
|
+
for (const hook of Object.values(hooks)) {
|
|
11
|
+
hook()
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
const modulesOfInterest = new Set()
|
|
15
|
+
|
|
16
|
+
for (const instrumentation of Object.values(instrumentations)) {
|
|
17
|
+
for (const entry of instrumentation) {
|
|
18
|
+
if (!entry.file) {
|
|
19
|
+
modulesOfInterest.add(entry.name) // e.g. "redis"
|
|
20
|
+
} else {
|
|
21
|
+
modulesOfInterest.add(`${entry.name}/${entry.file}`) // e.g. "redis/my/file.js"
|
|
22
|
+
}
|
|
23
|
+
}
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
const NAMESPACE = 'datadog'
|
|
27
|
+
const NM = 'node_modules/'
|
|
28
|
+
const INSTRUMENTED = Object.keys(instrumentations)
|
|
29
|
+
const RAW_BUILTINS = require('module').builtinModules
|
|
30
|
+
const CHANNEL = 'dd-trace:bundler:load'
|
|
31
|
+
|
|
12
32
|
const builtins = new Set()
|
|
13
33
|
|
|
14
|
-
for (const builtin of
|
|
34
|
+
for (const builtin of RAW_BUILTINS) {
|
|
15
35
|
builtins.add(builtin)
|
|
16
36
|
builtins.add(`node:${builtin}`)
|
|
17
37
|
}
|
|
18
38
|
|
|
19
|
-
const packagesOfInterest = new Set()
|
|
20
|
-
|
|
21
39
|
const DEBUG = !!process.env.DD_TRACE_DEBUG
|
|
22
40
|
|
|
23
|
-
// We don't want to handle any built-in packages
|
|
41
|
+
// We don't want to handle any built-in packages
|
|
24
42
|
// Those packages will still be handled via RITM
|
|
25
43
|
// Attempting to instrument them would fail as they have no package.json file
|
|
26
|
-
for (const pkg of
|
|
44
|
+
for (const pkg of INSTRUMENTED) {
|
|
27
45
|
if (builtins.has(pkg)) continue
|
|
28
46
|
if (pkg.startsWith('node:')) continue
|
|
29
|
-
|
|
47
|
+
modulesOfInterest.add(pkg)
|
|
30
48
|
}
|
|
31
49
|
|
|
32
|
-
const DC_CHANNEL = 'dd-trace:bundledModuleLoadStart'
|
|
33
|
-
|
|
34
50
|
module.exports.name = 'datadog-esbuild'
|
|
35
51
|
|
|
36
52
|
module.exports.setup = function (build) {
|
|
37
53
|
build.onResolve({ filter: /.*/ }, args => {
|
|
54
|
+
let fullPathToModule
|
|
55
|
+
try {
|
|
56
|
+
fullPathToModule = dotFriendlyResolve(args.path, args.resolveDir)
|
|
57
|
+
} catch (err) {
|
|
58
|
+
console.warn(`Unable to find "${args.path}". Is the package dead code?`)
|
|
59
|
+
return
|
|
60
|
+
}
|
|
61
|
+
const extracted = extractPackageAndModulePath(fullPathToModule)
|
|
38
62
|
const packageName = args.path
|
|
39
63
|
|
|
40
|
-
|
|
64
|
+
const internal = builtins.has(args.path)
|
|
65
|
+
|
|
66
|
+
if (args.namespace === 'file' && (
|
|
67
|
+
modulesOfInterest.has(packageName) || modulesOfInterest.has(`${extracted.pkg}/${extracted.path}`))
|
|
68
|
+
) {
|
|
41
69
|
// The file namespace is used when requiring files from disk in userland
|
|
42
70
|
|
|
43
71
|
let pathToPackageJson
|
|
44
72
|
try {
|
|
45
|
-
pathToPackageJson = require.resolve(`${
|
|
73
|
+
pathToPackageJson = require.resolve(`${extracted.pkg}/package.json`, { paths: [ args.resolveDir ] })
|
|
46
74
|
} catch (err) {
|
|
47
75
|
if (err.code === 'MODULE_NOT_FOUND') {
|
|
48
|
-
|
|
76
|
+
if (!internal) {
|
|
77
|
+
console.warn(`Unable to find "${extracted.pkg}/package.json". Is the package dead code?`)
|
|
78
|
+
}
|
|
49
79
|
return
|
|
50
80
|
} else {
|
|
51
81
|
throw err
|
|
52
82
|
}
|
|
53
83
|
}
|
|
54
84
|
|
|
55
|
-
const
|
|
85
|
+
const packageJson = require(pathToPackageJson)
|
|
56
86
|
|
|
57
|
-
if (DEBUG) {
|
|
58
|
-
console.log(`resolve ${packageName}@${pkg.version}`)
|
|
59
|
-
}
|
|
87
|
+
if (DEBUG) console.log(`RESOLVE ${packageName}@${packageJson.version}`)
|
|
60
88
|
|
|
61
89
|
// https://esbuild.github.io/plugins/#on-resolve-arguments
|
|
62
90
|
return {
|
|
63
|
-
path:
|
|
91
|
+
path: fullPathToModule,
|
|
64
92
|
namespace: NAMESPACE,
|
|
65
93
|
pluginData: {
|
|
66
|
-
version:
|
|
94
|
+
version: packageJson.version,
|
|
95
|
+
pkg: extracted.pkg,
|
|
96
|
+
path: extracted.path,
|
|
97
|
+
full: fullPathToModule,
|
|
98
|
+
raw: packageName,
|
|
99
|
+
internal
|
|
67
100
|
}
|
|
68
101
|
}
|
|
69
|
-
} else if (args.namespace ===
|
|
102
|
+
} else if (args.namespace === NAMESPACE) {
|
|
70
103
|
// The datadog namespace is used when requiring files that are injected during the onLoad stage
|
|
71
|
-
// see note in onLoad
|
|
72
104
|
|
|
73
105
|
if (builtins.has(packageName)) return
|
|
74
106
|
|
|
@@ -80,23 +112,28 @@ module.exports.setup = function (build) {
|
|
|
80
112
|
})
|
|
81
113
|
|
|
82
114
|
build.onLoad({ filter: /.*/, namespace: NAMESPACE }, args => {
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
}
|
|
115
|
+
const data = args.pluginData
|
|
116
|
+
|
|
117
|
+
if (DEBUG) console.log(`LOAD ${data.pkg}@${data.version}, pkg "${data.path}"`)
|
|
118
|
+
|
|
119
|
+
const path = data.raw !== data.pkg
|
|
120
|
+
? `${data.pkg}/${data.path}`
|
|
121
|
+
: data.pkg
|
|
86
122
|
|
|
87
|
-
// JSON.stringify adds double quotes. For perf gain could simply add in quotes when we know it's safe.
|
|
88
123
|
const contents = `
|
|
89
124
|
const dc = require('diagnostics_channel');
|
|
90
|
-
const ch = dc.channel(${
|
|
91
|
-
const mod = require(${
|
|
125
|
+
const ch = dc.channel('${CHANNEL}');
|
|
126
|
+
const mod = require('${args.path}');
|
|
92
127
|
const payload = {
|
|
93
128
|
module: mod,
|
|
94
|
-
|
|
95
|
-
|
|
129
|
+
version: '${data.version}',
|
|
130
|
+
package: '${data.pkg}',
|
|
131
|
+
path: '${path}'
|
|
96
132
|
};
|
|
97
133
|
ch.publish(payload);
|
|
98
134
|
module.exports = payload.module;
|
|
99
135
|
`
|
|
136
|
+
|
|
100
137
|
// https://esbuild.github.io/plugins/#on-load-results
|
|
101
138
|
return {
|
|
102
139
|
contents,
|
|
@@ -121,3 +158,44 @@ function warnIfUnsupported () {
|
|
|
121
158
|
console.error('more recent version is used at runtime, third party packages won\'t be instrumented.')
|
|
122
159
|
}
|
|
123
160
|
}
|
|
161
|
+
|
|
162
|
+
// @see https://github.com/nodejs/node/issues/47000
|
|
163
|
+
function dotFriendlyResolve (path, directory) {
|
|
164
|
+
if (path === '.') {
|
|
165
|
+
path = './'
|
|
166
|
+
} else if (path === '..') {
|
|
167
|
+
path = '../'
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
return require.resolve(path, { paths: [ directory ] })
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
/**
|
|
174
|
+
* For a given full path to a module,
|
|
175
|
+
* return the package name it belongs to and the local path to the module
|
|
176
|
+
* input: '/foo/node_modules/@co/stuff/foo/bar/baz.js'
|
|
177
|
+
* output: { pkg: '@co/stuff', path: 'foo/bar/baz.js' }
|
|
178
|
+
*/
|
|
179
|
+
function extractPackageAndModulePath (fullPath) {
|
|
180
|
+
const nm = fullPath.lastIndexOf(NM)
|
|
181
|
+
if (nm < 0) {
|
|
182
|
+
return { pkg: null, path: null }
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
const subPath = fullPath.substring(nm + NM.length)
|
|
186
|
+
const firstSlash = subPath.indexOf('/')
|
|
187
|
+
|
|
188
|
+
if (subPath[0] === '@') {
|
|
189
|
+
const secondSlash = subPath.substring(firstSlash + 1).indexOf('/')
|
|
190
|
+
|
|
191
|
+
return {
|
|
192
|
+
pkg: subPath.substring(0, firstSlash + 1 + secondSlash),
|
|
193
|
+
path: subPath.substring(firstSlash + 1 + secondSlash + 1)
|
|
194
|
+
}
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
return {
|
|
198
|
+
pkg: subPath.substring(0, firstSlash),
|
|
199
|
+
path: subPath.substring(firstSlash + 1)
|
|
200
|
+
}
|
|
201
|
+
}
|
|
@@ -74,7 +74,7 @@ function wrap (prefix, fn) {
|
|
|
74
74
|
return asyncResource.runInAsyncScope(() => {
|
|
75
75
|
const cb = callbackResource.bind(arguments[callbackIndex])
|
|
76
76
|
|
|
77
|
-
startCh.publish({ bucket: { name: this.name || this._name } })
|
|
77
|
+
startCh.publish({ bucket: { name: this.name || this._name }, seedNodes: this._dd_hosts })
|
|
78
78
|
|
|
79
79
|
arguments[callbackIndex] = asyncResource.bind(function (error, result) {
|
|
80
80
|
if (error) {
|
|
@@ -146,7 +146,8 @@ function wrapWithName (name) {
|
|
|
146
146
|
return function () { // no arguments used by us
|
|
147
147
|
return wrapCBandPromise(operation, name, {
|
|
148
148
|
collection: { name: this._name || '_default' },
|
|
149
|
-
bucket: { name: this._scope._bucket._name }
|
|
149
|
+
bucket: { name: this._scope._bucket._name },
|
|
150
|
+
seedNodes: this._dd_connStr
|
|
150
151
|
}, this, arguments)
|
|
151
152
|
}
|
|
152
153
|
}
|
|
@@ -155,7 +156,7 @@ function wrapWithName (name) {
|
|
|
155
156
|
function wrapV3Query (query) {
|
|
156
157
|
return function (q) {
|
|
157
158
|
const resource = getQueryResource(q)
|
|
158
|
-
return wrapCBandPromise(query, 'query', { resource }, this, arguments)
|
|
159
|
+
return wrapCBandPromise(query, 'query', { resource, seedNodes: this._connStr }, this, arguments)
|
|
159
160
|
}
|
|
160
161
|
}
|
|
161
162
|
|
|
@@ -179,7 +180,7 @@ addHook({ name: 'couchbase', file: 'lib/bucket.js', versions: ['^2.6.12'] }, Buc
|
|
|
179
180
|
|
|
180
181
|
const asyncResource = new AsyncResource('bound-anonymous-fn')
|
|
181
182
|
return asyncResource.runInAsyncScope(() => {
|
|
182
|
-
startCh.publish({ resource: n1qlQuery, bucket: { name: this.name || this._name } })
|
|
183
|
+
startCh.publish({ resource: n1qlQuery, bucket: { name: this.name || this._name }, seedNodes: this._dd_hosts })
|
|
183
184
|
|
|
184
185
|
emitter.once('rows', asyncResource.bind(() => {
|
|
185
186
|
finishCh.publish(undefined)
|
|
@@ -212,11 +213,32 @@ addHook({ name: 'couchbase', file: 'lib/cluster.js', versions: ['^2.6.12'] }, Cl
|
|
|
212
213
|
Cluster.prototype._maybeInvoke = wrapMaybeInvoke(Cluster.prototype._maybeInvoke)
|
|
213
214
|
Cluster.prototype.query = wrapQuery(Cluster.prototype.query)
|
|
214
215
|
|
|
216
|
+
shimmer.wrap(Cluster.prototype, 'openBucket', openBucket => {
|
|
217
|
+
return function () {
|
|
218
|
+
const bucket = openBucket.apply(this, arguments)
|
|
219
|
+
const hosts = this.dsnObj.hosts
|
|
220
|
+
bucket._dd_hosts = hosts.map(hostAndPort => hostAndPort.join(':')).join(',')
|
|
221
|
+
return bucket
|
|
222
|
+
}
|
|
223
|
+
})
|
|
215
224
|
return Cluster
|
|
216
225
|
})
|
|
217
226
|
|
|
218
227
|
// semver >=3 <3.2.0
|
|
219
228
|
|
|
229
|
+
addHook({ name: 'couchbase', file: 'lib/bucket.js', versions: ['^3.0.7', '^3.1.3'] }, Bucket => {
|
|
230
|
+
shimmer.wrap(Bucket.prototype, 'collection', getCollection => {
|
|
231
|
+
return function () {
|
|
232
|
+
const collection = getCollection.apply(this, arguments)
|
|
233
|
+
const connStr = this._cluster._connStr
|
|
234
|
+
collection._dd_connStr = connStr
|
|
235
|
+
return collection
|
|
236
|
+
}
|
|
237
|
+
})
|
|
238
|
+
|
|
239
|
+
return Bucket
|
|
240
|
+
})
|
|
241
|
+
|
|
220
242
|
addHook({ name: 'couchbase', file: 'lib/collection.js', versions: ['^3.0.7', '^3.1.3'] }, Collection => {
|
|
221
243
|
wrapAllNames(['upsert', 'insert', 'replace'], name => {
|
|
222
244
|
shimmer.wrap(Collection.prototype, name, wrapWithName(name))
|
|
@@ -242,7 +264,21 @@ addHook({ name: 'couchbase', file: 'dist/collection.js', versions: ['>=3.2.0'] }
|
|
|
242
264
|
return collection
|
|
243
265
|
})
|
|
244
266
|
|
|
245
|
-
addHook({ name: 'couchbase', file: 'dist/
|
|
267
|
+
addHook({ name: 'couchbase', file: 'dist/bucket.js', versions: ['>=3.2.0'] }, bucket => {
|
|
268
|
+
const Bucket = bucket.Bucket
|
|
269
|
+
shimmer.wrap(Bucket.prototype, 'collection', getCollection => {
|
|
270
|
+
return function () {
|
|
271
|
+
const collection = getCollection.apply(this, arguments)
|
|
272
|
+
const connStr = this._cluster._connStr
|
|
273
|
+
collection._dd_connStr = connStr
|
|
274
|
+
return collection
|
|
275
|
+
}
|
|
276
|
+
})
|
|
277
|
+
|
|
278
|
+
return bucket
|
|
279
|
+
})
|
|
280
|
+
|
|
281
|
+
addHook({ name: 'couchbase', file: 'dist/cluster.js', versions: ['3.2.0 - 3.2.1', '>=3.2.2'] }, (cluster) => {
|
|
246
282
|
const Cluster = cluster.Cluster
|
|
247
283
|
|
|
248
284
|
shimmer.wrap(Cluster.prototype, 'query', wrapV3Query)
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
'use strict'
|
|
2
|
+
|
|
3
|
+
// eslint-disable-next-line n/no-restricted-require
|
|
4
|
+
const dc = require('diagnostics_channel')
|
|
5
|
+
|
|
6
|
+
const {
|
|
7
|
+
filename,
|
|
8
|
+
loadChannel,
|
|
9
|
+
matchVersion
|
|
10
|
+
} = require('./register.js')
|
|
11
|
+
const hooks = require('./hooks')
|
|
12
|
+
const instrumentations = require('./instrumentations')
|
|
13
|
+
const log = require('../../../dd-trace/src/log')
|
|
14
|
+
|
|
15
|
+
const CHANNEL = 'dd-trace:bundler:load'
|
|
16
|
+
|
|
17
|
+
if (!dc.subscribe) {
|
|
18
|
+
dc.subscribe = (channel, cb) => {
|
|
19
|
+
dc.channel(channel).subscribe(cb)
|
|
20
|
+
}
|
|
21
|
+
}
|
|
22
|
+
if (!dc.unsubscribe) {
|
|
23
|
+
dc.unsubscribe = (channel, cb) => {
|
|
24
|
+
if (dc.channel(channel).hasSubscribers) {
|
|
25
|
+
dc.channel(channel).unsubscribe(cb)
|
|
26
|
+
}
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
dc.subscribe(CHANNEL, (payload) => {
|
|
31
|
+
try {
|
|
32
|
+
hooks[payload.package]()
|
|
33
|
+
} catch (err) {
|
|
34
|
+
log.error(`esbuild-wrapped ${payload.package} missing in list of hooks`)
|
|
35
|
+
throw err
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
if (!instrumentations[payload.package]) {
|
|
39
|
+
log.error(`esbuild-wrapped ${payload.package} missing in list of instrumentations`)
|
|
40
|
+
return
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
for (const { name, file, versions, hook } of instrumentations[payload.package]) {
|
|
44
|
+
if (payload.path !== filename(name, file)) continue
|
|
45
|
+
if (!matchVersion(payload.version, versions)) continue
|
|
46
|
+
|
|
47
|
+
try {
|
|
48
|
+
loadChannel.publish({ name, version: payload.version, file })
|
|
49
|
+
payload.module = hook(payload.module, payload.version)
|
|
50
|
+
} catch (e) {
|
|
51
|
+
log.error(e)
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
})
|
|
@@ -3,10 +3,9 @@
|
|
|
3
3
|
const path = require('path')
|
|
4
4
|
const iitm = require('../../../dd-trace/src/iitm')
|
|
5
5
|
const ritm = require('../../../dd-trace/src/ritm')
|
|
6
|
-
const dcitm = require('../../../dd-trace/src/dcitm')
|
|
7
6
|
|
|
8
7
|
/**
|
|
9
|
-
* This is called for every module that dd-trace supports instrumentation for
|
|
8
|
+
* This is called for every package/internal-module that dd-trace supports instrumentation for
|
|
10
9
|
* In practice, `modules` is always an array with a single entry.
|
|
11
10
|
*
|
|
12
11
|
* @param {string[]} modules list of modules to hook into
|
|
@@ -41,13 +40,11 @@ function Hook (modules, onrequire) {
|
|
|
41
40
|
return safeHook(moduleExports, moduleName, moduleBaseDir)
|
|
42
41
|
}
|
|
43
42
|
})
|
|
44
|
-
this._dcitmHook = dcitm(modules, {}, safeHook)
|
|
45
43
|
}
|
|
46
44
|
|
|
47
45
|
Hook.prototype.unhook = function () {
|
|
48
46
|
this._ritmHook.unhook()
|
|
49
47
|
this._iitmHook.unhook()
|
|
50
|
-
this._dcitmHook.unhook()
|
|
51
48
|
this._patched = Object.create(null)
|
|
52
49
|
}
|
|
53
50
|
|
|
@@ -10,8 +10,9 @@ module.exports = {
|
|
|
10
10
|
'@grpc/grpc-js': () => require('../grpc'),
|
|
11
11
|
'@hapi/hapi': () => require('../hapi'),
|
|
12
12
|
'@jest/core': () => require('../jest'),
|
|
13
|
-
'@jest/transform': () => require('../jest'),
|
|
14
13
|
'@jest/reporters': () => require('../jest'),
|
|
14
|
+
'@jest/test-sequencer': () => require('../jest'),
|
|
15
|
+
'@jest/transform': () => require('../jest'),
|
|
15
16
|
'@koa/router': () => require('../koa'),
|
|
16
17
|
'@node-redis/client': () => require('../redis'),
|
|
17
18
|
'@opensearch-project/opensearch': () => require('../opensearch'),
|
|
@@ -39,6 +40,7 @@ module.exports = {
|
|
|
39
40
|
'find-my-way': () => require('../find-my-way'),
|
|
40
41
|
'fs': () => require('../fs'),
|
|
41
42
|
'node:fs': () => require('../fs'),
|
|
43
|
+
'generic-pool': () => require('../generic-pool'),
|
|
42
44
|
'graphql': () => require('../graphql'),
|
|
43
45
|
'grpc': () => require('../grpc'),
|
|
44
46
|
'hapi': () => require('../hapi'),
|
|
@@ -52,6 +54,7 @@ module.exports = {
|
|
|
52
54
|
'jest-environment-jsdom': () => require('../jest'),
|
|
53
55
|
'jest-jasmine2': () => require('../jest'),
|
|
54
56
|
'jest-worker': () => require('../jest'),
|
|
57
|
+
'knex': () => require('../knex'),
|
|
55
58
|
'koa': () => require('../koa'),
|
|
56
59
|
'koa-router': () => require('../koa'),
|
|
57
60
|
'kafkajs': () => require('../kafkajs'),
|
|
@@ -20,7 +20,9 @@ const disabledInstrumentations = new Set(
|
|
|
20
20
|
const loadChannel = channel('dd-trace:instrumentation:load')
|
|
21
21
|
|
|
22
22
|
// Globals
|
|
23
|
-
|
|
23
|
+
if (!disabledInstrumentations.has('fetch')) {
|
|
24
|
+
require('../fetch')
|
|
25
|
+
}
|
|
24
26
|
|
|
25
27
|
// TODO: make this more efficient
|
|
26
28
|
|
|
@@ -30,6 +32,7 @@ for (const packageName of names) {
|
|
|
30
32
|
Hook([packageName], (moduleExports, moduleName, moduleBaseDir, moduleVersion) => {
|
|
31
33
|
moduleName = moduleName.replace(pathSepExpr, '/')
|
|
32
34
|
|
|
35
|
+
// This executes the integration file thus adding its entries to `instrumentations`
|
|
33
36
|
hooks[packageName]()
|
|
34
37
|
|
|
35
38
|
if (!instrumentations[packageName]) {
|
|
@@ -74,5 +77,7 @@ function filename (name, file) {
|
|
|
74
77
|
|
|
75
78
|
module.exports = {
|
|
76
79
|
filename,
|
|
77
|
-
pathSepExpr
|
|
80
|
+
pathSepExpr,
|
|
81
|
+
loadChannel,
|
|
82
|
+
matchVersion
|
|
78
83
|
}
|