github.com/nodejs/node.git
-rw-r--r--  doc/api/_toc.md  1
-rw-r--r--  doc/api/all.md  1
-rwxr-xr-x  doc/api/perf_hooks.md  656
-rw-r--r--  doc/api/process.md  6
-rw-r--r--  lib/internal/bootstrap_node.js  38
-rw-r--r--  lib/internal/errors.js  4
-rw-r--r--  lib/internal/module.js  4
-rw-r--r--  lib/internal/process.js  5
-rw-r--r--  lib/perf_hooks.js  553
-rw-r--r--  node.gyp  5
-rw-r--r--  src/env-inl.h  46
-rw-r--r--  src/env.h  23
-rw-r--r--  src/node.cc  5
-rw-r--r--  src/node_internals.h  1
-rw-r--r--  src/node_perf.cc  389
-rw-r--r--  src/node_perf.h  174
-rw-r--r--  src/node_perf_common.h  71
-rw-r--r--  test/parallel/test-performance-function.js  93
-rwxr-xr-x  test/parallel/test-performance-gc.js  51
-rw-r--r--  test/parallel/test-performance.js  124
-rw-r--r--  test/parallel/test-performanceobserver.js  139
21 files changed, 2381 insertions, 8 deletions
diff --git a/doc/api/_toc.md b/doc/api/_toc.md
index 6791e63f0c6..67e0c62256f 100644
--- a/doc/api/_toc.md
+++ b/doc/api/_toc.md
@@ -32,6 +32,7 @@
* [Net](net.html)
* [OS](os.html)
* [Path](path.html)
+* [Performance Hooks](perf_hooks.html)
* [Process](process.html)
* [Punycode](punycode.html)
* [Query Strings](querystring.html)
diff --git a/doc/api/all.md b/doc/api/all.md
index 425513e2568..849a39a4bd7 100644
--- a/doc/api/all.md
+++ b/doc/api/all.md
@@ -27,6 +27,7 @@
@include net
@include os
@include path
+@include perf_hooks
@include process
@include punycode
@include querystring
diff --git a/doc/api/perf_hooks.md b/doc/api/perf_hooks.md
new file mode 100755
index 00000000000..7972e1edb24
--- /dev/null
+++ b/doc/api/perf_hooks.md
@@ -0,0 +1,656 @@
+# Performance Timing API
+<!-- YAML
+added: REPLACEME
+-->
+
+The Performance Timing API provides an implementation of the
+[W3C Performance Timeline][] specification. The purpose of the API
+is to support collection of high resolution performance metrics.
+This is the same Performance API as implemented in modern Web browsers.
+
+```js
+const { performance } = require('perf_hooks');
+performance.mark('A');
+doSomeLongRunningProcess(() => {
+ performance.mark('B');
+ performance.measure('A to B', 'A', 'B');
+ const measure = performance.getEntriesByName('A to B')[0];
+ console.log(measure.duration);
+ // Prints the number of milliseconds between Mark 'A' and Mark 'B'
+});
+```
+
+## Class: Performance
+<!-- YAML
+added: REPLACEME
+-->
+
+The `Performance` class provides access to performance metric data. A single
+instance of this class is provided via the module's `performance` property.
+
+### performance.clearFunctions([name])
+<!-- YAML
+added: REPLACEME
+-->
+
+* `name` {string}
+
+If `name` is not provided, removes all `PerformanceFunction` objects from the
+Performance Timeline. If `name` is provided, removes only the named entries.
+
+### performance.clearMarks([name])
+<!-- YAML
+added: REPLACEME
+-->
+
+* `name` {string}
+
+If `name` is not provided, removes all `PerformanceMark` objects from the
+Performance Timeline. If `name` is provided, removes only the named mark.
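+
+A brief sketch (the mark name is illustrative):
+
+```js
+const { performance } = require('perf_hooks');
+performance.mark('A');
+performance.clearMarks('A');
+// The named mark has been removed from the timeline.
+console.log(performance.getEntriesByName('A').length);  // 0
+```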
+
+### performance.clearMeasures([name])
+<!-- YAML
+added: REPLACEME
+-->
+
+* `name` {string}
+
+If `name` is not provided, removes all `PerformanceMeasure` objects from the
+Performance Timeline. If `name` is provided, removes only objects whose
+`performanceEntry.name` matches `name`.
+
+### performance.getEntries()
+<!-- YAML
+added: REPLACEME
+-->
+
+* Returns: {Array}
+
+Returns a list of all `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime`.
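+
+For example, a minimal sketch of iterating the timeline (the mark and measure
+names are illustrative):
+
+```js
+const { performance } = require('perf_hooks');
+performance.mark('start');
+performance.mark('end');
+performance.measure('start to end', 'start', 'end');
+// Entries (including the 'node' timing entry) are sorted by startTime.
+for (const entry of performance.getEntries())
+  console.log(entry.name, entry.entryType, entry.startTime);
+```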
+
+### performance.getEntriesByName(name[, type])
+<!-- YAML
+added: REPLACEME
+-->
+
+* `name` {string}
+* `type` {string}
+* Returns: {Array}
+
+Returns a list of all `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime` whose `performanceEntry.name` is
+equal to `name`, and optionally, whose `performanceEntry.entryType` is equal to
+`type`.
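+
+A short sketch of the name and type filtering (names are illustrative):
+
+```js
+const { performance } = require('perf_hooks');
+performance.mark('A');
+performance.mark('A');
+performance.mark('B');
+// Two marks named 'A'; the optional type argument narrows the result.
+console.log(performance.getEntriesByName('A').length);  // 2
+console.log(performance.getEntriesByName('B', 'measure').length);  // 0
+```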
+
+### performance.getEntriesByType(type)
+<!-- YAML
+added: REPLACEME
+-->
+
+* `type` {string}
+* Returns: {Array}
+
+Returns a list of all `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime` whose `performanceEntry.entryType`
+is equal to `type`.
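+
+For example (the mark names are illustrative):
+
+```js
+const { performance } = require('perf_hooks');
+performance.mark('A');
+performance.mark('B');
+performance.measure('A to B', 'A', 'B');
+// Returns only the 'measure' entries.
+console.log(performance.getEntriesByType('measure').length);  // 1
+```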
+
+### performance.mark([name])
+<!-- YAML
+added: REPLACEME
+-->
+
+* `name` {string}
+
+Creates a new `PerformanceMark` entry in the Performance Timeline. A
+`PerformanceMark` is a subclass of `PerformanceEntry` whose
+`performanceEntry.entryType` is always `'mark'`, and whose
+`performanceEntry.duration` is always `0`. Performance marks are used
+to mark specific significant moments in the Performance Timeline.
+
+### performance.measure(name, startMark, endMark)
+<!-- YAML
+added: REPLACEME
+-->
+
+* `name` {string}
+* `startMark` {string}
+* `endMark` {string}
+
+Creates a new `PerformanceMeasure` entry in the Performance Timeline. A
+`PerformanceMeasure` is a subclass of `PerformanceEntry` whose
+`performanceEntry.entryType` is always `'measure'`, and whose
+`performanceEntry.duration` measures the number of milliseconds elapsed
+between `startMark` and `endMark`.
+
+The `startMark` argument may identify any *existing* `PerformanceMark` in the
+Performance Timeline, or *may* identify any of the timestamp properties
+provided by the `PerformanceNodeTiming` class. If the named `startMark` does
+not exist, then `startMark` is set to [`timeOrigin`][] by default.
+
+The `endMark` argument must identify any *existing* `PerformanceMark` in the
+Performance Timeline or any of the timestamp properties provided by the
+`PerformanceNodeTiming` class. If the named `endMark` does not exist, an
+error will be thrown.
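+
+A sketch of measuring from a Node.js milestone to a user mark (the mark and
+measure names are illustrative):
+
+```js
+const { performance } = require('perf_hooks');
+performance.mark('afterSetup');
+// 'bootstrapComplete' is a timestamp property of PerformanceNodeTiming and
+// can therefore be used directly as the startMark.
+performance.measure('setup', 'bootstrapComplete', 'afterSetup');
+// A non-existent endMark would throw ERR_INVALID_PERFORMANCE_MARK instead.
+console.log(performance.getEntriesByName('setup')[0].duration);
+```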
+
+### performance.nodeFrame
+<!-- YAML
+added: REPLACEME
+-->
+
+* {PerformanceNodeFrame}
+
+An instance of the `PerformanceNodeFrame` class that provides performance
+metrics for the event loop.
+
+### performance.nodeTiming
+<!-- YAML
+added: REPLACEME
+-->
+
+* {PerformanceNodeTiming}
+
+An instance of the `PerformanceNodeTiming` class that provides performance
+metrics for specific Node.js operational milestones.
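+
+For example, to inspect the startup milestones collected so far:
+
+```js
+const { performance } = require('perf_hooks');
+// Logs the PerformanceNodeTiming entry, including bootstrapComplete,
+// loopStart, moduleLoadStart and the other milestone timestamps.
+console.log(performance.nodeTiming);
+```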
+
+### performance.now()
+<!-- YAML
+added: REPLACEME
+-->
+
+* Returns: {number}
+
+Returns the current high resolution millisecond timestamp.
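+
+A minimal sketch of timing an operation with `performance.now()`:
+
+```js
+const { performance } = require('perf_hooks');
+const start = performance.now();
+setTimeout(() => {
+  // Elapsed time in milliseconds, with sub-millisecond resolution.
+  console.log(`timer fired after ${performance.now() - start}ms`);
+}, 100);
+```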
+
+### performance.timeOrigin
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The [`timeOrigin`][] specifies the high resolution millisecond timestamp from
+which all performance metric durations are measured.
+
+### performance.timerify(fn)
+<!-- YAML
+added: REPLACEME
+-->
+
+* `fn` {Function}
+
+Wraps a function within a new function that measures the running time of the
+wrapped function. A `PerformanceObserver` must be subscribed to the `'function'`
+event type in order for the timing details to be accessed.
+
+```js
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+function someFunction() {
+ console.log('hello world');
+}
+
+const wrapped = performance.timerify(someFunction);
+
+const obs = new PerformanceObserver((list) => {
+ console.log(list.getEntries()[0].duration);
+ obs.disconnect();
+ performance.clearFunctions();
+});
+obs.observe({ entryTypes: ['function'] });
+
+// A performance timeline entry will be created
+wrapped();
+```
+
+## Class: PerformanceEntry
+<!-- YAML
+added: REPLACEME
+-->
+
+### performanceEntry.duration
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The total number of milliseconds elapsed for this entry. This value will not
+be meaningful for all Performance Entry types.
+
+### performanceEntry.name
+<!-- YAML
+added: REPLACEME
+-->
+
+* {string}
+
+The name of the performance entry.
+
+### performanceEntry.startTime
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp marking the starting time of the
+Performance Entry.
+
+### performanceEntry.entryType
+<!-- YAML
+added: REPLACEME
+-->
+
+* {string}
+
+The type of the performance entry. Currently, it may be one of: `'node'`,
+`'mark'`, `'measure'`, `'gc'`, or `'function'`.
+
+### performanceEntry.kind
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+When `performanceEntry.entryType` is equal to `'gc'`, the
+`performanceEntry.kind` property identifies the type of garbage collection
+operation that occurred (see the sketch below). The value may be one of:
+
+* `perf_hooks.constants.NODE_PERFORMANCE_GC_MAJOR`
+* `perf_hooks.constants.NODE_PERFORMANCE_GC_MINOR`
+* `perf_hooks.constants.NODE_PERFORMANCE_GC_INCREMENTAL`
+* `perf_hooks.constants.NODE_PERFORMANCE_GC_WEAKCB`
+
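+A brief sketch of observing garbage collection entries and checking their
+`kind` (the observer subscribes to the `'gc'` entry type):
+
+```js
+const {
+  performance,
+  PerformanceObserver,
+  constants
+} = require('perf_hooks');
+
+const obs = new PerformanceObserver((list) => {
+  for (const entry of list.getEntriesByType('gc')) {
+    const major = entry.kind === constants.NODE_PERFORMANCE_GC_MAJOR;
+    console.log(`${major ? 'major' : 'other'} gc: ${entry.duration}ms`);
+  }
+  // Free the accumulated gc entries once they are no longer needed.
+  performance.clearGC();
+});
+obs.observe({ entryTypes: ['gc'] });
+```
+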
+## Class: PerformanceNodeFrame extends PerformanceEntry
+<!-- YAML
+added: REPLACEME
+-->
+
+Provides timing details for the Node.js event loop.
+
+### performanceNodeFrame.frameCheck
+
+The high resolution timestamp when `uv_check_t` processing occurred on the
+current loop.
+
+### performanceNodeFrame.frameCount
+
+The total number of event loop iterations (incremented each time `uv_idle_t`
+processing occurs).
+
+### performanceNodeFrame.frameIdle
+
+The high resolution timestamp when `uv_idle_t` processing occurred on the
+current loop.
+
+### performanceNodeFrame.framesPerSecond
+
+The number of event loop iterations per second.
+
+### performanceNodeFrame.framePrepare
+
+The high resolution timestamp when `uv_prepare_t` processing occurred on the
+current loop.
+
+## Class: PerformanceNodeTiming extends PerformanceEntry
+<!-- YAML
+added: REPLACEME
+-->
+
+Provides timing details for Node.js itself.
+
+### performanceNodeTiming.bootstrapComplete
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which the Node.js process
+completed bootstrap.
+
+### performanceNodeTiming.clusterSetupEnd
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which cluster processing ended.
+
+### performanceNodeTiming.clusterSetupStart
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which cluster processing started.
+
+### performanceNodeTiming.loopExit
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which the Node.js event loop
+exited.
+
+### performanceNodeTiming.loopStart
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which the Node.js event loop
+started.
+
+### performanceNodeTiming.moduleLoadEnd
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which main module load ended.
+
+### performanceNodeTiming.moduleLoadStart
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which main module load started.
+
+### performanceNodeTiming.nodeStart
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which the Node.js process was
+initialized.
+
+### performanceNodeTiming.preloadModuleLoadEnd
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which preload module load ended.
+
+### performanceNodeTiming.preloadModuleLoadStart
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which preload module load started.
+
+### performanceNodeTiming.thirdPartyMainEnd
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which third_party_main processing
+ended.
+
+### performanceNodeTiming.thirdPartyMainStart
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which third_party_main processing
+started.
+
+### performanceNodeTiming.v8Start
+<!-- YAML
+added: REPLACEME
+-->
+
+* {number}
+
+The high resolution millisecond timestamp at which the V8 platform was
+initialized.
+
+
+## Class: PerformanceObserver(callback)
+<!-- YAML
+added: REPLACEME
+-->
+
+* `callback` {Function} A `PerformanceObserverCallback` callback function.
+
+`PerformanceObserver` objects provide notifications when new
+`PerformanceEntry` instances have been added to the Performance Timeline.
+
+```js
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+const obs = new PerformanceObserver((list, observer) => {
+ console.log(list.getEntries());
+ observer.disconnect();
+});
+obs.observe({ entryTypes: ['mark'], buffered: true });
+
+performance.mark('test');
+```
+
+Because `PerformanceObserver` instances introduce their own additional
+performance overhead, instances should not be left subscribed to notifications
+indefinitely. Users should disconnect observers as soon as they are no
+longer needed.
+
+### Callback: PerformanceObserverCallback(list, observer)
+<!-- YAML
+added: REPLACEME
+-->
+
+* `list` {PerformanceObserverEntryList}
+* `observer` {PerformanceObserver}
+
+The `PerformanceObserverCallback` is invoked when a `PerformanceObserver` is
+notified about new `PerformanceEntry` instances. The callback receives a
+`PerformanceObserverEntryList` instance and a reference to the
+`PerformanceObserver`.
+
+### Class: PerformanceObserverEntryList
+<!-- YAML
+added: REPLACEME
+-->
+
+The `PerformanceObserverEntryList` class is used to provide access to the
+`PerformanceEntry` instances passed to a `PerformanceObserver`.
+
+#### performanceObserverEntryList.getEntries()
+<!-- YAML
+added: REPLACEME
+-->
+
+* Returns: {Array}
+
+Returns a list of `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime`.
+
+#### performanceObserverEntryList.getEntriesByName(name[, type])
+<!-- YAML
+added: REPLACEME
+-->
+
+* `name` {string}
+* `type` {string}
+* Returns: {Array}
+
+Returns a list of `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime` whose `performanceEntry.name` is
+equal to `name`, and optionally, whose `performanceEntry.entryType` is equal to
+`type`.
+
+#### performanceObserverEntryList.getEntriesByType(type)
+<!-- YAML
+added: REPLACEME
+-->
+
+* `type` {string}
+* Returns: {Array}
+
+Returns a list of `PerformanceEntry` objects in chronological order
+with respect to `performanceEntry.startTime` whose `performanceEntry.entryType`
+is equal to `type`.
+
+### performanceObserver.disconnect()
+<!-- YAML
+added: REPLACEME
+-->
+Disconnects the `PerformanceObserver` instance from all notifications.
+
+### performanceObserver.observe(options)
+<!-- YAML
+added: REPLACEME
+-->
+* `options` {Object}
+  * `entryTypes` {Array} An array of strings identifying the types of
+    `PerformanceEntry` instances the observer is interested in. If not
+    provided, an error will be thrown.
+  * `buffered` {boolean} If `true`, the notification callback will be
+    called using `setImmediate()` and multiple `PerformanceEntry` instance
+    notifications will be buffered internally. If `false`, notifications will
+    be immediate and synchronous. Defaults to `false`.
+
+Subscribes the `PerformanceObserver` instance to notifications of new
+`PerformanceEntry` instances identified by `options.entryTypes`.
+
+When `options.buffered` is `false`, the `callback` will be invoked once for
+every `PerformanceEntry` instance:
+
+```js
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+const obs = new PerformanceObserver((list, observer) => {
+ // called three times synchronously. list contains one item
+});
+obs.observe({ entryTypes: ['mark'] });
+
+for (let n = 0; n < 3; n++)
+ performance.mark(`test${n}`);
+```
+
+When `options.buffered` is `true`, the `callback` will be invoked once, after
+the buffered `PerformanceEntry` instances have been collected:
+
+```js
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+const obs = new PerformanceObserver((list, observer) => {
+ // called once. list contains three items
+});
+obs.observe({ entryTypes: ['mark'], buffered: true });
+
+for (let n = 0; n < 3; n++)
+ performance.mark(`test${n}`);
+```
+
+## Examples
+
+### Measuring the duration of async operations
+
+The following example uses the [Async Hooks][] and Performance APIs to measure
+the actual duration of a Timeout operation (including the amount of time it
+took to execute the callback).
+
+```js
+'use strict';
+const async_hooks = require('async_hooks');
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+const set = new Set();
+const hook = async_hooks.createHook({
+ init(id, type) {
+ if (type === 'Timeout') {
+ performance.mark(`Timeout-${id}-Init`);
+ set.add(id);
+ }
+ },
+ destroy(id) {
+ if (set.has(id)) {
+ set.delete(id);
+ performance.mark(`Timeout-${id}-Destroy`);
+ performance.measure(`Timeout-${id}`,
+ `Timeout-${id}-Init`,
+ `Timeout-${id}-Destroy`);
+ }
+ }
+});
+hook.enable();
+
+const obs = new PerformanceObserver((list, observer) => {
+ console.log(list.getEntries()[0]);
+ performance.clearMarks();
+ performance.clearMeasures();
+ observer.disconnect();
+});
+obs.observe({ entryTypes: ['measure'], buffered: true });
+
+setTimeout(() => {}, 1000);
+```
+
+### Measuring how long it takes to load dependencies
+
+The following example measures the duration of `require()` operations to load
+dependencies:
+
+<!-- eslint-disable no-global-assign -->
+```js
+'use strict';
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+const mod = require('module');
+
+// Monkey patch the require function
+mod.Module.prototype.require =
+ performance.timerify(mod.Module.prototype.require);
+require = performance.timerify(require);
+
+// Activate the observer
+const obs = new PerformanceObserver((list) => {
+ const entries = list.getEntries();
+ entries.forEach((entry) => {
+ console.log(`require('${entry[0]}')`, entry.duration);
+ });
+ obs.disconnect();
+ // Free memory
+ performance.clearFunctions();
+});
+obs.observe({ entryTypes: ['function'], buffered: true });
+
+require('some-module');
+```
+
+[Async Hooks]: async_hooks.html
+[`timeOrigin`]: https://w3c.github.io/hr-time/#dom-performance-timeorigin
+[W3C Performance Timeline]: https://w3c.github.io/performance-timeline/
diff --git a/doc/api/process.md b/doc/api/process.md
index d723f9721b1..a6da470e782 100644
--- a/doc/api/process.md
+++ b/doc/api/process.md
@@ -1852,11 +1852,11 @@ cases:
[Cluster]: cluster.html
[Duplex]: stream.html#stream_duplex_and_transform_streams
[LTS]: https://github.com/nodejs/LTS/
+[note on process I/O]: process.html#process_a_note_on_process_i_o
+[process_emit_warning]: #process_process_emitwarning_warning_type_code_ctor
+[process_warning]: #process_event_warning
[Readable]: stream.html#stream_readable_streams
[Signal Events]: #process_signal_events
[Stream compatibility]: stream.html#stream_compatibility_with_older_node_js_versions
[TTY]: tty.html#tty_tty
[Writable]: stream.html#stream_writable_streams
-[note on process I/O]: process.html#process_a_note_on_process_i_o
-[process_emit_warning]: #process_process_emitwarning_warning_type_code_ctor
-[process_warning]: #process_event_warning
diff --git a/lib/internal/bootstrap_node.js b/lib/internal/bootstrap_node.js
index 310db50960c..50c630f84b6 100644
--- a/lib/internal/bootstrap_node.js
+++ b/lib/internal/bootstrap_node.js
@@ -33,8 +33,21 @@
}
const _process = NativeModule.require('internal/process');
+ const perf = process.binding('performance');
+ const {
+ NODE_PERFORMANCE_MILESTONE_BOOTSTRAP_COMPLETE,
+ NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_START,
+ NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_END,
+ NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_START,
+ NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_END,
+ NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START,
+ NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END,
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START,
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END
+ } = perf.constants;
_process.setup_hrtime();
+ _process.setup_performance();
_process.setup_cpuUsage();
_process.setupMemoryUsage();
_process.setupConfig(NativeModule._source);
@@ -106,7 +119,9 @@
// one to drop a file lib/_third_party_main.js into the build
// directory which will be executed instead of Node's normal loading.
process.nextTick(function() {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_START);
NativeModule.require('_third_party_main');
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_END);
});
} else if (process.argv[1] === 'inspect' || process.argv[1] === 'debug') {
@@ -139,22 +154,30 @@
// channel. This needs to be done before any user code gets executed
// (including preload modules).
if (process.argv[1] && process.env.NODE_UNIQUE_ID) {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_START);
const cluster = NativeModule.require('cluster');
cluster._setupWorker();
-
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_END);
// Make sure it's not accidentally inherited by child processes.
delete process.env.NODE_UNIQUE_ID;
}
if (process._eval != null && !process._forceRepl) {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START);
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END);
// User passed '-e' or '--eval' arguments to Node without '-i' or
// '--interactive'
+
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START);
preloadModules();
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END);
const internalModule = NativeModule.require('internal/module');
internalModule.addBuiltinLibsToObject(global);
evalScript('[eval]');
} else if (process.argv[1] && process.argv[1] !== '-') {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START);
// make process.argv[1] into a full path
const path = NativeModule.require('path');
process.argv[1] = path.resolve(process.argv[1]);
@@ -170,11 +193,21 @@
checkScriptSyntax(source, filename);
process.exit(0);
}
-
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END);
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START);
preloadModules();
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END);
Module.runMain();
} else {
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START);
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END);
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START);
preloadModules();
+ perf.markMilestone(
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END);
// If -i or --interactive were passed, or stdin is a TTY.
if (process._forceRepl || NativeModule.require('tty').isatty(0)) {
// REPL
@@ -218,6 +251,7 @@
}
}
}
+ perf.markMilestone(NODE_PERFORMANCE_MILESTONE_BOOTSTRAP_COMPLETE);
}
function setupProcessObject() {
diff --git a/lib/internal/errors.js b/lib/internal/errors.js
index 3eee3371460..d270c3dd44b 100644
--- a/lib/internal/errors.js
+++ b/lib/internal/errors.js
@@ -234,6 +234,7 @@ E('ERR_NO_CRYPTO', 'Node.js is not compiled with OpenSSL crypto support');
E('ERR_NO_ICU', '%s is not supported on Node.js compiled without ICU');
E('ERR_NO_LONGER_SUPPORTED', '%s is no longer supported');
E('ERR_PARSE_HISTORY_DATA', 'Could not parse history data in %s');
+E('ERR_INVALID_PERFORMANCE_MARK', 'The "%s" performance mark has not been set');
E('ERR_SOCKET_ALREADY_BOUND', 'Socket is already bound');
E('ERR_SOCKET_BAD_PORT', 'Port should be > 0 and < 65536');
E('ERR_SOCKET_BAD_TYPE',
@@ -266,7 +267,8 @@ E('ERR_UNKNOWN_STDIN_TYPE', 'Unknown stdin file type');
E('ERR_UNKNOWN_STREAM_TYPE', 'Unknown stream file type');
E('ERR_V8BREAKITERATOR', 'Full ICU data not installed. ' +
'See https://github.com/nodejs/node/wiki/Intl');
-
+E('ERR_VALID_PERFORMANCE_ENTRY_TYPE',
+ 'At least one valid performance entry type is required');
function invalidArgType(name, expected, actual) {
assert(name, 'name is required');
diff --git a/lib/internal/module.js b/lib/internal/module.js
index cf994b51c06..a6da58a8d73 100644
--- a/lib/internal/module.js
+++ b/lib/internal/module.js
@@ -79,8 +79,8 @@ function stripShebang(content) {
const builtinLibs = [
'assert', 'async_hooks', 'buffer', 'child_process', 'cluster', 'crypto',
'dgram', 'dns', 'domain', 'events', 'fs', 'http', 'https', 'net',
- 'os', 'path', 'punycode', 'querystring', 'readline', 'repl', 'stream',
- 'string_decoder', 'tls', 'tty', 'url', 'util', 'v8', 'vm', 'zlib'
+ 'os', 'path', 'perf_hooks', 'punycode', 'querystring', 'readline', 'repl',
+ 'stream', 'string_decoder', 'tls', 'tty', 'url', 'util', 'v8', 'vm', 'zlib'
];
const { exposeHTTP2 } = process.binding('config');
diff --git a/lib/internal/process.js b/lib/internal/process.js
index 4935fcb2643..c96f99ccfd8 100644
--- a/lib/internal/process.js
+++ b/lib/internal/process.js
@@ -9,6 +9,10 @@ const assert = process.assert = function(x, msg) {
};
+function setup_performance() {
+ require('perf_hooks');
+}
+
// Set up the process.cpuUsage() function.
function setup_cpuUsage() {
// Get the native function, which will be replaced with a JS version.
@@ -258,6 +262,7 @@ function setupRawDebug() {
}
module.exports = {
+ setup_performance,
setup_cpuUsage,
setup_hrtime,
setupMemoryUsage,
diff --git a/lib/perf_hooks.js b/lib/perf_hooks.js
new file mode 100644
index 00000000000..4e7a0de7eb3
--- /dev/null
+++ b/lib/perf_hooks.js
@@ -0,0 +1,553 @@
+'use strict';
+
+const {
+ PerformanceEntry,
+ mark: _mark,
+ measure: _measure,
+ milestones,
+ observerCounts,
+ setupObservers,
+ timeOrigin,
+ timerify,
+ constants
+} = process.binding('performance');
+
+const {
+ NODE_PERFORMANCE_ENTRY_TYPE_NODE,
+ NODE_PERFORMANCE_ENTRY_TYPE_MARK,
+ NODE_PERFORMANCE_ENTRY_TYPE_MEASURE,
+ NODE_PERFORMANCE_ENTRY_TYPE_GC,
+ NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION,
+
+ NODE_PERFORMANCE_MILESTONE_NODE_START,
+ NODE_PERFORMANCE_MILESTONE_V8_START,
+ NODE_PERFORMANCE_MILESTONE_LOOP_START,
+ NODE_PERFORMANCE_MILESTONE_LOOP_EXIT,
+ NODE_PERFORMANCE_MILESTONE_BOOTSTRAP_COMPLETE,
+ NODE_PERFORMANCE_MILESTONE_ENVIRONMENT,
+ NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_START,
+ NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_END,
+ NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_START,
+ NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_END,
+ NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START,
+ NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END,
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START,
+ NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END
+} = constants;
+
+const L = require('internal/linkedlist');
+const kInspect = require('internal/util').customInspectSymbol;
+const { inherits } = require('util');
+
+const kCallback = Symbol('callback');
+const kTypes = Symbol('types');
+const kEntries = Symbol('entries');
+const kBuffer = Symbol('buffer');
+const kBuffering = Symbol('buffering');
+const kQueued = Symbol('queued');
+const kTimerified = Symbol('timerified');
+const kInsertEntry = Symbol('insert-entry');
+const kIndexEntry = Symbol('index-entry');
+const kClearEntry = Symbol('clear-entry');
+const kGetEntries = Symbol('get-entries');
+const kIndex = Symbol('index');
+const kMarks = Symbol('marks');
+
+observerCounts[NODE_PERFORMANCE_ENTRY_TYPE_MARK] = 1;
+observerCounts[NODE_PERFORMANCE_ENTRY_TYPE_MEASURE] = 1;
+const observers = {};
+const observerableTypes = [
+ 'node',
+ 'mark',
+ 'measure',
+ 'gc',
+ 'function'
+];
+
+let errors;
+function lazyErrors() {
+ if (errors === undefined)
+ errors = require('internal/errors');
+ return errors;
+}
+
+function now() {
+ const hr = process.hrtime();
+ return hr[0] * 1000 + hr[1] / 1e6;
+}
+
+class PerformanceNodeTiming {
+ constructor() {}
+
+ get name() {
+ return 'node';
+ }
+
+ get entryType() {
+ return 'node';
+ }
+
+ get startTime() {
+ return timeOrigin;
+ }
+
+ get duration() {
+ return now() - timeOrigin;
+ }
+
+ get nodeStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_NODE_START];
+ }
+
+ get v8Start() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_V8_START];
+ }
+
+ get environment() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_ENVIRONMENT];
+ }
+
+ get loopStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_LOOP_START];
+ }
+
+ get loopExit() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_LOOP_EXIT];
+ }
+
+ get bootstrapComplete() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_BOOTSTRAP_COMPLETE];
+ }
+
+ get thirdPartyMainStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_START];
+ }
+
+ get thirdPartyMainEnd() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_THIRD_PARTY_MAIN_END];
+ }
+
+ get clusterSetupStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_START];
+ }
+
+ get clusterSetupEnd() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_CLUSTER_SETUP_END];
+ }
+
+ get moduleLoadStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_START];
+ }
+
+ get moduleLoadEnd() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_MODULE_LOAD_END];
+ }
+
+ get preloadModuleLoadStart() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_START];
+ }
+
+ get preloadModuleLoadEnd() {
+ return milestones[NODE_PERFORMANCE_MILESTONE_PRELOAD_MODULE_LOAD_END];
+ }
+
+ [kInspect]() {
+ return {
+ name: 'node',
+ entryType: 'node',
+ startTime: this.startTime,
+ duration: this.duration,
+ nodeStart: this.nodeStart,
+ v8Start: this.v8Start,
+ bootstrapComplete: this.bootstrapComplete,
+ environment: this.environment,
+ loopStart: this.loopStart,
+ loopExit: this.loopExit,
+ thirdPartyMainStart: this.thirdPartyMainStart,
+ thirdPartyMainEnd: this.thirdPartyMainEnd,
+ clusterSetupStart: this.clusterSetupStart,
+ clusterSetupEnd: this.clusterSetupEnd,
+ moduleLoadStart: this.moduleLoadStart,
+ moduleLoadEnd: this.moduleLoadEnd,
+ preloadModuleLoadStart: this.preloadModuleLoadStart,
+ preloadModuleLoadEnd: this.preloadModuleLoadEnd
+ };
+ }
+}
+// Use this instead of Extends because we want PerformanceEntry in the
+// prototype chain but we do not want to use the PerformanceEntry
+// constructor for this.
+inherits(PerformanceNodeTiming, PerformanceEntry);
+
+const nodeTiming = new PerformanceNodeTiming();
+
+// Maintains a list of entries as a linked list stored in insertion order.
+class PerformanceObserverEntryList {
+ constructor() {
+ Object.defineProperty(this, kEntries, {
+ writable: true,
+ enumerable: false,
+ value: {}
+ });
+ L.init(this[kEntries]);
+ }
+
+ [kInsertEntry](entry) {
+ const item = { entry };
+ L.append(this[kEntries], item);
+ this[kIndexEntry](item);
+ }
+
+ [kIndexEntry](entry) {
+ // Default implementation does nothing
+ }
+
+ [kGetEntries](name, type) {
+ const ret = [];
+ const list = this[kEntries];
+ if (!L.isEmpty(list)) {
+ let item = L.peek(list);
+ while (item && item !== list) {
+ const entry = item.entry;
+ if ((name && entry.name !== name) ||
+ (type && entry.entryType !== type)) {
+ item = item._idlePrev;
+ continue;
+ }
+ sortedInsert(ret, entry);
+ item = item._idlePrev;
+ }
+ }
+ return ret;
+ }
+
+ // While the items are stored in insertion order, getEntries() is
+ // required to return items sorted by startTime.
+ getEntries() {
+ return this[kGetEntries]();
+ }
+
+ getEntriesByType(type) {
+ return this[kGetEntries](undefined, `${type}`);
+ }
+
+ getEntriesByName(name, type) {
+ return this[kGetEntries](`${name}`, type !== undefined ? `${type}` : type);
+ }
+}
+
+class PerformanceObserver {
+ constructor(callback) {
+ if (typeof callback !== 'function') {
+ const errors = lazyErrors();
+ throw new errors.TypeError('ERR_INVALID_CALLBACK');
+ }
+ Object.defineProperties(this, {
+ [kTypes]: {
+ enumerable: false,
+ writable: true,
+ value: {}
+ },
+ [kCallback]: {
+ enumerable: false,
+ writable: true,
+ value: callback
+ },
+ [kBuffer]: {
+ enumerable: false,
+ writable: true,
+ value: new PerformanceObserverEntryList()
+ },
+ [kBuffering]: {
+ enumerable: false,
+ writable: true,
+ value: false
+ },
+ [kQueued]: {
+ enumerable: false,
+ writable: true,
+ value: false
+ }
+ });
+ }
+
+ disconnect() {
+ const types = this[kTypes];
+ const keys = Object.keys(types);
+ for (var n = 0; n < keys.length; n++) {
+ const item = types[keys[n]];
+ if (item) {
+ L.remove(item);
+ observerCounts[keys[n]]--;
+ }
+ }
+ this[kTypes] = {};
+ }
+
+ observe(options) {
+ const errors = lazyErrors();
+ if (typeof options !== 'object' || options == null) {
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'options', 'Object');
+ }
+ if (!Array.isArray(options.entryTypes)) {
+ throw new errors.TypeError('ERR_INVALID_OPT_VALUE',
+ 'entryTypes', options);
+ }
+ const entryTypes = options.entryTypes.filter(filterTypes).map(mapTypes);
+ if (entryTypes.length === 0) {
+ throw new errors.Error('ERR_VALID_PERFORMANCE_ENTRY_TYPE');
+ }
+ this.disconnect();
+ this[kBuffer][kEntries] = [];
+ L.init(this[kBuffer][kEntries]);
+ this[kBuffering] = Boolean(options.buffered);
+ for (var n = 0; n < entryTypes.length; n++) {
+ const entryType = entryTypes[n];
+ const list = getObserversList(entryType);
+ const item = { obs: this };
+ this[kTypes][entryType] = item;
+ L.append(list, item);
+ observerCounts[entryType]++;
+ }
+ }
+}
+
+class Performance extends PerformanceObserverEntryList {
+ constructor() {
+ super();
+ this[kIndex] = {
+ [kMarks]: new Set()
+ };
+ this[kInsertEntry](nodeTiming);
+ }
+
+ [kIndexEntry](item) {
+ const index = this[kIndex];
+ const type = item.entry.entryType;
+ let items = index[type];
+ if (!items) {
+ items = index[type] = {};
+ L.init(items);
+ }
+ const entry = item.entry;
+ L.append(items, { entry, item });
+ }
+
+ [kClearEntry](type, name) {
+ const index = this[kIndex];
+ const items = index[type];
+ if (!items) return;
+ let item = L.peek(items);
+ while (item && item !== items) {
+ const entry = item.entry;
+ const next = item._idlePrev;
+ if (name !== undefined) {
+ if (entry.name === `${name}`) {
+ L.remove(item); // remove from the index
+ L.remove(item.item); // remove from the master
+ }
+ } else {
+ L.remove(item); // remove from the index
+ L.remove(item.item); // remove from the master
+ }
+ item = next;
+ }
+ }
+
+ get nodeTiming() {
+ return nodeTiming;
+ }
+
+ get timeOrigin() {
+ return timeOrigin;
+ }
+
+ now() {
+ return now();
+ }
+
+ mark(name) {
+ name = `${name}`;
+ _mark(name);
+ this[kIndex][kMarks].add(name);
+ }
+
+ measure(name, startMark, endMark) {
+ name = `${name}`;
+ endMark = `${endMark}`;
+ startMark = startMark !== undefined ? `${startMark}` : '';
+ const marks = this[kIndex][kMarks];
+ if (!marks.has(endMark) && !(endMark in nodeTiming)) {
+ const errors = lazyErrors();
+ throw new errors.Error('ERR_INVALID_PERFORMANCE_MARK', endMark);
+ }
+ _measure(name, startMark, endMark);
+ }
+
+ clearMarks(name) {
+ name = name !== undefined ? `${name}` : name;
+ this[kClearEntry]('mark', name);
+ if (name !== undefined)
+ this[kIndex][kMarks].delete(name);
+ else
+ this[kIndex][kMarks].clear();
+ }
+
+ clearMeasures(name) {
+ this[kClearEntry]('measure', name);
+ }
+
+ clearGC() {
+ this[kClearEntry]('gc');
+ }
+
+ clearFunctions(name) {
+ this[kClearEntry]('function', name);
+ }
+
+ timerify(fn) {
+ if (typeof fn !== 'function') {
+ const errors = lazyErrors();
+ throw new errors.TypeError('ERR_INVALID_ARG_TYPE', 'fn', 'Function');
+ }
+ if (fn[kTimerified])
+ return fn[kTimerified];
+ const ret = timerify(fn, fn.length);
+ Object.defineProperty(fn, kTimerified, {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: ret
+ });
+ Object.defineProperties(ret, {
+ [kTimerified]: {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: ret
+ },
+ name: {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: `timerified ${fn.name}`
+ }
+ });
+ return ret;
+ }
+
+ [kInspect]() {
+ return {
+ timeOrigin,
+ nodeTiming
+ };
+ }
+}
+
+const performance = new Performance();
+
+function getObserversList(type) {
+ let list = observers[type];
+ if (list === undefined) {
+ list = observers[type] = {};
+ L.init(list);
+ }
+ return list;
+}
+
+function doNotify() {
+ this[kQueued] = false;
+ this[kCallback](this[kBuffer], this);
+ this[kBuffer][kEntries] = [];
+ L.init(this[kBuffer][kEntries]);
+}
+
+// Set up the callback used to receive PerformanceObserver notifications
+function observersCallback(entry) {
+ const type = mapTypes(entry.entryType);
+ performance[kInsertEntry](entry);
+ const list = getObserversList(type);
+
+ let current = L.peek(list);
+
+ while (current && current.obs) {
+ const observer = current.obs;
+ // First, add the item to the observers buffer
+ const buffer = observer[kBuffer];
+ buffer[kInsertEntry](entry);
+ // Second, check to see if we're buffering
+ if (observer[kBuffering]) {
+ // If we are, schedule a setImmediate call if one hasn't already
+ if (!observer[kQueued]) {
+ observer[kQueued] = true;
+ // Use setImmediate instead of nextTick to give more time
+ // for multiple entries to collect.
+ setImmediate(doNotify.bind(observer));
+ }
+ } else {
+ // If not buffering, notify immediately
+ doNotify.call(observer);
+ }
+ current = current._idlePrev;
+ }
+}
+setupObservers(observersCallback);
+
+function filterTypes(i) {
+ return observerableTypes.indexOf(`${i}`) >= 0;
+}
+
+function mapTypes(i) {
+ switch (i) {
+ case 'node': return NODE_PERFORMANCE_ENTRY_TYPE_NODE;
+ case 'mark': return NODE_PERFORMANCE_ENTRY_TYPE_MARK;
+ case 'measure': return NODE_PERFORMANCE_ENTRY_TYPE_MEASURE;
+ case 'gc': return NODE_PERFORMANCE_ENTRY_TYPE_GC;
+ case 'function': return NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION;
+ }
+}
+
+// The specification requires that PerformanceEntry instances are sorted
+// according to startTime. Unfortunately, they are not necessarily created
+// in that same order, and can be reported to the JS layer in any order,
+// which means we need to keep the list sorted as we insert.
+function getInsertLocation(list, entryStartTime) {
+ let start = 0;
+ let end = list.length;
+ while (start < end) {
+ const pivot = (end + start) >>> 1;
+ if (list[pivot].startTime === entryStartTime)
+ return pivot;
+ if (list[pivot].startTime < entryStartTime)
+ start = pivot + 1;
+ else
+ end = pivot;
+ }
+ return start;
+}
+
+function sortedInsert(list, entry) {
+ const entryStartTime = entry.startTime;
+ if (list.length === 0 ||
+ (list[list.length - 1].startTime < entryStartTime)) {
+ list.push(entry);
+ return;
+ }
+ if (list[0] && (list[0].startTime > entryStartTime)) {
+ list.unshift(entry);
+ return;
+ }
+ const location = getInsertLocation(list, entryStartTime);
+ list.splice(location, 0, entry);
+}
+
+module.exports = {
+ performance,
+ PerformanceObserver
+};
+
+Object.defineProperty(module.exports, 'constants', {
+ configurable: false,
+ enumerable: true,
+ value: constants
+});
diff --git a/node.gyp b/node.gyp
index 785814de991..975fc7238ba 100644
--- a/node.gyp
+++ b/node.gyp
@@ -50,6 +50,7 @@
'lib/net.js',
'lib/os.js',
'lib/path.js',
+ 'lib/perf_hooks.js',
'lib/process.js',
'lib/punycode.js',
'lib/querystring.js',
@@ -191,6 +192,7 @@
'src/node_main.cc',
'src/node_os.cc',
'src/node_platform.cc',
+ 'src/node_perf.cc',
'src/node_serdes.cc',
'src/node_url.cc',
'src/node_util.cc',
@@ -239,6 +241,8 @@
'src/node_javascript.h',
'src/node_mutex.h',
'src/node_platform.h',
+ 'src/node_perf.h',
+ 'src/node_perf_common.h',
'src/node_root_certs.h',
'src/node_version.h',
'src/node_watchdog.h',
@@ -657,6 +661,7 @@
'<(OBJ_PATH)<(OBJ_SEPARATOR)node.<(OBJ_SUFFIX)',
'<(OBJ_PATH)<(OBJ_SEPARATOR)node_buffer.<(OBJ_SUFFIX)',
'<(OBJ_PATH)<(OBJ_SEPARATOR)node_i18n.<(OBJ_SUFFIX)',
+ '<(OBJ_PATH)<(OBJ_SEPARATOR)node_perf.<(OBJ_SUFFIX)',
'<(OBJ_PATH)<(OBJ_SEPARATOR)node_url.<(OBJ_SUFFIX)',
'<(OBJ_PATH)<(OBJ_SEPARATOR)util.<(OBJ_SUFFIX)',
'<(OBJ_PATH)<(OBJ_SEPARATOR)string_bytes.<(OBJ_SUFFIX)',
diff --git a/src/env-inl.h b/src/env-inl.h
index 5d6211b7b69..842eed4cc8b 100644
--- a/src/env-inl.h
+++ b/src/env-inl.h
@@ -318,6 +318,16 @@ inline Environment::Environment(IsolateData* isolate_data,
AssignToContext(context);
destroy_ids_list_.reserve(512);
+ performance_state_ = Calloc<performance::performance_state>(1);
+ performance_state_->milestones[
+ performance::NODE_PERFORMANCE_MILESTONE_ENVIRONMENT] =
+ PERFORMANCE_NOW();
+ performance_state_->milestones[
+ performance::NODE_PERFORMANCE_MILESTONE_NODE_START] =
+ performance::performance_node_start;
+ performance_state_->milestones[
+ performance::NODE_PERFORMANCE_MILESTONE_V8_START] =
+ performance::performance_v8_start;
}
inline Environment::~Environment() {
@@ -333,6 +343,7 @@ inline Environment::~Environment() {
delete[] heap_space_statistics_buffer_;
delete[] http_parser_buffer_;
free(http2_state_buffer_);
+ free(performance_state_);
}
inline v8::Isolate* Environment::isolate() const {
@@ -500,6 +511,41 @@ inline void Environment::set_fs_stats_field_array(double* fields) {
fs_stats_field_array_ = fields;
}
+inline performance::performance_state* Environment::performance_state() {
+ return performance_state_;
+}
+
+inline std::map<std::string, uint64_t>* Environment::performance_marks() {
+ return &performance_marks_;
+}
+
+inline Environment* Environment::from_performance_check_handle(
+ uv_check_t* handle) {
+ return ContainerOf(&Environment::performance_check_handle_, handle);
+}
+
+inline Environment* Environment::from_performance_idle_handle(
+ uv_idle_t* handle) {
+ return ContainerOf(&Environment::performance_idle_handle_, handle);
+}
+
+inline Environment* Environment::from_performance_prepare_handle(
+ uv_prepare_t* handle) {
+ return ContainerOf(&Environment::performance_prepare_handle_, handle);
+}
+
+inline uv_check_t* Environment::performance_check_handle() {
+ return &performance_check_handle_;
+}
+
+inline uv_idle_t* Environment::performance_idle_handle() {
+ return &performance_idle_handle_;
+}
+
+inline uv_prepare_t* Environment::performance_prepare_handle() {
+ return &performance_prepare_handle_;
+}
+
inline IsolateData* Environment::isolate_data() const {
return isolate_data_;
}
diff --git a/src/env.h b/src/env.h
index 2b299a66223..8016001c36c 100644
--- a/src/env.h
+++ b/src/env.h
@@ -36,6 +36,7 @@
#include "node.h"
#include <list>
+#include <map>
#include <stdint.h>
#include <vector>
#include <stack>
@@ -104,6 +105,7 @@ struct http2_state;
V(callback_string, "callback") \
V(change_string, "change") \
V(channel_string, "channel") \
+ V(constants_string, "constants") \
V(oncertcb_string, "oncertcb") \
V(onclose_string, "_onclose") \
V(code_string, "code") \
@@ -303,6 +305,8 @@ struct http2_state;
V(module_load_list_array, v8::Array) \
V(pbkdf2_constructor_template, v8::ObjectTemplate) \
V(pipe_constructor_template, v8::FunctionTemplate) \
+ V(performance_entry_callback, v8::Function) \
+ V(performance_entry_template, v8::Function) \
V(process_object, v8::Object) \
V(promise_reject_function, v8::Function) \
V(promise_wrap_template, v8::ObjectTemplate) \
@@ -611,6 +615,17 @@ class Environment {
inline double* fs_stats_field_array() const;
inline void set_fs_stats_field_array(double* fields);
+ inline performance::performance_state* performance_state();
+ inline std::map<std::string, uint64_t>* performance_marks();
+
+ static inline Environment* from_performance_check_handle(uv_check_t* handle);
+ static inline Environment* from_performance_idle_handle(uv_idle_t* handle);
+ static inline Environment* from_performance_prepare_handle(
+ uv_prepare_t* handle);
+ inline uv_check_t* performance_check_handle();
+ inline uv_idle_t* performance_idle_handle();
+ inline uv_prepare_t* performance_prepare_handle();
+
inline void ThrowError(const char* errmsg);
inline void ThrowTypeError(const char* errmsg);
inline void ThrowRangeError(const char* errmsg);
@@ -690,6 +705,10 @@ class Environment {
uv_timer_t destroy_ids_timer_handle_;
uv_prepare_t idle_prepare_handle_;
uv_check_t idle_check_handle_;
+ uv_prepare_t performance_prepare_handle_;
+ uv_check_t performance_check_handle_;
+ uv_idle_t performance_idle_handle_;
+
AsyncHooks async_hooks_;
DomainFlag domain_flag_;
TickInfo tick_info_;
@@ -700,6 +719,10 @@ class Environment {
bool abort_on_uncaught_exception_;
size_t makecallback_cntr_;
std::vector<double> destroy_ids_list_;
+
+ performance::performance_state* performance_state_ = nullptr;
+ std::map<std::string, uint64_t> performance_marks_;
+
#if HAVE_INSPECTOR
inspector::Agent inspector_agent_;
#endif
diff --git a/src/node.cc b/src/node.cc
index 34785693c8a..bdff4527c67 100644
--- a/src/node.cc
+++ b/src/node.cc
@@ -28,6 +28,7 @@
#include "node_internals.h"
#include "node_revert.h"
#include "node_debug_options.h"
+#include "node_perf.h"
#if defined HAVE_PERFCTR
#include "node_counters.h"
@@ -4559,6 +4560,7 @@ inline int Start(Isolate* isolate, IsolateData* isolate_data,
{
SealHandleScope seal(isolate);
bool more;
+ PERFORMANCE_MARK(&env, LOOP_START);
do {
uv_run(env.event_loop(), UV_RUN_DEFAULT);
@@ -4569,6 +4571,7 @@ inline int Start(Isolate* isolate, IsolateData* isolate_data,
// event, or after running some callbacks.
more = uv_loop_alive(env.event_loop());
} while (more == true);
+ PERFORMANCE_MARK(&env, LOOP_EXIT);
}
env.set_trace_sync_io(false);
@@ -4638,6 +4641,7 @@ inline int Start(uv_loop_t* event_loop,
int Start(int argc, char** argv) {
atexit([] () { uv_tty_reset_mode(); });
PlatformInit();
+ node::performance::performance_node_start = PERFORMANCE_NOW();
CHECK_GT(argc, 0);
@@ -4674,6 +4678,7 @@ int Start(int argc, char** argv) {
v8_platform.StartTracingAgent();
}
V8::Initialize();
+ node::performance::performance_v8_start = PERFORMANCE_NOW();
v8_initialized = true;
const int exit_code =
Start(uv_default_loop(), argc, argv, exec_argc, exec_argv);
diff --git a/src/node_internals.h b/src/node_internals.h
index 5d437fa3025..1e099325a35 100644
--- a/src/node_internals.h
+++ b/src/node_internals.h
@@ -30,6 +30,7 @@
#include "uv.h"
#include "v8.h"
#include "tracing/trace_event.h"
+#include "node_perf_common.h"
#include "node_debug_options.h"
#include <stdint.h>
diff --git a/src/node_perf.cc b/src/node_perf.cc
new file mode 100644
index 00000000000..2398bb9b8fd
--- /dev/null
+++ b/src/node_perf.cc
@@ -0,0 +1,389 @@
+#include "node.h"
+#include "v8.h"
+#include "env.h"
+#include "env-inl.h"
+#include "node_perf.h"
+#include "uv.h"
+
+#include <vector>
+
+namespace node {
+namespace performance {
+
+using v8::Array;
+using v8::ArrayBuffer;
+using v8::Context;
+using v8::Function;
+using v8::FunctionCallbackInfo;
+using v8::FunctionTemplate;
+using v8::HandleScope;
+using v8::Integer;
+using v8::Isolate;
+using v8::Local;
+using v8::Name;
+using v8::Object;
+using v8::ObjectTemplate;
+using v8::PropertyCallbackInfo;
+using v8::String;
+using v8::Value;
+
+const uint64_t timeOrigin = PERFORMANCE_NOW();
+uint64_t performance_node_start;
+uint64_t performance_v8_start;
+
+uint64_t performance_last_gc_start_mark_ = 0;
+v8::GCType performance_last_gc_type_ = v8::GCType::kGCTypeAll;
+
+void PerformanceEntry::New(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ Isolate* isolate = env->isolate();
+ Utf8Value name(isolate, args[0]);
+ Utf8Value type(isolate, args[1]);
+ uint64_t now = PERFORMANCE_NOW();
+ new PerformanceEntry(env, args.This(), *name, *type, now, now);
+}
+
+void PerformanceEntry::NotifyObservers(Environment* env,
+ PerformanceEntry* entry) {
+ uint32_t* observers = env->performance_state()->observers;
+ PerformanceEntryType type = ToPerformanceEntryTypeEnum(entry->type().c_str());
+ if (observers == nullptr ||
+ type == NODE_PERFORMANCE_ENTRY_TYPE_INVALID ||
+ !observers[type]) {
+ return;
+ }
+ Local<Context> context = env->context();
+ Isolate* isolate = env->isolate();
+ Local<Value> argv = entry->object();
+ env->performance_entry_callback()->Call(context,
+ v8::Undefined(isolate),
+ 1, &argv);
+}
+
+void Mark(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ Local<Context> context = env->context();
+ Isolate* isolate = env->isolate();
+ Utf8Value name(isolate, args[0]);
+ uint64_t now = PERFORMANCE_NOW();
+ auto marks = env->performance_marks();
+ (*marks)[*name] = now;
+
+ // TODO(jasnell): Once Tracing API is fully implemented, this should
+ // record a trace event also.
+
+ Local<Function> fn = env->performance_entry_template();
+ Local<Object> obj = fn->NewInstance(context).ToLocalChecked();
+ new PerformanceEntry(env, obj, *name, "mark", now, now);
+ args.GetReturnValue().Set(obj);
+}
+
+inline uint64_t GetPerformanceMark(Environment* env, std::string name) {
+ auto marks = env->performance_marks();
+ auto res = marks->find(name);
+ return res != marks->end() ? res->second : 0;
+}
+
+void Measure(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ Local<Context> context = env->context();
+ Isolate* isolate = env->isolate();
+ Utf8Value name(isolate, args[0]);
+ Utf8Value startMark(isolate, args[1]);
+ Utf8Value endMark(isolate, args[2]);
+
+ double* milestones = env->performance_state()->milestones;
+
+ uint64_t startTimestamp = timeOrigin;
+ uint64_t start = GetPerformanceMark(env, *startMark);
+ if (start != 0) {
+ startTimestamp = start;
+ } else {
+ PerformanceMilestone milestone = ToPerformanceMilestoneEnum(*startMark);
+ if (milestone != NODE_PERFORMANCE_MILESTONE_INVALID)
+ startTimestamp = milestones[milestone];
+ }
+
+ uint64_t endTimestamp = GetPerformanceMark(env, *endMark);
+ if (endTimestamp == 0) {
+ PerformanceMilestone milestone = ToPerformanceMilestoneEnum(*endMark);
+ if (milestone != NODE_PERFORMANCE_MILESTONE_INVALID)
+ endTimestamp = milestones[milestone];
+ }
+
+ if (endTimestamp < startTimestamp)
+ endTimestamp = startTimestamp;
+
+ // TODO(jasnell): Once Tracing API is fully implemented, this should
+ // record a trace event also.
+
+ Local<Function> fn = env->performance_entry_template();
+ Local<Object> obj = fn->NewInstance(context).ToLocalChecked();
+ new PerformanceEntry(env, obj, *name, "measure",
+ startTimestamp, endTimestamp);
+ args.GetReturnValue().Set(obj);
+}
+
+void GetPerformanceEntryName(const Local<String> prop,
+ const PropertyCallbackInfo<Value>& info) {
+ Isolate* isolate = info.GetIsolate();
+ PerformanceEntry* entry;
+ ASSIGN_OR_RETURN_UNWRAP(&entry, info.Holder());
+ info.GetReturnValue().Set(
+ String::NewFromUtf8(isolate, entry->name().c_str(), String::kNormalString));
+}
+
+void GetPerformanceEntryType(const Local<String> prop,
+ const PropertyCallbackInfo<Value>& info) {
+ Isolate* isolate = info.GetIsolate();
+ PerformanceEntry* entry;
+ ASSIGN_OR_RETURN_UNWRAP(&entry, info.Holder());
+ info.GetReturnValue().Set(
+ String::NewFromUtf8(isolate, entry->type().c_str(), String::kNormalString));
+}
+
+void GetPerformanceEntryStartTime(const Local<String> prop,
+ const PropertyCallbackInfo<Value>& info) {
+ PerformanceEntry* entry;
+ ASSIGN_OR_RETURN_UNWRAP(&entry, info.Holder());
+ info.GetReturnValue().Set(entry->startTime());
+}
+
+void GetPerformanceEntryDuration(const Local<String> prop,
+ const PropertyCallbackInfo<Value>& info) {
+ PerformanceEntry* entry;
+ ASSIGN_OR_RETURN_UNWRAP(&entry, info.Holder());
+ info.GetReturnValue().Set(entry->duration());
+}
+
+void MarkMilestone(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ Local<Context> context = env->context();
+ double* milestones = env->performance_state()->milestones;
+ PerformanceMilestone milestone =
+ static_cast<PerformanceMilestone>(
+ args[0]->Int32Value(context).ToChecked());
+ if (milestone != NODE_PERFORMANCE_MILESTONE_INVALID) {
+ milestones[milestone] = PERFORMANCE_NOW();
+ }
+}
+
+void SetupPerformanceObservers(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ CHECK(args[0]->IsFunction());
+ env->set_performance_entry_callback(args[0].As<Function>());
+}
+
+inline void PerformanceGCCallback(uv_async_t* handle) {
+ PerformanceEntry::Data* data =
+ static_cast<PerformanceEntry::Data*>(handle->data);
+ Isolate* isolate = Isolate::GetCurrent();
+ HandleScope scope(isolate);
+ Environment* env = Environment::GetCurrent(isolate);
+ Local<Context> context = env->context();
+ Local<Function> fn;
+ Local<Object> obj;
+ PerformanceGCKind kind = static_cast<PerformanceGCKind>(data->data());
+
+ uint32_t* observers = env->performance_state()->observers;
+ if (!observers[NODE_PERFORMANCE_ENTRY_TYPE_GC]) {
+ goto cleanup;
+ }
+
+ fn = env->performance_entry_template();
+ obj = fn->NewInstance(context).ToLocalChecked();
+ obj->Set(context,
+ FIXED_ONE_BYTE_STRING(isolate, "kind"),
+ Integer::New(isolate, kind)).FromJust();
+ new PerformanceEntry(env, obj, data);
+
+ cleanup:
+ delete data;
+ auto closeCB = [](uv_handle_t* handle) { delete handle; };
+ uv_close(reinterpret_cast<uv_handle_t*>(handle), closeCB);
+}
+
+inline void MarkGarbageCollectionStart(Isolate* isolate,
+ v8::GCType type,
+ v8::GCCallbackFlags flags) {
+ performance_last_gc_start_mark_ = PERFORMANCE_NOW();
+ performance_last_gc_type_ = type;
+}
+
+inline void MarkGarbageCollectionEnd(Isolate* isolate,
+ v8::GCType type,
+ v8::GCCallbackFlags flags) {
+ uv_async_t *async = new uv_async_t;
+ async->data =
+ new PerformanceEntry::Data("gc", "gc",
+ performance_last_gc_start_mark_,
+ PERFORMANCE_NOW(), type);
+ uv_async_init(uv_default_loop(), async, PerformanceGCCallback);
+ uv_async_send(async);
+}
+
+inline void SetupGarbageCollectionTracking(Isolate* isolate) {
+  isolate->AddGCPrologueCallback(MarkGarbageCollectionStart);
+  isolate->AddGCEpilogueCallback(MarkGarbageCollectionEnd);
+}
+
+inline Local<Value> GetName(Local<Function> fn) {
+ Local<Value> val = fn->GetDebugName();
+ if (val.IsEmpty() || val->IsUndefined()) {
+ Local<Value> boundFunction = fn->GetBoundFunction();
+ if (!boundFunction.IsEmpty() && !boundFunction->IsUndefined()) {
+ val = GetName(boundFunction.As<Function>());
+ }
+ }
+ return val;
+}
+
+void TimerFunctionCall(const FunctionCallbackInfo<Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ Environment* env = Environment::GetCurrent(isolate);
+ Local<Context> context = env->context();
+ Local<Function> fn = args.Data().As<Function>();
+ size_t count = args.Length();
+ size_t idx;
+ std::vector<Local<Value>> call_args;
+ for (size_t i = 0; i < count; ++i) {
+ call_args.push_back(args[i]);
+ }
+
+ Utf8Value name(isolate, GetName(fn));
+
+ uint64_t start;
+ uint64_t end;
+ v8::TryCatch try_catch(isolate);
+ if (args.IsConstructCall()) {
+ start = PERFORMANCE_NOW();
+ v8::MaybeLocal<Object> ret = fn->NewInstance(context,
+ call_args.size(),
+ call_args.data());
+ end = PERFORMANCE_NOW();
+ if (ret.IsEmpty()) {
+ try_catch.ReThrow();
+ return;
+ }
+ args.GetReturnValue().Set(ret.ToLocalChecked());
+ } else {
+ start = PERFORMANCE_NOW();
+ v8::MaybeLocal<Value> ret = fn->Call(context,
+ args.This(),
+ call_args.size(),
+ call_args.data());
+ end = PERFORMANCE_NOW();
+ if (ret.IsEmpty()) {
+ try_catch.ReThrow();
+ return;
+ }
+ args.GetReturnValue().Set(ret.ToLocalChecked());
+ }
+
+
+ uint32_t* observers = env->performance_state()->observers;
+ if (!observers[NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION])
+ return;
+
+ Local<Function> ctor = env->performance_entry_template();
+ v8::MaybeLocal<Object> instance = ctor->NewInstance(context);
+ Local<Object> obj = instance.ToLocalChecked();
+ for (idx = 0; idx < count; idx++) {
+ obj->Set(context, idx, args[idx]);
+ }
+ new PerformanceEntry(env, obj, *name, "function", start, end);
+}
+
+void Timerify(const FunctionCallbackInfo<Value>& args) {
+ Environment* env = Environment::GetCurrent(args);
+ Local<Context> context = env->context();
+ CHECK(args[0]->IsFunction());
+ CHECK(args[1]->IsNumber());
+ Local<Function> fn = args[0].As<Function>();
+ int length = args[1]->IntegerValue(context).ToChecked();
+ Local<Function> wrap =
+ Function::New(context, TimerFunctionCall, fn, length).ToLocalChecked();
+ args.GetReturnValue().Set(wrap);
+}
+
+void Init(Local<Object> target,
+ Local<Value> unused,
+ Local<Context> context) {
+ Environment* env = Environment::GetCurrent(context);
+ Isolate* isolate = env->isolate();
+ performance_state* state = env->performance_state();
+ auto state_ab = ArrayBuffer::New(isolate, state, sizeof(*state));
+
+ #define SET_STATE_TYPEDARRAY(name, type, field) \
+ target->Set(context, \
+ FIXED_ONE_BYTE_STRING(isolate, (name)), \
+ type::New(state_ab, \
+ offsetof(performance_state, field), \
+ arraysize(state->field))) \
+ .FromJust()
+ SET_STATE_TYPEDARRAY("observerCounts", v8::Uint32Array, observers);
+ SET_STATE_TYPEDARRAY("milestones", v8::Float64Array, milestones);
+ #undef SET_STATE_TYPEDARRAY
+
+ Local<String> performanceEntryString =
+ FIXED_ONE_BYTE_STRING(isolate, "PerformanceEntry");
+
+ Local<FunctionTemplate> pe = env->NewFunctionTemplate(PerformanceEntry::New);
+ pe->InstanceTemplate()->SetInternalFieldCount(1);
+ pe->SetClassName(performanceEntryString);
+ Local<ObjectTemplate> ot = pe->InstanceTemplate();
+ ot->SetAccessor(env->name_string(), GetPerformanceEntryName);
+ ot->SetAccessor(FIXED_ONE_BYTE_STRING(isolate, "entryType"),
+ GetPerformanceEntryType);
+ ot->SetAccessor(FIXED_ONE_BYTE_STRING(isolate, "startTime"),
+ GetPerformanceEntryStartTime);
+ ot->SetAccessor(FIXED_ONE_BYTE_STRING(isolate, "duration"),
+ GetPerformanceEntryDuration);
+ Local<Function> fn = pe->GetFunction();
+ target->Set(performanceEntryString, fn);
+ env->set_performance_entry_template(fn);
+
+ env->SetMethod(target, "mark", Mark);
+ env->SetMethod(target, "measure", Measure);
+ env->SetMethod(target, "markMilestone", MarkMilestone);
+ env->SetMethod(target, "setupObservers", SetupPerformanceObservers);
+ env->SetMethod(target, "timerify", Timerify);
+
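+  // Constants exposed on the binding: GC kinds, entry types and milestones.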
+ Local<Object> constants = Object::New(isolate);
+
+ NODE_DEFINE_CONSTANT(constants, NODE_PERFORMANCE_GC_MAJOR);
+ NODE_DEFINE_CONSTANT(constants, NODE_PERFORMANCE_GC_MINOR);
+ NODE_DEFINE_CONSTANT(constants, NODE_PERFORMANCE_GC_INCREMENTAL);
+ NODE_DEFINE_CONSTANT(constants, NODE_PERFORMANCE_GC_WEAKCB);
+
+#define V(name, _) \
+ NODE_DEFINE_HIDDEN_CONSTANT(constants, NODE_PERFORMANCE_ENTRY_TYPE_##name);
+ NODE_PERFORMANCE_ENTRY_TYPES(V)
+#undef V
+
+#define V(name, _) \
+ NODE_DEFINE_HIDDEN_CONSTANT(constants, NODE_PERFORMANCE_MILESTONE_##name);
+ NODE_PERFORMANCE_MILESTONES(V)
+#undef V
+
+ v8::PropertyAttribute attr =
+ static_cast<v8::PropertyAttribute>(v8::ReadOnly | v8::DontDelete);
+
+ target->DefineOwnProperty(context,
+ FIXED_ONE_BYTE_STRING(isolate, "timeOrigin"),
+ v8::Number::New(isolate, timeOrigin / 1e6),
+ attr);
+
+ target->DefineOwnProperty(context,
+ env->constants_string(),
+ constants,
+ attr);
+
+ SetupGarbageCollectionTracking(isolate);
+}
+
+} // namespace performance
+} // namespace node
+
+NODE_MODULE_CONTEXT_AWARE_BUILTIN(performance, node::performance::Init)
diff --git a/src/node_perf.h b/src/node_perf.h
new file mode 100644
index 00000000000..412479c9023
--- /dev/null
+++ b/src/node_perf.h
@@ -0,0 +1,174 @@
+#ifndef SRC_NODE_PERF_H_
+#define SRC_NODE_PERF_H_
+
+#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
+
+#include "node.h"
+#include "node_perf_common.h"
+#include "env.h"
+#include "base-object.h"
+#include "base-object-inl.h"
+
+#include "v8.h"
+#include "uv.h"
+
+#include <limits>
+#include <string>
+
+namespace node {
+namespace performance {
+
+using v8::FunctionCallbackInfo;
+using v8::GCType;
+using v8::Local;
+using v8::Object;
+using v8::Value;
+
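+// Map the string labels used on the JS side to their enum values; unknown
+// labels yield the *_INVALID sentinel.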
+static inline PerformanceMilestone ToPerformanceMilestoneEnum(const char* str) {
+#define V(name, label) \
+ if (strcmp(str, label) == 0) return NODE_PERFORMANCE_MILESTONE_##name;
+ NODE_PERFORMANCE_MILESTONES(V)
+#undef V
+ return NODE_PERFORMANCE_MILESTONE_INVALID;
+}
+
+static inline PerformanceEntryType ToPerformanceEntryTypeEnum(
+ const char* type) {
+#define V(name, label) \
+ if (strcmp(type, label) == 0) return NODE_PERFORMANCE_ENTRY_TYPE_##name;
+ NODE_PERFORMANCE_ENTRY_TYPES(V)
+#undef V
+ return NODE_PERFORMANCE_ENTRY_TYPE_INVALID;
+}
+
+const double MAX_DOUBLE = std::numeric_limits<double>::max();
+
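+// Records the high-resolution timestamp for a bootstrap milestone in the
+// shared performance state.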
+NODE_EXTERN inline void MarkPerformanceMilestone(
+ Environment* env,
+ PerformanceMilestone milestone) {
+ env->performance_state()->milestones[milestone] = PERFORMANCE_NOW();
+}
+
+class PerformanceEntry : public BaseObject {
+ public:
+ // Used for temporary storage of performance entry details when the
+ // object cannot be created immediately.
+ class Data {
+ public:
+ Data(
+ const char* name,
+ const char* type,
+ uint64_t startTime,
+ uint64_t endTime,
+ int data = 0) :
+ name_(name),
+ type_(type),
+ startTime_(startTime),
+ endTime_(endTime),
+ data_(data) {}
+
+ std::string name() const {
+ return name_;
+ }
+
+ std::string type() const {
+ return type_;
+ }
+
+ uint64_t startTime() const {
+ return startTime_;
+ }
+
+ uint64_t endTime() const {
+ return endTime_;
+ }
+
+ int data() const {
+ return data_;
+ }
+
+ private:
+ std::string name_;
+ std::string type_;
+ uint64_t startTime_;
+ uint64_t endTime_;
+ int data_;
+ };
+
+ static void NotifyObservers(Environment* env, PerformanceEntry* entry);
+
+ static void New(const FunctionCallbackInfo<Value>& args);
+
+ PerformanceEntry(Environment* env,
+ Local<Object> wrap,
+ const char* name,
+ const char* type,
+ uint64_t startTime,
+ uint64_t endTime) :
+ BaseObject(env, wrap),
+ name_(name),
+ type_(type),
+ startTime_(startTime),
+ endTime_(endTime) {
+ MakeWeak<PerformanceEntry>(this);
+ NotifyObservers(env, this);
+ }
+
+ PerformanceEntry(Environment* env,
+ Local<Object> wrap,
+ Data* data) :
+ BaseObject(env, wrap),
+ name_(data->name()),
+ type_(data->type()),
+ startTime_(data->startTime()),
+ endTime_(data->endTime()) {
+ MakeWeak<PerformanceEntry>(this);
+ NotifyObservers(env, this);
+ }
+
+ ~PerformanceEntry() {}
+
+ std::string name() const {
+ return name_;
+ }
+
+ std::string type() const {
+ return type_;
+ }
+
+ double startTime() const {
+ return startTime_ / 1e6;
+ }
+
+ double duration() const {
+ return durationNano() / 1e6;
+ }
+
+ uint64_t startTimeNano() const {
+ return startTime_;
+ }
+
+ uint64_t durationNano() const {
+ return endTime_ - startTime_;
+ }
+
+ private:
+ std::string name_;
+ std::string type_;
+ uint64_t startTime_;
+ uint64_t endTime_;
+};
+
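+// GC kinds reported on 'gc' entries map directly onto V8's GCType values.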
+enum PerformanceGCKind {
+ NODE_PERFORMANCE_GC_MAJOR = GCType::kGCTypeMarkSweepCompact,
+ NODE_PERFORMANCE_GC_MINOR = GCType::kGCTypeScavenge,
+ NODE_PERFORMANCE_GC_INCREMENTAL = GCType::kGCTypeIncrementalMarking,
+ NODE_PERFORMANCE_GC_WEAKCB = GCType::kGCTypeProcessWeakCallbacks
+};
+
+} // namespace performance
+} // namespace node
+
+#endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
+
+#endif // SRC_NODE_PERF_H_
diff --git a/src/node_perf_common.h b/src/node_perf_common.h
new file mode 100644
index 00000000000..30efcb6134b
--- /dev/null
+++ b/src/node_perf_common.h
@@ -0,0 +1,71 @@
+#ifndef SRC_NODE_PERF_COMMON_H_
+#define SRC_NODE_PERF_COMMON_H_
+
+#include "node.h"
+#include "v8.h"
+
+#include <map>
+#include <string>
+
+namespace node {
+namespace performance {
+
+#define PERFORMANCE_NOW() uv_hrtime()
+
+// These occur before the environment is created. Cache them
+// here and add them to the milestones when the env is init'd.
+extern uint64_t performance_node_start;
+extern uint64_t performance_v8_start;
+
+#define NODE_PERFORMANCE_MILESTONES(V) \
+ V(ENVIRONMENT, "environment") \
+ V(NODE_START, "nodeStart") \
+ V(V8_START, "v8Start") \
+ V(LOOP_START, "loopStart") \
+ V(LOOP_EXIT, "loopExit") \
+ V(BOOTSTRAP_COMPLETE, "bootstrapComplete") \
+ V(THIRD_PARTY_MAIN_START, "thirdPartyMainStart") \
+ V(THIRD_PARTY_MAIN_END, "thirdPartyMainEnd") \
+ V(CLUSTER_SETUP_START, "clusterSetupStart") \
+ V(CLUSTER_SETUP_END, "clusterSetupEnd") \
+ V(MODULE_LOAD_START, "moduleLoadStart") \
+ V(MODULE_LOAD_END, "moduleLoadEnd") \
+ V(PRELOAD_MODULE_LOAD_START, "preloadModulesLoadStart") \
+ V(PRELOAD_MODULE_LOAD_END, "preloadModulesLoadEnd")
+
+#define NODE_PERFORMANCE_ENTRY_TYPES(V) \
+ V(NODE, "node") \
+ V(MARK, "mark") \
+ V(MEASURE, "measure") \
+ V(GC, "gc") \
+ V(FUNCTION, "function")
+
+enum PerformanceMilestone {
+#define V(name, _) NODE_PERFORMANCE_MILESTONE_##name,
+ NODE_PERFORMANCE_MILESTONES(V)
+#undef V
+ NODE_PERFORMANCE_MILESTONE_INVALID
+};
+
+enum PerformanceEntryType {
+#define V(name, _) NODE_PERFORMANCE_ENTRY_TYPE_##name,
+ NODE_PERFORMANCE_ENTRY_TYPES(V)
+#undef V
+ NODE_PERFORMANCE_ENTRY_TYPE_INVALID
+};
+
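+// Convenience macro for recording a named milestone.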
+#define PERFORMANCE_MARK(env, n) \
+ do { \
+ node::performance::MarkPerformanceMilestone(env, \
+ node::performance::NODE_PERFORMANCE_MILESTONE_##n); \
+  } while (0)
+
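+// Backing storage shared with JS through the typed arrays created in
+// Init(): observer counts per entry type and timestamps per milestone.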
+struct performance_state {
+ uint32_t observers[NODE_PERFORMANCE_ENTRY_TYPE_INVALID];
+ double milestones[NODE_PERFORMANCE_MILESTONE_INVALID];
+};
+
+} // namespace performance
+} // namespace node
+
+#endif // SRC_NODE_PERF_COMMON_H_
diff --git a/test/parallel/test-performance-function.js b/test/parallel/test-performance-function.js
new file mode 100644
index 00000000000..8dd4d3004ba
--- /dev/null
+++ b/test/parallel/test-performance-function.js
@@ -0,0 +1,93 @@
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+{
+  // Intentional no-op function; do not wrap it in common.mustCall().
+ const n = performance.timerify(() => {});
+ n();
+ const entries = performance.getEntriesByType('function');
+ assert.strictEqual(entries.length, 0);
+
+ const obs = new PerformanceObserver(common.mustCall((list) => {
+ const entries = list.getEntries();
+ const entry = entries[0];
+ assert(entry);
+ assert.strictEqual(entry.name, 'performance.timerify');
+ assert.strictEqual(entry.entryType, 'function');
+ assert.strictEqual(typeof entry.duration, 'number');
+ assert.strictEqual(typeof entry.startTime, 'number');
+ obs.disconnect();
+ performance.clearFunctions();
+ }));
+ obs.observe({ entryTypes: ['function'] });
+ n();
+}
+
+{
+  // If the wrapped function throws, the error should just be bubbled up
+  // and no performance timeline entry will be reported.
+ const obs = new PerformanceObserver(common.mustNotCall());
+ obs.observe({ entryTypes: ['function'] });
+ const n = performance.timerify(() => {
+ throw new Error('test');
+ });
+ assert.throws(() => n(), /^Error: test$/);
+ const entries = performance.getEntriesByType('function');
+ assert.strictEqual(entries.length, 0);
+ obs.disconnect();
+}
+
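+// Construct calls are timed as well; the arguments passed to the wrapped
+// constructor are exposed as indexed properties on the entry.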
+{
+ class N {}
+ const n = performance.timerify(N);
+ new n();
+ const entries = performance.getEntriesByType('function');
+ assert.strictEqual(entries.length, 0);
+
+ const obs = new PerformanceObserver(common.mustCall((list) => {
+ const entries = list.getEntries();
+ const entry = entries[0];
+    assert(entry);
+    assert.strictEqual(entry[0], 1);
+    assert.strictEqual(entry[1], 'abc');
+ assert.strictEqual(entry.name, 'N');
+ assert.strictEqual(entry.entryType, 'function');
+ assert.strictEqual(typeof entry.duration, 'number');
+ assert.strictEqual(typeof entry.startTime, 'number');
+ obs.disconnect();
+ performance.clearFunctions();
+ }));
+ obs.observe({ entryTypes: ['function'] });
+
+ new n(1, 'abc');
+}
+
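+// Only functions can be timerified; anything else throws
+// ERR_INVALID_ARG_TYPE.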
+{
+ [1, {}, [], null, undefined, Infinity].forEach((i) => {
+ assert.throws(() => performance.timerify(i),
+ common.expectsError({
+ code: 'ERR_INVALID_ARG_TYPE',
+ type: TypeError,
+ message: 'The "fn" argument must be of type Function'
+ }));
+ });
+}
+
+// A function can only be wrapped once; also check the wrapper's length
+// and name.
+{
+ const m = (a, b = 1) => {};
+ const n = performance.timerify(m);
+ const o = performance.timerify(m);
+ const p = performance.timerify(n);
+ assert.strictEqual(n, o);
+ assert.strictEqual(n, p);
+ assert.strictEqual(n.length, m.length);
+ assert.strictEqual(n.name, 'timerified m');
+}
diff --git a/test/parallel/test-performance-gc.js b/test/parallel/test-performance-gc.js
new file mode 100755
index 00000000000..1ff4c9ae629
--- /dev/null
+++ b/test/parallel/test-performance-gc.js
@@ -0,0 +1,51 @@
+// Flags: --expose-gc
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const {
+ performance,
+ PerformanceObserver
+} = require('perf_hooks');
+
+const {
+ NODE_PERFORMANCE_GC_MAJOR,
+ NODE_PERFORMANCE_GC_MINOR,
+ NODE_PERFORMANCE_GC_INCREMENTAL,
+ NODE_PERFORMANCE_GC_WEAKCB
+} = process.binding('performance').constants;
+
+const kinds = [
+ NODE_PERFORMANCE_GC_MAJOR,
+ NODE_PERFORMANCE_GC_MINOR,
+ NODE_PERFORMANCE_GC_INCREMENTAL,
+ NODE_PERFORMANCE_GC_WEAKCB
+];
+
+// With no observers for GC events, no entries should appear.
+{
+ global.gc();
+ const entries = performance.getEntriesByType('gc');
+ assert.strictEqual(entries.length, 0);
+}
+
+// Adding an observer should force at least one 'gc' entry to appear.
+{
+ const obs = new PerformanceObserver(common.mustCallAtLeast((list) => {
+ const entry = list.getEntries()[0];
+ assert(entry);
+ assert.strictEqual(entry.name, 'gc');
+ assert.strictEqual(entry.entryType, 'gc');
+ assert(kinds.includes(entry.kind));
+ assert.strictEqual(typeof entry.startTime, 'number');
+ assert.strictEqual(typeof entry.duration, 'number');
+
+ performance.clearGC();
+
+ const entries = performance.getEntriesByType('gc');
+ assert.strictEqual(entries.length, 0);
+ obs.disconnect();
+ }));
+ obs.observe({ entryTypes: ['gc'] });
+ global.gc();
+}
diff --git a/test/parallel/test-performance.js b/test/parallel/test-performance.js
new file mode 100644
index 00000000000..62fb13ed289
--- /dev/null
+++ b/test/parallel/test-performance.js
@@ -0,0 +1,124 @@
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const { performance } = require('perf_hooks');
+
+assert(performance);
+assert(performance.nodeTiming);
+assert.strictEqual(typeof performance.timeOrigin, 'number');
+
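+// The timeline initially contains only the nodeTiming entry.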
+{
+ const entries = performance.getEntries();
+ assert(Array.isArray(entries));
+ assert.strictEqual(entries.length, 1);
+ assert.strictEqual(entries[0], performance.nodeTiming);
+}
+
+{
+ const entries = performance.getEntriesByName('node');
+ assert(Array.isArray(entries));
+ assert.strictEqual(entries.length, 1);
+ assert.strictEqual(entries[0], performance.nodeTiming);
+}
+
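+// mark() adds a 'mark' entry that clearMarks('A') then removes.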
+{
+ let n;
+ let entries = performance.getEntries();
+ for (n = 0; n < entries.length; n++) {
+ const entry = entries[n];
+ assert.notStrictEqual(entry.name, 'A');
+ assert.notStrictEqual(entry.entryType, 'mark');
+ }
+ performance.mark('A');
+ entries = performance.getEntries();
+ const markA = entries[1];
+ assert.strictEqual(markA.name, 'A');
+ assert.strictEqual(markA.entryType, 'mark');
+ performance.clearMarks('A');
+ entries = performance.getEntries();
+ for (n = 0; n < entries.length; n++) {
+ const entry = entries[n];
+ assert.notStrictEqual(entry.name, 'A');
+ assert.notStrictEqual(entry.entryType, 'mark');
+ }
+}
+
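+// clearMarks() with no argument removes all user-created marks.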
+{
+ let entries = performance.getEntries();
+ for (let n = 0; n < entries.length; n++) {
+ const entry = entries[n];
+ assert.notStrictEqual(entry.name, 'B');
+ assert.notStrictEqual(entry.entryType, 'mark');
+ }
+ performance.mark('B');
+ entries = performance.getEntries();
+ const markB = entries[1];
+ assert.strictEqual(markB.name, 'B');
+ assert.strictEqual(markB.entryType, 'mark');
+ performance.clearMarks();
+ entries = performance.getEntries();
+ assert.strictEqual(entries.length, 1);
+}
+
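+// measure() tolerates arbitrary start marks but throws
+// ERR_INVALID_PERFORMANCE_MARK when the end mark has not been set.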
+{
+ performance.mark('A');
+ [undefined, null, 'foo', 'initialize', 1].forEach((i) => {
+ assert.doesNotThrow(() => performance.measure('test', i, 'A'));
+ });
+
+ [undefined, null, 'foo', 1].forEach((i) => {
+ assert.throws(() => performance.measure('test', 'A', i),
+ common.expectsError({
+ code: 'ERR_INVALID_PERFORMANCE_MARK',
+ type: Error,
+ message: `The "${i}" performance mark has not been set`
+ }));
+ });
+
+ performance.clearMeasures();
+ performance.clearMarks();
+
+ const entries = performance.getEntries();
+ assert.strictEqual(entries.length, 1);
+}
+
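+// A measure's startTime matches the start mark and its duration matches
+// the gap between the two marks.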
+{
+ performance.mark('A');
+ setImmediate(() => {
+ performance.mark('B');
+ performance.measure('foo', 'A', 'B');
+ const entry = performance.getEntriesByName('foo')[0];
+ const markA = performance.getEntriesByName('A', 'mark')[0];
+ const markB = performance.getEntriesByName('B', 'mark')[0];
+ assert.strictEqual(entry.name, 'foo');
+ assert.strictEqual(entry.entryType, 'measure');
+ assert.strictEqual(entry.startTime, markA.startTime);
+ assert.strictEqual(entry.duration.toPrecision(3),
+ (markB.startTime - markA.startTime).toPrecision(3));
+ });
+}
+
+assert.strictEqual(performance.nodeTiming.name, 'node');
+assert.strictEqual(performance.nodeTiming.entryType, 'node');
+
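+// Each bootstrap milestone is exposed on nodeTiming as a number.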
+[
+ 'startTime',
+ 'duration',
+ 'nodeStart',
+ 'v8Start',
+ 'bootstrapComplete',
+ 'environment',
+ 'loopStart',
+ 'loopExit',
+ 'thirdPartyMainStart',
+ 'thirdPartyMainEnd',
+ 'clusterSetupStart',
+ 'clusterSetupEnd',
+ 'moduleLoadStart',
+ 'moduleLoadEnd',
+ 'preloadModuleLoadStart',
+ 'preloadModuleLoadEnd'
+].forEach((i) => {
+ assert.strictEqual(typeof performance.nodeTiming[i], 'number');
+});
diff --git a/test/parallel/test-performanceobserver.js b/test/parallel/test-performanceobserver.js
new file mode 100644
index 00000000000..9b7dba5eb2a
--- /dev/null
+++ b/test/parallel/test-performanceobserver.js
@@ -0,0 +1,139 @@
+'use strict';
+
+const common = require('../common');
+const Countdown = require('../common/countdown');
+const assert = require('assert');
+const {
+ observerCounts: counts
+} = process.binding('performance');
+const {
+ performance,
+ PerformanceObserver,
+ constants
+} = require('perf_hooks');
+
+const {
+ NODE_PERFORMANCE_ENTRY_TYPE_NODE,
+ NODE_PERFORMANCE_ENTRY_TYPE_MARK,
+ NODE_PERFORMANCE_ENTRY_TYPE_MEASURE,
+ NODE_PERFORMANCE_ENTRY_TYPE_GC,
+ NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION,
+} = constants;
+
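+// Snapshot of the observer counts at startup; the tests below track how
+// observe() and disconnect() change them.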
+assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_NODE], 0);
+assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_MARK], 1);
+assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_MEASURE], 1);
+assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_GC], 0);
+assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_FUNCTION], 0);
+
+{
+ [1, null, undefined, {}, [], Infinity].forEach((i) => {
+ assert.throws(() => new PerformanceObserver(i),
+ common.expectsError({
+ code: 'ERR_INVALID_CALLBACK',
+ type: TypeError,
+ message: 'Callback must be a function'
+ }));
+ });
+ const observer = new PerformanceObserver(common.mustNotCall());
+
+ [1, null, undefined].forEach((i) => {
+ assert.throws(() => observer.observe(i),
+ common.expectsError({
+ code: 'ERR_INVALID_ARG_TYPE',
+ type: TypeError,
+ message: 'The "options" argument must be of type Object'
+ }));
+ });
+
+ [1, undefined, null, {}, Infinity].forEach((i) => {
+ assert.throws(() => observer.observe({ entryTypes: i }),
+ common.expectsError({
+ code: 'ERR_INVALID_OPT_VALUE',
+ type: TypeError,
+ message: 'The value "[object Object]" is invalid for ' +
+ 'option "entryTypes"'
+ }));
+ });
+}
+
+// Test Non-Buffered
+{
+ const observer =
+ new PerformanceObserver(common.mustCall(callback, 3));
+
+ const countdown =
+ new Countdown(3, common.mustCall(() => {
+ observer.disconnect();
+ assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_MARK], 1);
+ }));
+
+ function callback(list, obs) {
+ assert.strictEqual(obs, observer);
+ const entries = list.getEntries();
+ assert.strictEqual(entries.length, 1);
+ countdown.dec();
+ }
+ assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_MARK], 1);
+ assert.doesNotThrow(() => observer.observe({ entryTypes: ['mark'] }));
+ assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_MARK], 2);
+ performance.mark('test1');
+ performance.mark('test2');
+ performance.mark('test3');
+}
+
+
+// Test Buffered
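+// With buffered: true the notifications are batched, so the callback is
+// expected to fire once with all three entries.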
+{
+ const observer =
+ new PerformanceObserver(common.mustCall(callback, 1));
+ assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_MARK], 1);
+
+ function callback(list, obs) {
+ assert.strictEqual(obs, observer);
+ const entries = list.getEntries();
+ assert.strictEqual(entries.length, 3);
+ observer.disconnect();
+ assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_MARK], 1);
+
+ {
+ const entriesByName = list.getEntriesByName('test1');
+ assert.strictEqual(entriesByName.length, 1);
+ assert.strictEqual(entriesByName[0].name, 'test1');
+ assert.strictEqual(entriesByName[0].entryType, 'mark');
+ }
+
+ {
+ const entriesByName = list.getEntriesByName('test1', 'mark');
+ assert.strictEqual(entriesByName.length, 1);
+ assert.strictEqual(entriesByName[0].name, 'test1');
+ assert.strictEqual(entriesByName[0].entryType, 'mark');
+ }
+
+ {
+ const entriesByName = list.getEntriesByName('test1', 'measure');
+ assert.strictEqual(entriesByName.length, 0);
+ }
+
+ {
+ const entriesByType = list.getEntriesByType('measure');
+ assert.strictEqual(entriesByType.length, 1);
+ assert.strictEqual(entriesByType[0].name, 'test3');
+ assert.strictEqual(entriesByType[0].entryType, 'measure');
+ }
+ }
+
+ assert.doesNotThrow(() => {
+ observer.observe({ entryTypes: ['mark', 'measure'], buffered: true });
+ });
+ // Do this twice to make sure it doesn't throw
+ assert.doesNotThrow(() => {
+ observer.observe({ entryTypes: ['mark', 'measure'], buffered: true });
+ });
+  // Even though observe() was called twice, the observer registers only
+  // once, so the count only increases by one.
+ assert.strictEqual(counts[NODE_PERFORMANCE_ENTRY_TYPE_MARK], 2);
+ performance.mark('test1');
+ performance.mark('test2');
+ performance.measure('test3', 'test1', 'test2');
+}