github.com/npm/cli.git
author    claudiahdz <cghr1990@gmail.com>  2020-02-06 23:43:20 +0300
committer isaacs <i@izs.me>  2020-05-08 04:11:51 +0300
commit    0a30b0d8a1c448b6d89d781ad81f46117d014953 (patch)
tree      8b2443c75e95cd29e8a06cb990282f87a886115a /node_modules
parent    0a45514d8beba749bebbd973ca9750ee0dc13b40 (diff)
tar@6.0.1
Diffstat (limited to 'node_modules')
-rw-r--r--  node_modules/fs-minipass/index.js | 103
-rw-r--r--  node_modules/fs-minipass/node_modules/minipass/README.md | 76
-rw-r--r--  node_modules/fs-minipass/node_modules/minipass/index.js | 45
-rw-r--r--  node_modules/fs-minipass/node_modules/minipass/package.json | 30
-rw-r--r--  node_modules/fs-minipass/node_modules/yallist/LICENSE | 15
-rw-r--r--  node_modules/fs-minipass/node_modules/yallist/README.md | 204
-rw-r--r--  node_modules/fs-minipass/node_modules/yallist/iterator.js | 8
-rw-r--r--  node_modules/fs-minipass/node_modules/yallist/package.json | 62
-rw-r--r--  node_modules/fs-minipass/node_modules/yallist/yallist.js | 426
-rw-r--r--  node_modules/fs-minipass/package.json | 32
-rw-r--r--  node_modules/minizlib/README.md | 7
-rw-r--r--  node_modules/minizlib/index.js | 24
-rw-r--r--  node_modules/minizlib/node_modules/minipass/README.md | 76
-rw-r--r--  node_modules/minizlib/node_modules/minipass/index.js | 45
-rw-r--r--  node_modules/minizlib/node_modules/minipass/package.json | 30
-rw-r--r--  node_modules/minizlib/node_modules/yallist/LICENSE | 15
-rw-r--r--  node_modules/minizlib/node_modules/yallist/README.md | 204
-rw-r--r--  node_modules/minizlib/node_modules/yallist/iterator.js | 8
-rw-r--r--  node_modules/minizlib/node_modules/yallist/package.json | 63
-rw-r--r--  node_modules/minizlib/node_modules/yallist/yallist.js | 426
-rw-r--r--  node_modules/minizlib/package.json | 35
-rw-r--r--  node_modules/node-gyp/node_modules/fs-minipass/LICENSE | 15
-rw-r--r--  node_modules/node-gyp/node_modules/fs-minipass/README.md | 70
-rw-r--r--  node_modules/node-gyp/node_modules/fs-minipass/index.js | 387
-rw-r--r--  node_modules/node-gyp/node_modules/fs-minipass/package.json | 65
-rw-r--r--  node_modules/node-gyp/node_modules/minipass/LICENSE | 15
-rw-r--r--  node_modules/node-gyp/node_modules/minipass/README.md | 606
-rw-r--r--  node_modules/node-gyp/node_modules/minipass/index.js | 537
-rw-r--r--  node_modules/node-gyp/node_modules/minipass/package.json | 72
-rw-r--r--  node_modules/node-gyp/node_modules/minizlib/LICENSE | 26
-rw-r--r--  node_modules/node-gyp/node_modules/minizlib/README.md | 53
-rw-r--r--  node_modules/node-gyp/node_modules/minizlib/constants.js | 115
-rw-r--r--  node_modules/node-gyp/node_modules/minizlib/index.js | 320
-rw-r--r--  node_modules/node-gyp/node_modules/minizlib/package.json | 71
-rw-r--r--  node_modules/node-gyp/node_modules/tar/LICENSE | 15
-rw-r--r--  node_modules/node-gyp/node_modules/tar/README.md | 954
-rw-r--r--  node_modules/node-gyp/node_modules/tar/index.js | 18
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/buffer.js (renamed from node_modules/tar/lib/buffer.js) | 0
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/create.js | 105
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/extract.js | 112
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/header.js | 289
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/high-level-opt.js | 29
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/large-numbers.js | 97
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/list.js | 130
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/mkdir.js | 206
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/mode-fix.js | 14
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/pack.js | 404
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/parse.js | 428
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/pax.js | 146
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/read-entry.js | 98
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/replace.js | 220
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/types.js | 44
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/unpack.js | 621
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/update.js | 36
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/warn-mixin.js | 14
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/winchars.js | 23
-rw-r--r--  node_modules/node-gyp/node_modules/tar/lib/write-entry.js | 422
-rw-r--r--  node_modules/node-gyp/node_modules/tar/package.json | 82
-rw-r--r--  node_modules/pacote/node_modules/fs-minipass/LICENSE | 15
-rw-r--r--  node_modules/pacote/node_modules/fs-minipass/README.md | 70
-rw-r--r--  node_modules/pacote/node_modules/fs-minipass/index.js | 387
-rw-r--r--  node_modules/pacote/node_modules/fs-minipass/package.json | 65
-rw-r--r--  node_modules/pacote/node_modules/minizlib/LICENSE | 26
-rw-r--r--  node_modules/pacote/node_modules/minizlib/README.md | 53
-rw-r--r--  node_modules/pacote/node_modules/minizlib/constants.js | 115
-rw-r--r--  node_modules/pacote/node_modules/minizlib/index.js | 320
-rw-r--r--  node_modules/pacote/node_modules/minizlib/package.json | 71
-rw-r--r--  node_modules/pacote/node_modules/tar/LICENSE | 15
-rw-r--r--  node_modules/pacote/node_modules/tar/README.md | 954
-rw-r--r--  node_modules/pacote/node_modules/tar/index.js | 18
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/buffer.js | 11
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/create.js | 105
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/extract.js | 112
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/header.js | 289
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/high-level-opt.js | 29
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/large-numbers.js | 97
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/list.js | 130
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/mkdir.js | 206
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/mode-fix.js | 14
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/pack.js | 404
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/parse.js | 428
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/pax.js | 146
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/read-entry.js | 98
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/replace.js | 220
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/types.js | 44
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/unpack.js | 621
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/update.js | 36
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/warn-mixin.js | 14
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/winchars.js | 23
-rw-r--r--  node_modules/pacote/node_modules/tar/lib/write-entry.js | 422
-rw-r--r--  node_modules/pacote/node_modules/tar/package.json | 82
-rw-r--r--  node_modules/tar/CHANGELOG.md | 66
-rw-r--r--  node_modules/tar/README.md | 183
-rw-r--r--  node_modules/tar/lib/get-write-flag.js | 20
-rw-r--r--  node_modules/tar/lib/header.js | 1
-rw-r--r--  node_modules/tar/lib/large-numbers.js | 6
-rw-r--r--  node_modules/tar/lib/list.js | 2
-rw-r--r--  node_modules/tar/lib/mkdir.js | 2
-rw-r--r--  node_modules/tar/lib/mode-fix.js | 12
-rw-r--r--  node_modules/tar/lib/pack.js | 11
-rw-r--r--  node_modules/tar/lib/parse.js | 165
-rw-r--r--  node_modules/tar/lib/path-reservations.js | 125
-rw-r--r--  node_modules/tar/lib/pax.js | 1
-rw-r--r--  node_modules/tar/lib/replace.js | 1
-rw-r--r--  node_modules/tar/lib/unpack.js | 135
-rw-r--r--  node_modules/tar/lib/warn-mixin.js | 27
-rw-r--r--  node_modules/tar/lib/write-entry.js | 56
l---------  node_modules/tar/node_modules/.bin/mkdirp | 1
-rw-r--r--  node_modules/tar/node_modules/minipass/README.md | 76
-rw-r--r--  node_modules/tar/node_modules/minipass/index.js | 45
-rw-r--r--  node_modules/tar/node_modules/minipass/package.json | 30
-rw-r--r--  node_modules/tar/node_modules/mkdirp/CHANGELOG.md | 15
-rw-r--r--  node_modules/tar/node_modules/mkdirp/LICENSE | 21
-rwxr-xr-x  node_modules/tar/node_modules/mkdirp/bin/cmd.js | 68
-rw-r--r--  node_modules/tar/node_modules/mkdirp/index.js | 31
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/find-made.js | 29
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/mkdirp-manual.js | 64
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/mkdirp-native.js | 39
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/opts-arg.js | 23
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/path-arg.js | 29
-rw-r--r--  node_modules/tar/node_modules/mkdirp/lib/use-native.js | 10
-rw-r--r--  node_modules/tar/node_modules/mkdirp/package.json | 75
-rw-r--r--  node_modules/tar/node_modules/mkdirp/readme.markdown | 266
-rw-r--r--  node_modules/tar/node_modules/yallist/LICENSE | 15
-rw-r--r--  node_modules/tar/node_modules/yallist/README.md | 204
-rw-r--r--  node_modules/tar/node_modules/yallist/iterator.js | 8
-rw-r--r--  node_modules/tar/node_modules/yallist/package.json | 63
-rw-r--r--  node_modules/tar/node_modules/yallist/yallist.js | 426
129 files changed, 16323 insertions, 515 deletions
diff --git a/node_modules/fs-minipass/index.js b/node_modules/fs-minipass/index.js
index cd585a83c..9b0779c80 100644
--- a/node_modules/fs-minipass/index.js
+++ b/node_modules/fs-minipass/index.js
@@ -3,11 +3,21 @@ const MiniPass = require('minipass')
const EE = require('events').EventEmitter
const fs = require('fs')
-// for writev
-const binding = process.binding('fs')
-const writeBuffers = binding.writeBuffers
+let writev = fs.writev
/* istanbul ignore next */
-const FSReqWrap = binding.FSReqWrap || binding.FSReqCallback
+if (!writev) {
+ // This entire block can be removed if support for earlier than Node.js
+ // 12.9.0 is not needed.
+ const binding = process.binding('fs')
+ const FSReqWrap = binding.FSReqWrap || binding.FSReqCallback
+
+ writev = (fd, iovec, pos, cb) => {
+ const done = (er, bw) => cb(er, bw, iovec)
+ const req = new FSReqWrap()
+ req.oncomplete = done
+ binding.writeBuffers(fd, iovec, pos, req)
+ }
+}
const _autoClose = Symbol('_autoClose')
const _close = Symbol('_close')
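For reference, `fs.writev` (available in Node.js core since 12.9.0) is the call the fallback above emulates via `process.binding('fs')` on older runtimes. A minimal standalone sketch of a direct `fs.writev` call (file name is illustrative):

```js
// Minimal sketch of fs.writev, which the polyfill above provides on
// Node.js versions older than 12.9.0.
const fs = require('fs')

const fd = fs.openSync('example-output.txt', 'w') // hypothetical file
const iovec = [Buffer.from('hello, '), Buffer.from('world\n')]
fs.writev(fd, iovec, null, (er, bytesWritten, buffers) => {
  if (er) throw er
  console.log(`wrote ${bytesWritten} bytes from ${buffers.length} buffers`)
  fs.closeSync(fd)
})
```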
@@ -36,17 +46,20 @@ const _size = Symbol('_size')
const _write = Symbol('_write')
const _writing = Symbol('_writing')
const _defaultFlag = Symbol('_defaultFlag')
+const _errored = Symbol('_errored')
class ReadStream extends MiniPass {
constructor (path, opt) {
opt = opt || {}
super(opt)
+ this.readable = true
this.writable = false
if (typeof path !== 'string')
throw new TypeError('path must be a string')
+ this[_errored] = false
this[_fd] = typeof opt.fd === 'number' ? opt.fd : null
this[_path] = path
this[_readSize] = opt.readSize || 16*1024*1024
@@ -96,7 +109,8 @@ class ReadStream extends MiniPass {
this[_reading] = true
const buf = this[_makeBuf]()
/* istanbul ignore if */
- if (buf.length === 0) return process.nextTick(() => this[_onread](null, 0, buf))
+ if (buf.length === 0)
+ return process.nextTick(() => this[_onread](null, 0, buf))
fs.read(this[_fd], buf, 0, buf.length, null, (er, br, buf) =>
this[_onread](er, br, buf))
}
@@ -112,8 +126,9 @@ class ReadStream extends MiniPass {
[_close] () {
if (this[_autoClose] && typeof this[_fd] === 'number') {
- fs.close(this[_fd], _ => this.emit('close'))
+ const fd = this[_fd]
this[_fd] = null
+ fs.close(fd, er => er ? this.emit('error', er) : this.emit('close'))
}
}
@@ -150,6 +165,12 @@ class ReadStream extends MiniPass {
this[_read]()
break
+ case 'error':
+ if (this[_errored])
+ return
+ this[_errored] = true
+ return super.emit(ev, data)
+
default:
return super.emit(ev, data)
}
@@ -176,7 +197,8 @@ class ReadStreamSync extends ReadStream {
do {
const buf = this[_makeBuf]()
/* istanbul ignore next */
- const br = buf.length === 0 ? 0 : fs.readSync(this[_fd], buf, 0, buf.length, null)
+ const br = buf.length === 0 ? 0
+ : fs.readSync(this[_fd], buf, 0, buf.length, null)
if (!this[_handleChunk](br, buf))
break
} while (true)
@@ -191,10 +213,9 @@ class ReadStreamSync extends ReadStream {
[_close] () {
if (this[_autoClose] && typeof this[_fd] === 'number') {
- try {
- fs.closeSync(this[_fd])
- } catch (er) {}
+ const fd = this[_fd]
this[_fd] = null
+ fs.closeSync(fd)
this.emit('close')
}
}
@@ -205,6 +226,8 @@ class WriteStream extends EE {
opt = opt || {}
super(opt)
this.readable = false
+ this.writable = true
+ this[_errored] = false
this[_writing] = false
this[_ended] = false
this[_needDrain] = false
@@ -225,6 +248,16 @@ class WriteStream extends EE {
this[_open]()
}
+ emit (ev, data) {
+ if (ev === 'error') {
+ if (this[_errored])
+ return
+ this[_errored] = true
+ }
+ return super.emit(ev, data)
+ }
+
+
get fd () { return this[_fd] }
get path () { return this[_path] }
@@ -264,11 +297,12 @@ class WriteStream extends EE {
if (!this[_writing] && !this[_queue].length &&
typeof this[_fd] === 'number')
this[_onwrite](null, 0)
+ return this
}
write (buf, enc) {
if (typeof buf === 'string')
- buf = new Buffer(buf, enc)
+ buf = Buffer.from(buf, enc)
if (this[_ended]) {
this.emit('error', new Error('write() after end()'))
@@ -330,8 +364,9 @@ class WriteStream extends EE {
[_close] () {
if (this[_autoClose] && typeof this[_fd] === 'number') {
- fs.close(this[_fd], _ => this.emit('close'))
+ const fd = this[_fd]
this[_fd] = null
+ fs.close(fd, er => er ? this.emit('error', er) : this.emit('close'))
}
}
}
@@ -339,47 +374,47 @@ class WriteStream extends EE {
class WriteStreamSync extends WriteStream {
[_open] () {
let fd
- try {
+ // only wrap in a try{} block if we know we'll retry, to avoid
+ // the rethrow obscuring the error's source frame in most cases.
+ if (this[_defaultFlag] && this[_flags] === 'r+') {
+ try {
+ fd = fs.openSync(this[_path], this[_flags], this[_mode])
+ } catch (er) {
+ if (er.code === 'ENOENT') {
+ this[_flags] = 'w'
+ return this[_open]()
+ } else
+ throw er
+ }
+ } else
fd = fs.openSync(this[_path], this[_flags], this[_mode])
- } catch (er) {
- if (this[_defaultFlag] &&
- this[_flags] === 'r+' &&
- er && er.code === 'ENOENT') {
- this[_flags] = 'w'
- return this[_open]()
- } else
- throw er
- }
+
this[_onopen](null, fd)
}
[_close] () {
if (this[_autoClose] && typeof this[_fd] === 'number') {
- try {
- fs.closeSync(this[_fd])
- } catch (er) {}
+ const fd = this[_fd]
this[_fd] = null
+ fs.closeSync(fd)
this.emit('close')
}
}
[_write] (buf) {
+ // throw the original, but try to close if it fails
+ let threw = true
try {
this[_onwrite](null,
fs.writeSync(this[_fd], buf, 0, buf.length, this[_pos]))
- } catch (er) {
- this[_onwrite](er, 0)
+ threw = false
+ } finally {
+ if (threw)
+ try { this[_close]() } catch (_) {}
}
}
}
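The rewritten `[_write]` relies on a small try/finally idiom: record whether the protected block threw, rethrow the original error untouched, and only attempt cleanup on failure. A standalone sketch of the same idiom (names are illustrative):

```js
// Illustrative sketch of the "threw" idiom used in [_write] above:
// preserve the original error and its stack, but attempt cleanup on failure.
function writeWithCleanup (doWrite, cleanup) {
  let threw = true
  try {
    doWrite()
    threw = false                  // only reached if doWrite did not throw
  } finally {
    if (threw)
      try { cleanup() } catch (_) {} // never mask the original error
  }
}
```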
-const writev = (fd, iovec, pos, cb) => {
- const done = (er, bw) => cb(er, bw, iovec)
- const req = new FSReqWrap()
- req.oncomplete = done
- binding.writeBuffers(fd, iovec, pos, req)
-}
-
exports.ReadStream = ReadStream
exports.ReadStreamSync = ReadStreamSync
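The exports keep fs-minipass's public API unchanged across this rewrite; a minimal usage sketch per the package README (file names are illustrative):

```js
// Minimal sketch: fs-minipass read/write streams.
const fsm = require('fs-minipass')

const readStream = new fsm.ReadStream('input.txt')
const writeStream = new fsm.WriteStream('output.txt')
readStream.pipe(writeStream)
writeStream.on('close', () => console.log('copy complete'))
```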
diff --git a/node_modules/fs-minipass/node_modules/minipass/README.md b/node_modules/fs-minipass/node_modules/minipass/README.md
index c989beea0..32ace2fb9 100644
--- a/node_modules/fs-minipass/node_modules/minipass/README.md
+++ b/node_modules/fs-minipass/node_modules/minipass/README.md
@@ -7,32 +7,32 @@ stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)
fast](https://docs.google.com/spreadsheets/d/1oObKSrVwLX_7Ut4Z6g3fZW-AX1j1-k6w-cDsrkaSbHM/edit#gid=0)
for objects, strings, and buffers.
-Supports pipe()ing (including multi-pipe() and backpressure
-transmission), buffering data until either a `data` event handler or
-`pipe()` is added (so you don't lose the first chunk), and most other
-cases where PassThrough is a good idea.
+Supports pipe()ing (including multi-pipe() and backpressure transmission),
+buffering data until either a `data` event handler or `pipe()` is added (so
+you don't lose the first chunk), and most other cases where PassThrough is
+a good idea.
-There is a `read()` method, but it's much more efficient to consume
-data from this stream via `'data'` events or by calling `pipe()` into
-some other stream. Calling `read()` requires the buffer to be
-flattened in some cases, which requires copying memory.
+There is a `read()` method, but it's much more efficient to consume data
+from this stream via `'data'` events or by calling `pipe()` into some other
+stream. Calling `read()` requires the buffer to be flattened in some
+cases, which requires copying memory.
-There is also no `unpipe()` method. Once you start piping, there is
-no stopping it!
+There is also no `unpipe()` method. Once you start piping, there is no
+stopping it!
-If you set `objectMode: true` in the options, then whatever is written
-will be emitted. Otherwise, it'll do a minimal amount of Buffer
-copying to ensure proper Streams semantics when `read(n)` is called.
+If you set `objectMode: true` in the options, then whatever is written will
+be emitted. Otherwise, it'll do a minimal amount of Buffer copying to
+ensure proper Streams semantics when `read(n)` is called.
`objectMode` can also be set by doing `stream.objectMode = true`, or by
writing any non-string/non-buffer data. `objectMode` cannot be set to
false once it is set.
-This is not a `through` or `through2` stream. It doesn't transform
-the data, it just passes it right through. If you want to transform
-the data, extend the class, and override the `write()` method. Once
-you're done transforming the data however you want, call
-`super.write()` with the transform output.
+This is not a `through` or `through2` stream. It doesn't transform the
+data, it just passes it right through. If you want to transform the data,
+extend the class, and override the `write()` method. Once you're done
+transforming the data however you want, call `super.write()` with the
+transform output.
For some examples of streams that extend Minipass in various ways, check
out:
@@ -46,6 +46,7 @@ out:
- [tap](http://npm.im/tap)
- [tap-parser](http://npm.im/tap)
- [treport](http://npm.im/tap)
+- [minipass-fetch](http://npm.im/minipass-fetch)
## Differences from Node.js Streams
@@ -252,7 +253,8 @@ src.pipe(tee)
## USAGE
-It's a stream! Use it like a stream and it'll most likely do what you want.
+It's a stream! Use it like a stream and it'll most likely do what you
+want.
```js
const Minipass = require('minipass')
@@ -280,31 +282,30 @@ streams.
* `write(chunk, [encoding], [callback])` - Put data in. (Note that, in the
base Minipass class, the same data will come out.) Returns `false` if
- the stream will buffer the next write, or true if it's still in
- "flowing" mode.
+ the stream will buffer the next write, or true if it's still in "flowing"
+ mode.
* `end([chunk, [encoding]], [callback])` - Signal that you have no more
data to write. This will queue an `end` event to be fired when all the
data has been consumed.
-* `setEncoding(encoding)` - Set the encoding for data coming off the
-  stream. This can only be done once.
+* `setEncoding(encoding)` - Set the encoding for data coming off the stream.
+  This can only be done once.
* `pause()` - No more data for a while, please. This also prevents `end`
from being emitted for empty streams until the stream is resumed.
-* `resume()` - Resume the stream. If there's data in the buffer, it is
- all discarded. Any buffered events are immediately emitted.
+* `resume()` - Resume the stream. If there's data in the buffer, it is all
+ discarded. Any buffered events are immediately emitted.
* `pipe(dest)` - Send all output to the stream provided. There is no way
to unpipe. When data is emitted, it is immediately written to any and
all pipe destinations.
-* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters.
- Some events are given special treatment, however. (See below under
- "events".)
+* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters. Some
+ events are given special treatment, however. (See below under "events".)
* `promise()` - Returns a Promise that resolves when the stream emits
`end`, or rejects if the stream emits `error`.
* `collect()` - Return a Promise that resolves on `end` with an array
- containing each chunk of data that was emitted, or rejects if the
- stream emits `error`. Note that this consumes the stream data.
-* `concat()` - Same as `collect()`, but concatenates the data into a
- single Buffer object. Will reject the returned promise if the stream is
- in objectMode, or if it goes into objectMode by the end of the data.
+ containing each chunk of data that was emitted, or rejects if the stream
+ emits `error`. Note that this consumes the stream data.
+* `concat()` - Same as `collect()`, but concatenates the data into a single
+ Buffer object. Will reject the returned promise if the stream is in
+ objectMode, or if it goes into objectMode by the end of the data.
* `read(n)` - Consume `n` bytes of data out of the buffer. If `n` is not
provided, then consume all of it. If `n` bytes are not available, then
it returns null. **Note** consuming streams in this way is less
@@ -421,8 +422,8 @@ mp.concat().then(onebigchunk => {
### iteration
-You can iterate over streams synchronously or asynchronously in
-platforms that support it.
+You can iterate over streams synchronously or asynchronously in platforms
+that support it.
Synchronous iteration will end when the currently available data is
consumed, even if the `end` event has not been reached. In string and
@@ -430,9 +431,8 @@ buffer mode, the data is concatenated, so unless multiple writes are
occurring in the same tick as the `read()`, sync iteration loops will
generally only have a single iteration.
-To consume chunks in this way exactly as they have been written, with
-no flattening, create the stream with the `{ objectMode: true }`
-option.
+To consume chunks in this way exactly as they have been written, with no
+flattening, create the stream with the `{ objectMode: true }` option.
```js
const mp = new Minipass({ objectMode: true })
diff --git a/node_modules/fs-minipass/node_modules/minipass/index.js b/node_modules/fs-minipass/node_modules/minipass/index.js
index c072352d4..55ea0f3dd 100644
--- a/node_modules/fs-minipass/node_modules/minipass/index.js
+++ b/node_modules/fs-minipass/node_modules/minipass/index.js
@@ -1,5 +1,6 @@
'use strict'
const EE = require('events')
+const Stream = require('stream')
const Yallist = require('yallist')
const SD = require('string_decoder').StringDecoder
@@ -29,12 +30,6 @@ const ASYNCITERATOR = doIter && Symbol.asyncIterator
const ITERATOR = doIter && Symbol.iterator
|| Symbol('iterator not implemented')
-// Buffer in node 4.x < 4.5.0 doesn't have working Buffer.from
-// or Buffer.alloc, and Buffer in node 10 deprecated the ctor.
-// .M, this is fine .\^/M..
-const B = Buffer.alloc ? Buffer
- : /* istanbul ignore next */ require('safe-buffer').Buffer
-
// events that mean 'the stream is over'
// these are treated specially, and re-emitted
// if they are listened for after emitting.
@@ -49,9 +44,9 @@ const isArrayBuffer = b => b instanceof ArrayBuffer ||
b.constructor.name === 'ArrayBuffer' &&
b.byteLength >= 0
-const isArrayBufferView = b => !B.isBuffer(b) && ArrayBuffer.isView(b)
+const isArrayBufferView = b => !Buffer.isBuffer(b) && ArrayBuffer.isView(b)
-module.exports = class Minipass extends EE {
+module.exports = class Minipass extends Stream {
constructor (options) {
super()
this[FLOWING] = false
@@ -126,11 +121,11 @@ module.exports = class Minipass extends EE {
// at some point in the future, we may want to do the opposite!
// leave strings and buffers as-is
// anything else switches us into object mode
- if (!this[OBJECTMODE] && !B.isBuffer(chunk)) {
+ if (!this[OBJECTMODE] && !Buffer.isBuffer(chunk)) {
if (isArrayBufferView(chunk))
- chunk = B.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
+ chunk = Buffer.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
else if (isArrayBuffer(chunk))
- chunk = B.from(chunk)
+ chunk = Buffer.from(chunk)
else if (typeof chunk !== 'string')
// use the setter so we throw if we have encoding set
this.objectMode = true
@@ -152,10 +147,10 @@ module.exports = class Minipass extends EE {
if (typeof chunk === 'string' && !this[OBJECTMODE] &&
// unless it is a string already ready for us to use
!(encoding === this[ENCODING] && !this[DECODER].lastNeed)) {
- chunk = B.from(chunk, encoding)
+ chunk = Buffer.from(chunk, encoding)
}
- if (B.isBuffer(chunk) && this[ENCODING])
+ if (Buffer.isBuffer(chunk) && this[ENCODING])
chunk = this[DECODER].write(chunk)
try {
@@ -188,7 +183,7 @@ module.exports = class Minipass extends EE {
])
else
this.buffer = new Yallist([
- B.concat(Array.from(this.buffer), this[BUFFERLENGTH])
+ Buffer.concat(Array.from(this.buffer), this[BUFFERLENGTH])
])
}
@@ -423,12 +418,17 @@ module.exports = class Minipass extends EE {
// const all = await stream.collect()
collect () {
const buf = []
- buf.dataLength = 0
+ if (!this[OBJECTMODE])
+ buf.dataLength = 0
+ // set the promise first, in case an error is raised
+ // by triggering the flow here.
+ const p = this.promise()
this.on('data', c => {
buf.push(c)
- buf.dataLength += c.length
+ if (!this[OBJECTMODE])
+ buf.dataLength += c.length
})
- return this.promise().then(() => buf)
+ return p.then(() => buf)
}
// const data = await stream.concat()
@@ -438,7 +438,7 @@ module.exports = class Minipass extends EE {
: this.collect().then(buf =>
this[OBJECTMODE]
? Promise.reject(new Error('cannot concat in objectMode'))
- : this[ENCODING] ? buf.join('') : B.concat(buf, buf.dataLength))
+ : this[ENCODING] ? buf.join('') : Buffer.concat(buf, buf.dataLength))
}
// stream.promise().then(() => done, er => emitted error)
@@ -529,9 +529,10 @@ module.exports = class Minipass extends EE {
}
static isStream (s) {
- return !!s && (s instanceof Minipass || s instanceof EE && (
- typeof s.pipe === 'function' || // readable
- (typeof s.write === 'function' && typeof s.end === 'function') // writable
- ))
+ return !!s && (s instanceof Minipass || s instanceof Stream ||
+ s instanceof EE && (
+ typeof s.pipe === 'function' || // readable
+ (typeof s.write === 'function' && typeof s.end === 'function') // writable
+ ))
}
}
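A short sketch of the patched behavior, assuming minipass 3.x as updated here:

```js
// Sketch of the patched minipass behavior (assumes minipass 3.x).
const Minipass = require('minipass')
const Stream = require('stream')

// isStream now accepts core streams as well as Minipass instances
console.log(Minipass.isStream(new Stream.PassThrough())) // true
console.log(Minipass.isStream({}))                       // false

// collect()/concat() still gather stream contents; the promise is created
// before the 'data' handler, so a synchronously raised error rejects it
const mp = new Minipass({ encoding: 'utf8' })
mp.end('foo')
mp.concat().then(data => console.log(data)) // 'foo'
```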
diff --git a/node_modules/fs-minipass/node_modules/minipass/package.json b/node_modules/fs-minipass/node_modules/minipass/package.json
index 416e231c9..e125bc7dc 100644
--- a/node_modules/fs-minipass/node_modules/minipass/package.json
+++ b/node_modules/fs-minipass/node_modules/minipass/package.json
@@ -1,27 +1,27 @@
{
- "_from": "minipass@^2.6.0",
- "_id": "minipass@2.9.0",
+ "_from": "minipass@^3.0.0",
+ "_id": "minipass@3.1.1",
"_inBundle": false,
- "_integrity": "sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==",
+ "_integrity": "sha512-UFqVihv6PQgwj8/yTGvl9kPz7xIAY+R5z6XYjRInD3Gk3qx6QGSD6zEcpeG4Dy/lQnv1J6zv8ejV90hyYIKf3w==",
"_location": "/fs-minipass/minipass",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
- "raw": "minipass@^2.6.0",
+ "raw": "minipass@^3.0.0",
"name": "minipass",
"escapedName": "minipass",
- "rawSpec": "^2.6.0",
+ "rawSpec": "^3.0.0",
"saveSpec": null,
- "fetchSpec": "^2.6.0"
+ "fetchSpec": "^3.0.0"
},
"_requiredBy": [
"/fs-minipass"
],
- "_resolved": "https://registry.npmjs.org/minipass/-/minipass-2.9.0.tgz",
- "_shasum": "e713762e7d3e32fed803115cf93e04bca9fcc9a6",
- "_spec": "minipass@^2.6.0",
- "_where": "/Users/mperrotte/npminc/cli/node_modules/fs-minipass",
+ "_resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.1.tgz",
+ "_shasum": "7607ce778472a185ad6d89082aa2070f79cedcd5",
+ "_spec": "minipass@^3.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/fs-minipass",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
@@ -32,8 +32,7 @@
},
"bundleDependencies": false,
"dependencies": {
- "safe-buffer": "^5.1.2",
- "yallist": "^3.0.0"
+ "yallist": "^4.0.0"
},
"deprecated": false,
"description": "minimal implementation of a PassThrough stream",
@@ -42,6 +41,9 @@
"tap": "^14.6.5",
"through2": "^2.0.3"
},
+ "engines": {
+ "node": ">=8"
+ },
"files": [
"index.js"
],
@@ -59,12 +61,12 @@
},
"scripts": {
"postpublish": "git push origin --follow-tags",
- "postversion": "npm publish",
+ "postversion": "npm publish --tag=next",
"preversion": "npm test",
"test": "tap"
},
"tap": {
"check-coverage": true
},
- "version": "2.9.0"
+ "version": "3.1.1"
}
diff --git a/node_modules/fs-minipass/node_modules/yallist/LICENSE b/node_modules/fs-minipass/node_modules/yallist/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/fs-minipass/node_modules/yallist/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/fs-minipass/node_modules/yallist/README.md b/node_modules/fs-minipass/node_modules/yallist/README.md
new file mode 100644
index 000000000..f58610186
--- /dev/null
+++ b/node_modules/fs-minipass/node_modules/yallist/README.md
@@ -0,0 +1,204 @@
+# yallist
+
+Yet Another Linked List
+
+There are many doubly-linked list implementations like it, but this
+one is mine.
+
+For when an array would be too big, and a Map can't be iterated in
+reverse order.
+
+
+[![Build Status](https://travis-ci.org/isaacs/yallist.svg?branch=master)](https://travis-ci.org/isaacs/yallist) [![Coverage Status](https://coveralls.io/repos/isaacs/yallist/badge.svg?service=github)](https://coveralls.io/github/isaacs/yallist)
+
+## basic usage
+
+```javascript
+var yallist = require('yallist')
+var myList = yallist.create([1, 2, 3])
+myList.push('foo')
+myList.unshift('bar')
+// of course pop() and shift() are there, too
+console.log(myList.toArray()) // ['bar', 1, 2, 3, 'foo']
+myList.forEach(function (k) {
+ // walk the list head to tail
+})
+myList.forEachReverse(function (k, index, list) {
+ // walk the list tail to head
+})
+var myDoubledList = myList.map(function (k) {
+ return k + k
+})
+// now myDoubledList contains ['barbar', 2, 4, 6, 'foofoo']
+// mapReverse is also a thing
+var myDoubledListReverse = myList.mapReverse(function (k) {
+ return k + k
+}) // ['foofoo', 6, 4, 2, 'barbar']
+
+var reduced = myList.reduce(function (set, entry) {
+ set += entry
+ return set
+}, 'start')
+console.log(reduced) // 'startbar123foo'
+```
+
+## api
+
+The whole API is considered "public".
+
+Functions with the same name as an Array method work more or less the
+same way.
+
+There are reverse versions of most things because that's the point.
+
+### Yallist
+
+Default export, the class that holds and manages a list.
+
+Call it with either a forEach-able (like an array) or a set of
+arguments, to initialize the list.
+
+The Array-ish methods all act like you'd expect. No magic length,
+though, so if you change that it won't automatically prune or add
+empty spots.
+
+### Yallist.create(..)
+
+Alias for Yallist function. Some people like factories.
+
+#### yallist.head
+
+The first node in the list
+
+#### yallist.tail
+
+The last node in the list
+
+#### yallist.length
+
+The number of nodes in the list. (Change this at your peril. It is
+not magic like Array length.)
+
+#### yallist.toArray()
+
+Convert the list to an array.
+
+#### yallist.forEach(fn, [thisp])
+
+Call a function on each item in the list.
+
+#### yallist.forEachReverse(fn, [thisp])
+
+Call a function on each item in the list, in reverse order.
+
+#### yallist.get(n)
+
+Get the data at position `n` in the list. If you use this a lot,
+probably better off just using an Array.
+
+#### yallist.getReverse(n)
+
+Get the data at position `n`, counting from the tail.
+
+#### yallist.map(fn, thisp)
+
+Create a new Yallist with the result of calling the function on each
+item.
+
+#### yallist.mapReverse(fn, thisp)
+
+Same as `map`, but in reverse.
+
+#### yallist.pop()
+
+Get the data from the list tail, and remove the tail from the list.
+
+#### yallist.push(item, ...)
+
+Insert one or more items to the tail of the list.
+
+#### yallist.reduce(fn, initialValue)
+
+Like Array.reduce.
+
+#### yallist.reduceReverse
+
+Like Array.reduce, but in reverse.
+
+#### yallist.reverse
+
+Reverse the list in place.
+
+#### yallist.shift()
+
+Get the data from the list head, and remove the head from the list.
+
+#### yallist.slice([from], [to])
+
+Just like Array.slice, but returns a new Yallist.
+
+#### yallist.sliceReverse([from], [to])
+
+Just like yallist.slice, but the result is returned in reverse.
+
+#### yallist.toArray()
+
+Create an array representation of the list.
+
+#### yallist.toArrayReverse()
+
+Create a reversed array representation of the list.
+
+#### yallist.unshift(item, ...)
+
+Insert one or more items to the head of the list.
+
+#### yallist.unshiftNode(node)
+
+Move a Node object to the front of the list. (That is, pull it out of
+wherever it lives, and make it the new head.)
+
+If the node belongs to a different list, then that list will remove it
+first.
+
+#### yallist.pushNode(node)
+
+Move a Node object to the end of the list. (That is, pull it out of
+wherever it lives, and make it the new tail.)
+
+If the node belongs to a list already, then that list will remove it
+first.
+
+#### yallist.removeNode(node)
+
+Remove a node from the list, preserving referential integrity of head
+and tail and other nodes.
+
+Will throw an error if you try to have a list remove a node that
+doesn't belong to it.
+
+### Yallist.Node
+
+The class that holds the data and is actually the list.
+
+Call with `var n = new Node(value, previousNode, nextNode)`
+
+Note that if you do direct operations on Nodes themselves, it's very
+easy to get into weird states where the list is broken. Be careful :)
+
+#### node.next
+
+The next node in the list.
+
+#### node.prev
+
+The previous node in the list.
+
+#### node.value
+
+The data the node contains.
+
+#### node.list
+
+The list to which this node belongs. (Null if it does not belong to
+any list.)
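
To make the node-level methods concrete, a short sketch using the API described above:

```js
// Sketch of the node-level Yallist API.
var Yallist = require('yallist')

var list = Yallist.create([1, 2, 3])
var node = list.head.next        // the Node holding 2
list.unshiftNode(node)           // pull it out, make it the head: [2, 1, 3]
list.removeNode(node)            // remove it entirely: [1, 3]
console.log(list.toArray())      // [ 1, 3 ]
```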
diff --git a/node_modules/fs-minipass/node_modules/yallist/iterator.js b/node_modules/fs-minipass/node_modules/yallist/iterator.js
new file mode 100644
index 000000000..d41c97a19
--- /dev/null
+++ b/node_modules/fs-minipass/node_modules/yallist/iterator.js
@@ -0,0 +1,8 @@
+'use strict'
+module.exports = function (Yallist) {
+ Yallist.prototype[Symbol.iterator] = function* () {
+ for (let walker = this.head; walker; walker = walker.next) {
+ yield walker.value
+ }
+ }
+}
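Since the module above installs `Symbol.iterator` on the prototype, a Yallist works with any consumer of the iteration protocol; a brief sketch:

```js
// Sketch: with iterator.js applied, a Yallist is directly iterable.
const Yallist = require('yallist')

const list = Yallist.create(['a', 'b', 'c'])
for (const value of list)
  console.log(value)     // 'a', then 'b', then 'c'
console.log([...list])   // [ 'a', 'b', 'c' ] -- iteration does not consume
```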
diff --git a/node_modules/fs-minipass/node_modules/yallist/package.json b/node_modules/fs-minipass/node_modules/yallist/package.json
new file mode 100644
index 000000000..cdfd89afb
--- /dev/null
+++ b/node_modules/fs-minipass/node_modules/yallist/package.json
@@ -0,0 +1,62 @@
+{
+ "_from": "yallist@^4.0.0",
+ "_id": "yallist@4.0.0",
+ "_inBundle": false,
+ "_integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "_location": "/fs-minipass/yallist",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "yallist@^4.0.0",
+ "name": "yallist",
+ "escapedName": "yallist",
+ "rawSpec": "^4.0.0",
+ "saveSpec": null,
+ "fetchSpec": "^4.0.0"
+ },
+ "_requiredBy": [
+ "/fs-minipass/minipass"
+ ],
+ "_resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "_shasum": "9bb92790d9c0effec63be73519e11a35019a3a72",
+ "_spec": "yallist@^4.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/fs-minipass/node_modules/minipass",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/yallist/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {},
+ "deprecated": false,
+ "description": "Yet Another Linked List",
+ "devDependencies": {
+ "tap": "^12.1.0"
+ },
+ "directories": {
+ "test": "test"
+ },
+ "files": [
+ "yallist.js",
+ "iterator.js"
+ ],
+ "homepage": "https://github.com/isaacs/yallist#readme",
+ "license": "ISC",
+ "main": "yallist.js",
+ "name": "yallist",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/yallist.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --all; git push origin --tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap test/*.js --100"
+ },
+ "version": "4.0.0"
+}
diff --git a/node_modules/fs-minipass/node_modules/yallist/yallist.js b/node_modules/fs-minipass/node_modules/yallist/yallist.js
new file mode 100644
index 000000000..4e83ab1c5
--- /dev/null
+++ b/node_modules/fs-minipass/node_modules/yallist/yallist.js
@@ -0,0 +1,426 @@
+'use strict'
+module.exports = Yallist
+
+Yallist.Node = Node
+Yallist.create = Yallist
+
+function Yallist (list) {
+ var self = this
+ if (!(self instanceof Yallist)) {
+ self = new Yallist()
+ }
+
+ self.tail = null
+ self.head = null
+ self.length = 0
+
+ if (list && typeof list.forEach === 'function') {
+ list.forEach(function (item) {
+ self.push(item)
+ })
+ } else if (arguments.length > 0) {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ self.push(arguments[i])
+ }
+ }
+
+ return self
+}
+
+Yallist.prototype.removeNode = function (node) {
+ if (node.list !== this) {
+ throw new Error('removing node which does not belong to this list')
+ }
+
+ var next = node.next
+ var prev = node.prev
+
+ if (next) {
+ next.prev = prev
+ }
+
+ if (prev) {
+ prev.next = next
+ }
+
+ if (node === this.head) {
+ this.head = next
+ }
+ if (node === this.tail) {
+ this.tail = prev
+ }
+
+ node.list.length--
+ node.next = null
+ node.prev = null
+ node.list = null
+
+ return next
+}
+
+Yallist.prototype.unshiftNode = function (node) {
+ if (node === this.head) {
+ return
+ }
+
+ if (node.list) {
+ node.list.removeNode(node)
+ }
+
+ var head = this.head
+ node.list = this
+ node.next = head
+ if (head) {
+ head.prev = node
+ }
+
+ this.head = node
+ if (!this.tail) {
+ this.tail = node
+ }
+ this.length++
+}
+
+Yallist.prototype.pushNode = function (node) {
+ if (node === this.tail) {
+ return
+ }
+
+ if (node.list) {
+ node.list.removeNode(node)
+ }
+
+ var tail = this.tail
+ node.list = this
+ node.prev = tail
+ if (tail) {
+ tail.next = node
+ }
+
+ this.tail = node
+ if (!this.head) {
+ this.head = node
+ }
+ this.length++
+}
+
+Yallist.prototype.push = function () {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ push(this, arguments[i])
+ }
+ return this.length
+}
+
+Yallist.prototype.unshift = function () {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ unshift(this, arguments[i])
+ }
+ return this.length
+}
+
+Yallist.prototype.pop = function () {
+ if (!this.tail) {
+ return undefined
+ }
+
+ var res = this.tail.value
+ this.tail = this.tail.prev
+ if (this.tail) {
+ this.tail.next = null
+ } else {
+ this.head = null
+ }
+ this.length--
+ return res
+}
+
+Yallist.prototype.shift = function () {
+ if (!this.head) {
+ return undefined
+ }
+
+ var res = this.head.value
+ this.head = this.head.next
+ if (this.head) {
+ this.head.prev = null
+ } else {
+ this.tail = null
+ }
+ this.length--
+ return res
+}
+
+Yallist.prototype.forEach = function (fn, thisp) {
+ thisp = thisp || this
+ for (var walker = this.head, i = 0; walker !== null; i++) {
+ fn.call(thisp, walker.value, i, this)
+ walker = walker.next
+ }
+}
+
+Yallist.prototype.forEachReverse = function (fn, thisp) {
+ thisp = thisp || this
+ for (var walker = this.tail, i = this.length - 1; walker !== null; i--) {
+ fn.call(thisp, walker.value, i, this)
+ walker = walker.prev
+ }
+}
+
+Yallist.prototype.get = function (n) {
+ for (var i = 0, walker = this.head; walker !== null && i < n; i++) {
+ // abort out of the list early if we hit a cycle
+ walker = walker.next
+ }
+ if (i === n && walker !== null) {
+ return walker.value
+ }
+}
+
+Yallist.prototype.getReverse = function (n) {
+ for (var i = 0, walker = this.tail; walker !== null && i < n; i++) {
+ // abort out of the list early if we hit a cycle
+ walker = walker.prev
+ }
+ if (i === n && walker !== null) {
+ return walker.value
+ }
+}
+
+Yallist.prototype.map = function (fn, thisp) {
+ thisp = thisp || this
+ var res = new Yallist()
+ for (var walker = this.head; walker !== null;) {
+ res.push(fn.call(thisp, walker.value, this))
+ walker = walker.next
+ }
+ return res
+}
+
+Yallist.prototype.mapReverse = function (fn, thisp) {
+ thisp = thisp || this
+ var res = new Yallist()
+ for (var walker = this.tail; walker !== null;) {
+ res.push(fn.call(thisp, walker.value, this))
+ walker = walker.prev
+ }
+ return res
+}
+
+Yallist.prototype.reduce = function (fn, initial) {
+ var acc
+ var walker = this.head
+ if (arguments.length > 1) {
+ acc = initial
+ } else if (this.head) {
+ walker = this.head.next
+ acc = this.head.value
+ } else {
+ throw new TypeError('Reduce of empty list with no initial value')
+ }
+
+ for (var i = 0; walker !== null; i++) {
+ acc = fn(acc, walker.value, i)
+ walker = walker.next
+ }
+
+ return acc
+}
+
+Yallist.prototype.reduceReverse = function (fn, initial) {
+ var acc
+ var walker = this.tail
+ if (arguments.length > 1) {
+ acc = initial
+ } else if (this.tail) {
+ walker = this.tail.prev
+ acc = this.tail.value
+ } else {
+ throw new TypeError('Reduce of empty list with no initial value')
+ }
+
+ for (var i = this.length - 1; walker !== null; i--) {
+ acc = fn(acc, walker.value, i)
+ walker = walker.prev
+ }
+
+ return acc
+}
+
+Yallist.prototype.toArray = function () {
+ var arr = new Array(this.length)
+ for (var i = 0, walker = this.head; walker !== null; i++) {
+ arr[i] = walker.value
+ walker = walker.next
+ }
+ return arr
+}
+
+Yallist.prototype.toArrayReverse = function () {
+ var arr = new Array(this.length)
+ for (var i = 0, walker = this.tail; walker !== null; i++) {
+ arr[i] = walker.value
+ walker = walker.prev
+ }
+ return arr
+}
+
+Yallist.prototype.slice = function (from, to) {
+ to = to || this.length
+ if (to < 0) {
+ to += this.length
+ }
+ from = from || 0
+ if (from < 0) {
+ from += this.length
+ }
+ var ret = new Yallist()
+ if (to < from || to < 0) {
+ return ret
+ }
+ if (from < 0) {
+ from = 0
+ }
+ if (to > this.length) {
+ to = this.length
+ }
+ for (var i = 0, walker = this.head; walker !== null && i < from; i++) {
+ walker = walker.next
+ }
+ for (; walker !== null && i < to; i++, walker = walker.next) {
+ ret.push(walker.value)
+ }
+ return ret
+}
+
+Yallist.prototype.sliceReverse = function (from, to) {
+ to = to || this.length
+ if (to < 0) {
+ to += this.length
+ }
+ from = from || 0
+ if (from < 0) {
+ from += this.length
+ }
+ var ret = new Yallist()
+ if (to < from || to < 0) {
+ return ret
+ }
+ if (from < 0) {
+ from = 0
+ }
+ if (to > this.length) {
+ to = this.length
+ }
+ for (var i = this.length, walker = this.tail; walker !== null && i > to; i--) {
+ walker = walker.prev
+ }
+ for (; walker !== null && i > from; i--, walker = walker.prev) {
+ ret.push(walker.value)
+ }
+ return ret
+}
+
+Yallist.prototype.splice = function (start, deleteCount, ...nodes) {
+ if (start > this.length) {
+ start = this.length - 1
+ }
+ if (start < 0) {
+ start = this.length + start;
+ }
+
+ for (var i = 0, walker = this.head; walker !== null && i < start; i++) {
+ walker = walker.next
+ }
+
+ var ret = []
+ for (var i = 0; walker && i < deleteCount; i++) {
+ ret.push(walker.value)
+ walker = this.removeNode(walker)
+ }
+ if (walker === null) {
+ walker = this.tail
+ }
+
+ if (walker !== this.head && walker !== this.tail) {
+ walker = walker.prev
+ }
+
+ for (var i = 0; i < nodes.length; i++) {
+ walker = insert(this, walker, nodes[i])
+ }
+ return ret;
+}
+
+Yallist.prototype.reverse = function () {
+ var head = this.head
+ var tail = this.tail
+ for (var walker = head; walker !== null; walker = walker.prev) {
+ var p = walker.prev
+ walker.prev = walker.next
+ walker.next = p
+ }
+ this.head = tail
+ this.tail = head
+ return this
+}
+
+function insert (self, node, value) {
+ var inserted = node === self.head ?
+ new Node(value, null, node, self) :
+ new Node(value, node, node.next, self)
+
+ if (inserted.next === null) {
+ self.tail = inserted
+ }
+ if (inserted.prev === null) {
+ self.head = inserted
+ }
+
+ self.length++
+
+ return inserted
+}
+
+function push (self, item) {
+ self.tail = new Node(item, self.tail, null, self)
+ if (!self.head) {
+ self.head = self.tail
+ }
+ self.length++
+}
+
+function unshift (self, item) {
+ self.head = new Node(item, null, self.head, self)
+ if (!self.tail) {
+ self.tail = self.head
+ }
+ self.length++
+}
+
+function Node (value, prev, next, list) {
+ if (!(this instanceof Node)) {
+ return new Node(value, prev, next, list)
+ }
+
+ this.list = list
+ this.value = value
+
+ if (prev) {
+ prev.next = this
+ this.prev = prev
+ } else {
+ this.prev = null
+ }
+
+ if (next) {
+ next.prev = this
+ this.next = next
+ } else {
+ this.next = null
+ }
+}
+
+try {
+ // add if support for Symbol.iterator is present
+ require('./iterator.js')(Yallist)
+} catch (er) {}
diff --git a/node_modules/fs-minipass/package.json b/node_modules/fs-minipass/package.json
index 870d08f6f..01635bd8d 100644
--- a/node_modules/fs-minipass/package.json
+++ b/node_modules/fs-minipass/package.json
@@ -1,30 +1,27 @@
{
- "_from": "fs-minipass@^1.2.5",
- "_id": "fs-minipass@1.2.7",
+ "_from": "fs-minipass@^2.0.0",
+ "_id": "fs-minipass@2.1.0",
"_inBundle": false,
- "_integrity": "sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA==",
+ "_integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==",
"_location": "/fs-minipass",
- "_phantomChildren": {
- "safe-buffer": "5.1.2",
- "yallist": "3.0.3"
- },
+ "_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
- "raw": "fs-minipass@^1.2.5",
+ "raw": "fs-minipass@^2.0.0",
"name": "fs-minipass",
"escapedName": "fs-minipass",
- "rawSpec": "^1.2.5",
+ "rawSpec": "^2.0.0",
"saveSpec": null,
- "fetchSpec": "^1.2.5"
+ "fetchSpec": "^2.0.0"
},
"_requiredBy": [
"/tar"
],
- "_resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-1.2.7.tgz",
- "_shasum": "ccff8570841e7fe4265693da88936c55aed7f7c7",
- "_spec": "fs-minipass@^1.2.5",
- "_where": "/Users/mperrotte/npminc/cli/node_modules/tar",
+ "_resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz",
+ "_shasum": "7f5036fdbf12c63c169190cbe4199c852271f9fb",
+ "_spec": "fs-minipass@^2.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/tar",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
@@ -35,7 +32,7 @@
},
"bundleDependencies": false,
"dependencies": {
- "minipass": "^2.6.0"
+ "minipass": "^3.0.0"
},
"deprecated": false,
"description": "fs read and write streams based on minipass",
@@ -43,6 +40,9 @@
"mutate-fs": "^2.0.1",
"tap": "^14.6.4"
},
+ "engines": {
+ "node": ">= 8"
+ },
"files": [
"index.js"
],
@@ -64,5 +64,5 @@
"tap": {
"check-coverage": true
},
- "version": "1.2.7"
+ "version": "2.1.0"
}
diff --git a/node_modules/minizlib/README.md b/node_modules/minizlib/README.md
index 4097b8522..80e067ab3 100644
--- a/node_modules/minizlib/README.md
+++ b/node_modules/minizlib/README.md
@@ -51,3 +51,10 @@ const decode = new zlib.BrotliDecompress()
const output = whereToWriteTheDecodedData()
input.pipe(decode).pipe(output)
```
+
+## REPRODUCIBLE BUILDS
+
+To create reproducible gzip compressed files across different operating
+systems, set `portable: true` in the options. This causes minizlib to set
+the `OS` indicator in byte 9 of the extended gzip header to `0xFF` for
+'unknown'.
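
A short sketch of the option in use, assuming a minizlib version that supports `portable` (as added in this commit):

```js
// Sketch: reproducible gzip output via the portable option described above.
const zlib = require('minizlib')

const gzip = new zlib.Gzip({ portable: true })
gzip.end('hello, world')
gzip.concat().then(buf => {
  console.log(buf[9]) // 255 (0xFF): the OS indicator byte forced to 'unknown'
})
```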
diff --git a/node_modules/minizlib/index.js b/node_modules/minizlib/index.js
index 295047b9c..c84bda1b5 100644
--- a/node_modules/minizlib/index.js
+++ b/node_modules/minizlib/index.js
@@ -9,6 +9,7 @@ const Minipass = require('minipass')
const OriginalBufferConcat = Buffer.concat
+const _superWrite = Symbol('_superWrite')
class ZlibError extends Error {
constructor (err) {
super('zlib: ' + err.message)
@@ -164,12 +165,12 @@ class ZlibBase extends Minipass {
if (Array.isArray(result) && result.length > 0) {
// The first buffer is always `handle._outBuffer`, which would be
// re-used for later invocations; so, we always have to copy that one.
- writeReturn = super.write(Buffer.from(result[0]))
+ writeReturn = this[_superWrite](Buffer.from(result[0]))
for (let i = 1; i < result.length; i++) {
- writeReturn = super.write(result[i])
+ writeReturn = this[_superWrite](result[i])
}
} else {
- writeReturn = super.write(Buffer.from(result))
+ writeReturn = this[_superWrite](Buffer.from(result))
}
}
@@ -177,6 +178,10 @@ class ZlibBase extends Minipass {
cb()
return writeReturn
}
+
+ [_superWrite] (data) {
+ return super.write(data)
+ }
}
class Zlib extends ZlibBase {
@@ -243,9 +248,22 @@ class Inflate extends Zlib {
}
// gzip - bigger header, same deflate compression
+const _portable = Symbol('_portable')
class Gzip extends Zlib {
constructor (opts) {
super(opts, 'Gzip')
+ this[_portable] = opts && !!opts.portable
+ }
+
+ [_superWrite] (data) {
+ if (!this[_portable])
+ return super[_superWrite](data)
+
+ // we'll always get the header emitted in one first chunk
+ // overwrite the OS indicator byte with 0xFF
+ this[_portable] = false
+ data[9] = 255
+ return super[_superWrite](data)
}
}
diff --git a/node_modules/minizlib/node_modules/minipass/README.md b/node_modules/minizlib/node_modules/minipass/README.md
index c989beea0..32ace2fb9 100644
--- a/node_modules/minizlib/node_modules/minipass/README.md
+++ b/node_modules/minizlib/node_modules/minipass/README.md
@@ -7,32 +7,32 @@ stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)
fast](https://docs.google.com/spreadsheets/d/1oObKSrVwLX_7Ut4Z6g3fZW-AX1j1-k6w-cDsrkaSbHM/edit#gid=0)
for objects, strings, and buffers.
-Supports pipe()ing (including multi-pipe() and backpressure
-transmission), buffering data until either a `data` event handler or
-`pipe()` is added (so you don't lose the first chunk), and most other
-cases where PassThrough is a good idea.
+Supports pipe()ing (including multi-pipe() and backpressure transmission),
+buffering data until either a `data` event handler or `pipe()` is added (so
+you don't lose the first chunk), and most other cases where PassThrough is
+a good idea.
-There is a `read()` method, but it's much more efficient to consume
-data from this stream via `'data'` events or by calling `pipe()` into
-some other stream. Calling `read()` requires the buffer to be
-flattened in some cases, which requires copying memory.
+There is a `read()` method, but it's much more efficient to consume data
+from this stream via `'data'` events or by calling `pipe()` into some other
+stream. Calling `read()` requires the buffer to be flattened in some
+cases, which requires copying memory.
-There is also no `unpipe()` method. Once you start piping, there is
-no stopping it!
+There is also no `unpipe()` method. Once you start piping, there is no
+stopping it!
-If you set `objectMode: true` in the options, then whatever is written
-will be emitted. Otherwise, it'll do a minimal amount of Buffer
-copying to ensure proper Streams semantics when `read(n)` is called.
+If you set `objectMode: true` in the options, then whatever is written will
+be emitted. Otherwise, it'll do a minimal amount of Buffer copying to
+ensure proper Streams semantics when `read(n)` is called.
`objectMode` can also be set by doing `stream.objectMode = true`, or by
writing any non-string/non-buffer data. `objectMode` cannot be set to
false once it is set.
-This is not a `through` or `through2` stream. It doesn't transform
-the data, it just passes it right through. If you want to transform
-the data, extend the class, and override the `write()` method. Once
-you're done transforming the data however you want, call
-`super.write()` with the transform output.
+This is not a `through` or `through2` stream. It doesn't transform the
+data, it just passes it right through. If you want to transform the data,
+extend the class, and override the `write()` method. Once you're done
+transforming the data however you want, call `super.write()` with the
+transform output.
For some examples of streams that extend Minipass in various ways, check
out:
@@ -46,6 +46,7 @@ out:
- [tap](http://npm.im/tap)
- [tap-parser](http://npm.im/tap)
- [treport](http://npm.im/tap)
+- [minipass-fetch](http://npm.im/minipass-fetch)
## Differences from Node.js Streams
@@ -252,7 +253,8 @@ src.pipe(tee)
## USAGE
-It's a stream! Use it like a stream and it'll most likely do what you want.
+It's a stream! Use it like a stream and it'll most likely do what you
+want.
```js
const Minipass = require('minipass')
@@ -280,31 +282,30 @@ streams.
* `write(chunk, [encoding], [callback])` - Put data in. (Note that, in the
base Minipass class, the same data will come out.) Returns `false` if
- the stream will buffer the next write, or true if it's still in
- "flowing" mode.
+ the stream will buffer the next write, or true if it's still in "flowing"
+ mode.
* `end([chunk, [encoding]], [callback])` - Signal that you have no more
data to write. This will queue an `end` event to be fired when all the
data has been consumed.
-* `setEncoding(encoding)` - Set the encoding for data coming off the
-  stream. This can only be done once.
+* `setEncoding(encoding)` - Set the encoding for data coming off the stream.
+  This can only be done once.
* `pause()` - No more data for a while, please. This also prevents `end`
from being emitted for empty streams until the stream is resumed.
-* `resume()` - Resume the stream. If there's data in the buffer, it is
- all discarded. Any buffered events are immediately emitted.
+* `resume()` - Resume the stream. If there's data in the buffer, it is all
+ discarded. Any buffered events are immediately emitted.
* `pipe(dest)` - Send all output to the stream provided. There is no way
to unpipe. When data is emitted, it is immediately written to any and
all pipe destinations.
-* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters.
- Some events are given special treatment, however. (See below under
- "events".)
+* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters. Some
+ events are given special treatment, however. (See below under "events".)
* `promise()` - Returns a Promise that resolves when the stream emits
`end`, or rejects if the stream emits `error`.
* `collect()` - Return a Promise that resolves on `end` with an array
- containing each chunk of data that was emitted, or rejects if the
- stream emits `error`. Note that this consumes the stream data.
-* `concat()` - Same as `collect()`, but concatenates the data into a
- single Buffer object. Will reject the returned promise if the stream is
- in objectMode, or if it goes into objectMode by the end of the data.
+ containing each chunk of data that was emitted, or rejects if the stream
+ emits `error`. Note that this consumes the stream data.
+* `concat()` - Same as `collect()`, but concatenates the data into a single
+ Buffer object. Will reject the returned promise if the stream is in
+ objectMode, or if it goes into objectMode by the end of the data.
* `read(n)` - Consume `n` bytes of data out of the buffer. If `n` is not
provided, then consume all of it. If `n` bytes are not available, then
it returns null. **Note** consuming streams in this way is less
@@ -421,8 +422,8 @@ mp.concat().then(onebigchunk => {
### iteration
-You can iterate over streams synchronously or asynchronously in
-platforms that support it.
+You can iterate over streams synchronously or asynchronously in platforms
+that support it.
Synchronous iteration will end when the currently available data is
consumed, even if the `end` event has not been reached. In string and
@@ -430,9 +431,8 @@ buffer mode, the data is concatenated, so unless multiple writes are
occurring in the same tick as the `read()`, sync iteration loops will
generally only have a single iteration.
-To consume chunks in this way exactly as they have been written, with
-no flattening, create the stream with the `{ objectMode: true }`
-option.
+To consume chunks in this way exactly as they have been written, with no
+flattening, create the stream with the `{ objectMode: true }` option.
```js
const mp = new Minipass({ objectMode: true })
diff --git a/node_modules/minizlib/node_modules/minipass/index.js b/node_modules/minizlib/node_modules/minipass/index.js
index c072352d4..55ea0f3dd 100644
--- a/node_modules/minizlib/node_modules/minipass/index.js
+++ b/node_modules/minizlib/node_modules/minipass/index.js
@@ -1,5 +1,6 @@
'use strict'
const EE = require('events')
+const Stream = require('stream')
const Yallist = require('yallist')
const SD = require('string_decoder').StringDecoder
@@ -29,12 +30,6 @@ const ASYNCITERATOR = doIter && Symbol.asyncIterator
const ITERATOR = doIter && Symbol.iterator
|| Symbol('iterator not implemented')
-// Buffer in node 4.x < 4.5.0 doesn't have working Buffer.from
-// or Buffer.alloc, and Buffer in node 10 deprecated the ctor.
-// .M, this is fine .\^/M..
-const B = Buffer.alloc ? Buffer
- : /* istanbul ignore next */ require('safe-buffer').Buffer
-
// events that mean 'the stream is over'
// these are treated specially, and re-emitted
// if they are listened for after emitting.
@@ -49,9 +44,9 @@ const isArrayBuffer = b => b instanceof ArrayBuffer ||
b.constructor.name === 'ArrayBuffer' &&
b.byteLength >= 0
-const isArrayBufferView = b => !B.isBuffer(b) && ArrayBuffer.isView(b)
+const isArrayBufferView = b => !Buffer.isBuffer(b) && ArrayBuffer.isView(b)
-module.exports = class Minipass extends EE {
+module.exports = class Minipass extends Stream {
constructor (options) {
super()
this[FLOWING] = false
@@ -126,11 +121,11 @@ module.exports = class Minipass extends EE {
// at some point in the future, we may want to do the opposite!
// leave strings and buffers as-is
// anything else switches us into object mode
- if (!this[OBJECTMODE] && !B.isBuffer(chunk)) {
+ if (!this[OBJECTMODE] && !Buffer.isBuffer(chunk)) {
if (isArrayBufferView(chunk))
- chunk = B.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
+ chunk = Buffer.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
else if (isArrayBuffer(chunk))
- chunk = B.from(chunk)
+ chunk = Buffer.from(chunk)
else if (typeof chunk !== 'string')
// use the setter so we throw if we have encoding set
this.objectMode = true
@@ -152,10 +147,10 @@ module.exports = class Minipass extends EE {
if (typeof chunk === 'string' && !this[OBJECTMODE] &&
// unless it is a string already ready for us to use
!(encoding === this[ENCODING] && !this[DECODER].lastNeed)) {
- chunk = B.from(chunk, encoding)
+ chunk = Buffer.from(chunk, encoding)
}
- if (B.isBuffer(chunk) && this[ENCODING])
+ if (Buffer.isBuffer(chunk) && this[ENCODING])
chunk = this[DECODER].write(chunk)
try {
@@ -188,7 +183,7 @@ module.exports = class Minipass extends EE {
])
else
this.buffer = new Yallist([
- B.concat(Array.from(this.buffer), this[BUFFERLENGTH])
+ Buffer.concat(Array.from(this.buffer), this[BUFFERLENGTH])
])
}
@@ -423,12 +418,17 @@ module.exports = class Minipass extends EE {
// const all = await stream.collect()
collect () {
const buf = []
- buf.dataLength = 0
+ if (!this[OBJECTMODE])
+ buf.dataLength = 0
+ // set the promise first, in case an error is raised
+ // by triggering the flow here.
+ const p = this.promise()
this.on('data', c => {
buf.push(c)
- buf.dataLength += c.length
+ if (!this[OBJECTMODE])
+ buf.dataLength += c.length
})
- return this.promise().then(() => buf)
+ return p.then(() => buf)
}
// const data = await stream.concat()
@@ -438,7 +438,7 @@ module.exports = class Minipass extends EE {
: this.collect().then(buf =>
this[OBJECTMODE]
? Promise.reject(new Error('cannot concat in objectMode'))
- : this[ENCODING] ? buf.join('') : B.concat(buf, buf.dataLength))
+ : this[ENCODING] ? buf.join('') : Buffer.concat(buf, buf.dataLength))
}
// stream.promise().then(() => done, er => emitted error)
@@ -529,9 +529,10 @@ module.exports = class Minipass extends EE {
}
static isStream (s) {
- return !!s && (s instanceof Minipass || s instanceof EE && (
- typeof s.pipe === 'function' || // readable
- (typeof s.write === 'function' && typeof s.end === 'function') // writable
- ))
+ return !!s && (s instanceof Minipass || s instanceof Stream ||
+ s instanceof EE && (
+ typeof s.pipe === 'function' || // readable
+ (typeof s.write === 'function' && typeof s.end === 'function') // writable
+ ))
}
}
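The `collect()` rework above registers the `end`/`error` promise before attaching the `data` listener (so an error raised by starting the flow still rejects the promise) and skips `dataLength` bookkeeping in objectMode. A minimal sketch of the resulting behavior, assuming minipass 3.x:

```js
const Minipass = require('minipass')

const mp = new Minipass({ objectMode: true })
mp.write({ a: 1 })
mp.end({ b: 2 })

// chunks come back exactly as written; no Buffer flattening in objectMode
mp.collect().then(all => console.log(all)) // [ { a: 1 }, { b: 2 } ]
```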
diff --git a/node_modules/minizlib/node_modules/minipass/package.json b/node_modules/minizlib/node_modules/minipass/package.json
index 57284172b..88791e4dd 100644
--- a/node_modules/minizlib/node_modules/minipass/package.json
+++ b/node_modules/minizlib/node_modules/minipass/package.json
@@ -1,27 +1,27 @@
{
- "_from": "minipass@^2.9.0",
- "_id": "minipass@2.9.0",
+ "_from": "minipass@^3.0.0",
+ "_id": "minipass@3.1.1",
"_inBundle": false,
- "_integrity": "sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==",
+ "_integrity": "sha512-UFqVihv6PQgwj8/yTGvl9kPz7xIAY+R5z6XYjRInD3Gk3qx6QGSD6zEcpeG4Dy/lQnv1J6zv8ejV90hyYIKf3w==",
"_location": "/minizlib/minipass",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
- "raw": "minipass@^2.9.0",
+ "raw": "minipass@^3.0.0",
"name": "minipass",
"escapedName": "minipass",
- "rawSpec": "^2.9.0",
+ "rawSpec": "^3.0.0",
"saveSpec": null,
- "fetchSpec": "^2.9.0"
+ "fetchSpec": "^3.0.0"
},
"_requiredBy": [
"/minizlib"
],
- "_resolved": "https://registry.npmjs.org/minipass/-/minipass-2.9.0.tgz",
- "_shasum": "e713762e7d3e32fed803115cf93e04bca9fcc9a6",
- "_spec": "minipass@^2.9.0",
- "_where": "/Users/mperrotte/npminc/cli/node_modules/minizlib",
+ "_resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.1.tgz",
+ "_shasum": "7607ce778472a185ad6d89082aa2070f79cedcd5",
+ "_spec": "minipass@^3.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/minizlib",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
@@ -32,8 +32,7 @@
},
"bundleDependencies": false,
"dependencies": {
- "safe-buffer": "^5.1.2",
- "yallist": "^3.0.0"
+ "yallist": "^4.0.0"
},
"deprecated": false,
"description": "minimal implementation of a PassThrough stream",
@@ -42,6 +41,9 @@
"tap": "^14.6.5",
"through2": "^2.0.3"
},
+ "engines": {
+ "node": ">=8"
+ },
"files": [
"index.js"
],
@@ -59,12 +61,12 @@
},
"scripts": {
"postpublish": "git push origin --follow-tags",
- "postversion": "npm publish",
+ "postversion": "npm publish --tag=next",
"preversion": "npm test",
"test": "tap"
},
"tap": {
"check-coverage": true
},
- "version": "2.9.0"
+ "version": "3.1.1"
}
diff --git a/node_modules/minizlib/node_modules/yallist/LICENSE b/node_modules/minizlib/node_modules/yallist/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/minizlib/node_modules/yallist/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/minizlib/node_modules/yallist/README.md b/node_modules/minizlib/node_modules/yallist/README.md
new file mode 100644
index 000000000..f58610186
--- /dev/null
+++ b/node_modules/minizlib/node_modules/yallist/README.md
@@ -0,0 +1,204 @@
+# yallist
+
+Yet Another Linked List
+
+There are many doubly-linked list implementations like it, but this
+one is mine.
+
+For when an array would be too big, and a Map can't be iterated in
+reverse order.
+
+
+[![Build Status](https://travis-ci.org/isaacs/yallist.svg?branch=master)](https://travis-ci.org/isaacs/yallist) [![Coverage Status](https://coveralls.io/repos/isaacs/yallist/badge.svg?service=github)](https://coveralls.io/github/isaacs/yallist)
+
+## basic usage
+
+```javascript
+var yallist = require('yallist')
+var myList = yallist.create([1, 2, 3])
+myList.push('foo')
+myList.unshift('bar')
+// of course pop() and shift() are there, too
+console.log(myList.toArray()) // ['bar', 1, 2, 3, 'foo']
+myList.forEach(function (k) {
+ // walk the list head to tail
+})
+myList.forEachReverse(function (k, index, list) {
+ // walk the list tail to head
+})
+var myDoubledList = myList.map(function (k) {
+ return k + k
+})
+// now myDoubledList contains ['barbar', 2, 4, 6, 'foofoo']
+// mapReverse is also a thing
+var myDoubledListReverse = myList.mapReverse(function (k) {
+ return k + k
+}) // ['foofoo', 6, 4, 2, 'barbar']
+
+var reduced = myList.reduce(function (set, entry) {
+ set += entry
+ return set
+}, 'start')
+console.log(reduced) // 'startfoo123bar'
+```
+
+## api
+
+The whole API is considered "public".
+
+Functions with the same name as an Array method work more or less the
+same way.
+
+There are reverse versions of most things because that's the point.
+
+### Yallist
+
+Default export, the class that holds and manages a list.
+
+Call it with either a forEach-able (like an array) or a set of
+arguments, to initialize the list.
+
+The Array-ish methods all act like you'd expect. No magic length,
+though, so if you change that it won't automatically prune or add
+empty spots.
+
+### Yallist.create(..)
+
+Alias for Yallist function. Some people like factories.
+
+#### yallist.head
+
+The first node in the list
+
+#### yallist.tail
+
+The last node in the list
+
+#### yallist.length
+
+The number of nodes in the list. (Change this at your peril. It is
+not magic like Array length.)
+
+#### yallist.toArray()
+
+Convert the list to an array.
+
+#### yallist.forEach(fn, [thisp])
+
+Call a function on each item in the list.
+
+#### yallist.forEachReverse(fn, [thisp])
+
+Call a function on each item in the list, in reverse order.
+
+#### yallist.get(n)
+
+Get the data at position `n` in the list. If you use this a lot,
+probably better off just using an Array.
+
+#### yallist.getReverse(n)
+
+Get the data at position `n`, counting from the tail.
+
+#### yallist.map(fn, thisp)
+
+Create a new Yallist with the result of calling the function on each
+item.
+
+#### yallist.mapReverse(fn, thisp)
+
+Same as `map`, but in reverse.
+
+#### yallist.pop()
+
+Get the data from the list tail, and remove the tail from the list.
+
+#### yallist.push(item, ...)
+
+Insert one or more items to the tail of the list.
+
+#### yallist.reduce(fn, initialValue)
+
+Like Array.reduce.
+
+#### yallist.reduceReverse
+
+Like Array.reduce, but in reverse.
+
+#### yallist.reverse
+
+Reverse the list in place.
+
+#### yallist.shift()
+
+Get the data from the list head, and remove the head from the list.
+
+#### yallist.slice([from], [to])
+
+Just like Array.slice, but returns a new Yallist.
+
+#### yallist.sliceReverse([from], [to])
+
+Just like yallist.slice, but the result is returned in reverse.
+
+#### yallist.toArray()
+
+Create an array representation of the list.
+
+#### yallist.toArrayReverse()
+
+Create a reversed array representation of the list.
+
+#### yallist.unshift(item, ...)
+
+Insert one or more items to the head of the list.
+
+#### yallist.unshiftNode(node)
+
+Move a Node object to the front of the list. (That is, pull it out of
+wherever it lives, and make it the new head.)
+
+If the node belongs to a different list, then that list will remove it
+first.
+
+#### yallist.pushNode(node)
+
+Move a Node object to the end of the list. (That is, pull it out of
+wherever it lives, and make it the new tail.)
+
+If the node belongs to a list already, then that list will remove it
+first.
+
+#### yallist.removeNode(node)
+
+Remove a node from the list, preserving referential integrity of head
+and tail and other nodes.
+
+Will throw an error if you try to have a list remove a node that
+doesn't belong to it.
+
+### Yallist.Node
+
+The class that holds the data and is actually the list.
+
+Call with `var n = new Node(value, previousNode, nextNode)`
+
+Note that if you do direct operations on Nodes themselves, it's very
+easy to get into weird states where the list is broken. Be careful :)
+
+#### node.next
+
+The next node in the list.
+
+#### node.prev
+
+The previous node in the list.
+
+#### node.value
+
+The data the node contains.
+
+#### node.list
+
+The list to which this node belongs. (Null if it does not belong to
+any list.)
diff --git a/node_modules/minizlib/node_modules/yallist/iterator.js b/node_modules/minizlib/node_modules/yallist/iterator.js
new file mode 100644
index 000000000..d41c97a19
--- /dev/null
+++ b/node_modules/minizlib/node_modules/yallist/iterator.js
@@ -0,0 +1,8 @@
+'use strict'
+module.exports = function (Yallist) {
+ Yallist.prototype[Symbol.iterator] = function* () {
+ for (let walker = this.head; walker; walker = walker.next) {
+ yield walker.value
+ }
+ }
+}
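With this mixin applied (yallist requires it at load time when `Symbol.iterator` is available), lists work with the standard iteration protocols; for instance:

```js
const Yallist = require('yallist')

const list = Yallist.create([1, 2, 3])
for (const value of list) {
  console.log(value) // 1, then 2, then 3
}
console.log([...list]) // [ 1, 2, 3 ]
```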
diff --git a/node_modules/minizlib/node_modules/yallist/package.json b/node_modules/minizlib/node_modules/yallist/package.json
new file mode 100644
index 000000000..b89de41e4
--- /dev/null
+++ b/node_modules/minizlib/node_modules/yallist/package.json
@@ -0,0 +1,63 @@
+{
+ "_from": "yallist@^4.0.0",
+ "_id": "yallist@4.0.0",
+ "_inBundle": false,
+ "_integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "_location": "/minizlib/yallist",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "yallist@^4.0.0",
+ "name": "yallist",
+ "escapedName": "yallist",
+ "rawSpec": "^4.0.0",
+ "saveSpec": null,
+ "fetchSpec": "^4.0.0"
+ },
+ "_requiredBy": [
+ "/minizlib",
+ "/minizlib/minipass"
+ ],
+ "_resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "_shasum": "9bb92790d9c0effec63be73519e11a35019a3a72",
+ "_spec": "yallist@^4.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/minizlib",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/yallist/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {},
+ "deprecated": false,
+ "description": "Yet Another Linked List",
+ "devDependencies": {
+ "tap": "^12.1.0"
+ },
+ "directories": {
+ "test": "test"
+ },
+ "files": [
+ "yallist.js",
+ "iterator.js"
+ ],
+ "homepage": "https://github.com/isaacs/yallist#readme",
+ "license": "ISC",
+ "main": "yallist.js",
+ "name": "yallist",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/yallist.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --all; git push origin --tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap test/*.js --100"
+ },
+ "version": "4.0.0"
+}
diff --git a/node_modules/minizlib/node_modules/yallist/yallist.js b/node_modules/minizlib/node_modules/yallist/yallist.js
new file mode 100644
index 000000000..4e83ab1c5
--- /dev/null
+++ b/node_modules/minizlib/node_modules/yallist/yallist.js
@@ -0,0 +1,426 @@
+'use strict'
+module.exports = Yallist
+
+Yallist.Node = Node
+Yallist.create = Yallist
+
+function Yallist (list) {
+ var self = this
+ if (!(self instanceof Yallist)) {
+ self = new Yallist()
+ }
+
+ self.tail = null
+ self.head = null
+ self.length = 0
+
+ if (list && typeof list.forEach === 'function') {
+ list.forEach(function (item) {
+ self.push(item)
+ })
+ } else if (arguments.length > 0) {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ self.push(arguments[i])
+ }
+ }
+
+ return self
+}
+
+Yallist.prototype.removeNode = function (node) {
+ if (node.list !== this) {
+ throw new Error('removing node which does not belong to this list')
+ }
+
+ var next = node.next
+ var prev = node.prev
+
+ if (next) {
+ next.prev = prev
+ }
+
+ if (prev) {
+ prev.next = next
+ }
+
+ if (node === this.head) {
+ this.head = next
+ }
+ if (node === this.tail) {
+ this.tail = prev
+ }
+
+ node.list.length--
+ node.next = null
+ node.prev = null
+ node.list = null
+
+ return next
+}
+
+Yallist.prototype.unshiftNode = function (node) {
+ if (node === this.head) {
+ return
+ }
+
+ if (node.list) {
+ node.list.removeNode(node)
+ }
+
+ var head = this.head
+ node.list = this
+ node.next = head
+ if (head) {
+ head.prev = node
+ }
+
+ this.head = node
+ if (!this.tail) {
+ this.tail = node
+ }
+ this.length++
+}
+
+Yallist.prototype.pushNode = function (node) {
+ if (node === this.tail) {
+ return
+ }
+
+ if (node.list) {
+ node.list.removeNode(node)
+ }
+
+ var tail = this.tail
+ node.list = this
+ node.prev = tail
+ if (tail) {
+ tail.next = node
+ }
+
+ this.tail = node
+ if (!this.head) {
+ this.head = node
+ }
+ this.length++
+}
+
+Yallist.prototype.push = function () {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ push(this, arguments[i])
+ }
+ return this.length
+}
+
+Yallist.prototype.unshift = function () {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ unshift(this, arguments[i])
+ }
+ return this.length
+}
+
+Yallist.prototype.pop = function () {
+ if (!this.tail) {
+ return undefined
+ }
+
+ var res = this.tail.value
+ this.tail = this.tail.prev
+ if (this.tail) {
+ this.tail.next = null
+ } else {
+ this.head = null
+ }
+ this.length--
+ return res
+}
+
+Yallist.prototype.shift = function () {
+ if (!this.head) {
+ return undefined
+ }
+
+ var res = this.head.value
+ this.head = this.head.next
+ if (this.head) {
+ this.head.prev = null
+ } else {
+ this.tail = null
+ }
+ this.length--
+ return res
+}
+
+Yallist.prototype.forEach = function (fn, thisp) {
+ thisp = thisp || this
+ for (var walker = this.head, i = 0; walker !== null; i++) {
+ fn.call(thisp, walker.value, i, this)
+ walker = walker.next
+ }
+}
+
+Yallist.prototype.forEachReverse = function (fn, thisp) {
+ thisp = thisp || this
+ for (var walker = this.tail, i = this.length - 1; walker !== null; i--) {
+ fn.call(thisp, walker.value, i, this)
+ walker = walker.prev
+ }
+}
+
+Yallist.prototype.get = function (n) {
+ for (var i = 0, walker = this.head; walker !== null && i < n; i++) {
+ // abort out of the list early if we hit a cycle
+ walker = walker.next
+ }
+ if (i === n && walker !== null) {
+ return walker.value
+ }
+}
+
+Yallist.prototype.getReverse = function (n) {
+ for (var i = 0, walker = this.tail; walker !== null && i < n; i++) {
+ // abort out of the list early if we hit a cycle
+ walker = walker.prev
+ }
+ if (i === n && walker !== null) {
+ return walker.value
+ }
+}
+
+Yallist.prototype.map = function (fn, thisp) {
+ thisp = thisp || this
+ var res = new Yallist()
+ for (var walker = this.head; walker !== null;) {
+ res.push(fn.call(thisp, walker.value, this))
+ walker = walker.next
+ }
+ return res
+}
+
+Yallist.prototype.mapReverse = function (fn, thisp) {
+ thisp = thisp || this
+ var res = new Yallist()
+ for (var walker = this.tail; walker !== null;) {
+ res.push(fn.call(thisp, walker.value, this))
+ walker = walker.prev
+ }
+ return res
+}
+
+Yallist.prototype.reduce = function (fn, initial) {
+ var acc
+ var walker = this.head
+ if (arguments.length > 1) {
+ acc = initial
+ } else if (this.head) {
+ walker = this.head.next
+ acc = this.head.value
+ } else {
+ throw new TypeError('Reduce of empty list with no initial value')
+ }
+
+ for (var i = 0; walker !== null; i++) {
+ acc = fn(acc, walker.value, i)
+ walker = walker.next
+ }
+
+ return acc
+}
+
+Yallist.prototype.reduceReverse = function (fn, initial) {
+ var acc
+ var walker = this.tail
+ if (arguments.length > 1) {
+ acc = initial
+ } else if (this.tail) {
+ walker = this.tail.prev
+ acc = this.tail.value
+ } else {
+ throw new TypeError('Reduce of empty list with no initial value')
+ }
+
+ for (var i = this.length - 1; walker !== null; i--) {
+ acc = fn(acc, walker.value, i)
+ walker = walker.prev
+ }
+
+ return acc
+}
+
+Yallist.prototype.toArray = function () {
+ var arr = new Array(this.length)
+ for (var i = 0, walker = this.head; walker !== null; i++) {
+ arr[i] = walker.value
+ walker = walker.next
+ }
+ return arr
+}
+
+Yallist.prototype.toArrayReverse = function () {
+ var arr = new Array(this.length)
+ for (var i = 0, walker = this.tail; walker !== null; i++) {
+ arr[i] = walker.value
+ walker = walker.prev
+ }
+ return arr
+}
+
+Yallist.prototype.slice = function (from, to) {
+ to = to || this.length
+ if (to < 0) {
+ to += this.length
+ }
+ from = from || 0
+ if (from < 0) {
+ from += this.length
+ }
+ var ret = new Yallist()
+ if (to < from || to < 0) {
+ return ret
+ }
+ if (from < 0) {
+ from = 0
+ }
+ if (to > this.length) {
+ to = this.length
+ }
+ for (var i = 0, walker = this.head; walker !== null && i < from; i++) {
+ walker = walker.next
+ }
+ for (; walker !== null && i < to; i++, walker = walker.next) {
+ ret.push(walker.value)
+ }
+ return ret
+}
+
+Yallist.prototype.sliceReverse = function (from, to) {
+ to = to || this.length
+ if (to < 0) {
+ to += this.length
+ }
+ from = from || 0
+ if (from < 0) {
+ from += this.length
+ }
+ var ret = new Yallist()
+ if (to < from || to < 0) {
+ return ret
+ }
+ if (from < 0) {
+ from = 0
+ }
+ if (to > this.length) {
+ to = this.length
+ }
+ for (var i = this.length, walker = this.tail; walker !== null && i > to; i--) {
+ walker = walker.prev
+ }
+ for (; walker !== null && i > from; i--, walker = walker.prev) {
+ ret.push(walker.value)
+ }
+ return ret
+}
+
+Yallist.prototype.splice = function (start, deleteCount, ...nodes) {
+ if (start > this.length) {
+ start = this.length - 1
+ }
+ if (start < 0) {
+ start = this.length + start;
+ }
+
+ for (var i = 0, walker = this.head; walker !== null && i < start; i++) {
+ walker = walker.next
+ }
+
+ var ret = []
+ for (var i = 0; walker && i < deleteCount; i++) {
+ ret.push(walker.value)
+ walker = this.removeNode(walker)
+ }
+ if (walker === null) {
+ walker = this.tail
+ }
+
+ if (walker !== this.head && walker !== this.tail) {
+ walker = walker.prev
+ }
+
+ for (var i = 0; i < nodes.length; i++) {
+ walker = insert(this, walker, nodes[i])
+ }
+ return ret;
+}
+
+Yallist.prototype.reverse = function () {
+ var head = this.head
+ var tail = this.tail
+ for (var walker = head; walker !== null; walker = walker.prev) {
+ var p = walker.prev
+ walker.prev = walker.next
+ walker.next = p
+ }
+ this.head = tail
+ this.tail = head
+ return this
+}
+
+function insert (self, node, value) {
+ var inserted = node === self.head ?
+ new Node(value, null, node, self) :
+ new Node(value, node, node.next, self)
+
+ if (inserted.next === null) {
+ self.tail = inserted
+ }
+ if (inserted.prev === null) {
+ self.head = inserted
+ }
+
+ self.length++
+
+ return inserted
+}
+
+function push (self, item) {
+ self.tail = new Node(item, self.tail, null, self)
+ if (!self.head) {
+ self.head = self.tail
+ }
+ self.length++
+}
+
+function unshift (self, item) {
+ self.head = new Node(item, null, self.head, self)
+ if (!self.tail) {
+ self.tail = self.head
+ }
+ self.length++
+}
+
+function Node (value, prev, next, list) {
+ if (!(this instanceof Node)) {
+ return new Node(value, prev, next, list)
+ }
+
+ this.list = list
+ this.value = value
+
+ if (prev) {
+ prev.next = this
+ this.prev = prev
+ } else {
+ this.prev = null
+ }
+
+ if (next) {
+ next.prev = this
+ this.next = next
+ } else {
+ this.next = null
+ }
+}
+
+try {
+ // add if support for Symbol.iterator is present
+ require('./iterator.js')(Yallist)
+} catch (er) {}
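One behavior worth noting in the list code above: `pushNode()` and `unshiftNode()` move a node rather than copy it, removing it from whatever list it currently belongs to. A small sketch:

```js
const Yallist = require('yallist')

const a = Yallist.create([1, 2, 3])
const b = Yallist.create(['x'])

b.pushNode(a.head) // the node holding 1 moves from a to b
console.log(a.toArray()) // [ 2, 3 ]
console.log(b.toArray()) // [ 'x', 1 ]
```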
diff --git a/node_modules/minizlib/package.json b/node_modules/minizlib/package.json
index 1284b8c6c..e3107c0b7 100644
--- a/node_modules/minizlib/package.json
+++ b/node_modules/minizlib/package.json
@@ -1,30 +1,27 @@
{
- "_from": "minizlib@^1.2.1",
- "_id": "minizlib@1.3.3",
+ "_from": "minizlib@^2.1.0",
+ "_id": "minizlib@2.1.0",
"_inBundle": false,
- "_integrity": "sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==",
+ "_integrity": "sha512-EzTZN/fjSvifSX0SlqUERCN39o6T40AMarPbv0MrarSFtIITCBh7bi+dU8nxGFHuqs9jdIAeoYoKuQAAASsPPA==",
"_location": "/minizlib",
- "_phantomChildren": {
- "safe-buffer": "5.1.2",
- "yallist": "3.0.3"
- },
+ "_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
- "raw": "minizlib@^1.2.1",
+ "raw": "minizlib@^2.1.0",
"name": "minizlib",
"escapedName": "minizlib",
- "rawSpec": "^1.2.1",
+ "rawSpec": "^2.1.0",
"saveSpec": null,
- "fetchSpec": "^1.2.1"
+ "fetchSpec": "^2.1.0"
},
"_requiredBy": [
"/tar"
],
- "_resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.3.3.tgz",
- "_shasum": "2290de96818a34c29551c8a8d301216bd65a861d",
- "_spec": "minizlib@^1.2.1",
- "_where": "/Users/mperrotte/npminc/cli/node_modules/tar",
+ "_resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.0.tgz",
+ "_shasum": "fd52c645301ef09a63a2c209697c294c6ce02cf3",
+ "_spec": "minizlib@^2.1.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/tar",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
@@ -35,12 +32,16 @@
},
"bundleDependencies": false,
"dependencies": {
- "minipass": "^2.9.0"
+ "minipass": "^3.0.0",
+ "yallist": "^4.0.0"
},
"deprecated": false,
"description": "A small fast zlib stream built on [minipass](http://npm.im/minipass) and Node.js's zlib binding.",
"devDependencies": {
- "tap": "^12.0.1"
+ "tap": "^14.6.9"
+ },
+ "engines": {
+ "node": ">= 8"
},
"files": [
"index.js",
@@ -70,5 +71,5 @@
"preversion": "npm test",
"test": "tap test/*.js --100 -J"
},
- "version": "1.3.3"
+ "version": "2.1.0"
}
diff --git a/node_modules/node-gyp/node_modules/fs-minipass/LICENSE b/node_modules/node-gyp/node_modules/fs-minipass/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/fs-minipass/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/node-gyp/node_modules/fs-minipass/README.md b/node_modules/node-gyp/node_modules/fs-minipass/README.md
new file mode 100644
index 000000000..1e61241cf
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/fs-minipass/README.md
@@ -0,0 +1,70 @@
+# fs-minipass
+
+Filesystem streams based on [minipass](http://npm.im/minipass).
+
+4 classes are exported:
+
+- ReadStream
+- ReadStreamSync
+- WriteStream
+- WriteStreamSync
+
+When using `ReadStreamSync`, all of the data is made available
+immediately upon consuming the stream. Nothing is buffered in memory
+when the stream is constructed. If the stream is piped to a writer,
+then it will synchronously `read()` and emit data into the writer as
+fast as the writer can consume it. (That is, it will respect
+backpressure.) If you call `stream.read()` then it will read the
+entire file and return the contents.
+
+When using `WriteStreamSync`, every write is flushed to the file
+synchronously. If your writes all come in a single tick, then it'll
+write it all out in a single tick. It's as synchronous as you are.
+
+The async versions work much like their node builtin counterparts,
+with the exception of introducing significantly less Stream machinery
+overhead.
+
+## USAGE
+
+It's just streams, you pipe them or read() them or write() to them.
+
+```js
+const fsm = require('fs-minipass')
+const readStream = new fsm.ReadStream('file.txt')
+const writeStream = new fsm.WriteStream('output.txt')
+writeStream.write('some file header or whatever\n')
+readStream.pipe(writeStream)
+```
+
+## ReadStream(path, options)
+
+Path string is required, but somewhat irrelevant if an open file
+descriptor is passed in as an option.
+
+Options:
+
+- `fd` Pass in a numeric file descriptor, if the file is already open.
+- `readSize` The size of reads to do, defaults to 16MB
+- `size` The size of the file, if known. Prevents zero-byte read()
+ call at the end.
+- `autoClose` Set to `false` to prevent the file descriptor from being
+ closed when the file is done being read.
+
+## WriteStream(path, options)
+
+Path string is required, but somewhat irrelevant if an open file
+descriptor is passed in as an option.
+
+Options:
+
+- `fd` Pass in a numeric file descriptor, if the file is already open.
+- `mode` The mode to create the file with. Defaults to `0o666`.
+- `start` The position in the file to start writing. If not
+ specified, then the file will start writing at position zero, and be
+ truncated by default.
+- `autoClose` Set to `false` to prevent the file descriptor from being
+ closed when the stream is ended.
+- `flags` Flags to use when opening the file. Irrelevant if `fd` is
+ passed in, since file won't be opened in that case. Defaults to
+  `'r+'` if a `start` position is specified, or `'w'` otherwise.
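To illustrate the `start` option documented above (a sketch; the file name is a placeholder): with a numeric `start`, the default flag is `'r+'`, so the write lands mid-file instead of truncating.

```js
const fsm = require('fs-minipass')

// Opens 'data.bin' with 'r+' (falling back to 'w' if it doesn't exist)
// and writes starting at byte offset 16.
const ws = new fsm.WriteStream('data.bin', { start: 16 })
ws.write(Buffer.from('patched'))
ws.end()
ws.on('close', () => console.log('done writing'))
```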
diff --git a/node_modules/node-gyp/node_modules/fs-minipass/index.js b/node_modules/node-gyp/node_modules/fs-minipass/index.js
new file mode 100644
index 000000000..cd585a83c
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/fs-minipass/index.js
@@ -0,0 +1,387 @@
+'use strict'
+const MiniPass = require('minipass')
+const EE = require('events').EventEmitter
+const fs = require('fs')
+
+// for writev
+const binding = process.binding('fs')
+const writeBuffers = binding.writeBuffers
+/* istanbul ignore next */
+const FSReqWrap = binding.FSReqWrap || binding.FSReqCallback
+
+const _autoClose = Symbol('_autoClose')
+const _close = Symbol('_close')
+const _ended = Symbol('_ended')
+const _fd = Symbol('_fd')
+const _finished = Symbol('_finished')
+const _flags = Symbol('_flags')
+const _flush = Symbol('_flush')
+const _handleChunk = Symbol('_handleChunk')
+const _makeBuf = Symbol('_makeBuf')
+const _mode = Symbol('_mode')
+const _needDrain = Symbol('_needDrain')
+const _onerror = Symbol('_onerror')
+const _onopen = Symbol('_onopen')
+const _onread = Symbol('_onread')
+const _onwrite = Symbol('_onwrite')
+const _open = Symbol('_open')
+const _path = Symbol('_path')
+const _pos = Symbol('_pos')
+const _queue = Symbol('_queue')
+const _read = Symbol('_read')
+const _readSize = Symbol('_readSize')
+const _reading = Symbol('_reading')
+const _remain = Symbol('_remain')
+const _size = Symbol('_size')
+const _write = Symbol('_write')
+const _writing = Symbol('_writing')
+const _defaultFlag = Symbol('_defaultFlag')
+
+class ReadStream extends MiniPass {
+ constructor (path, opt) {
+ opt = opt || {}
+ super(opt)
+
+ this.writable = false
+
+ if (typeof path !== 'string')
+ throw new TypeError('path must be a string')
+
+ this[_fd] = typeof opt.fd === 'number' ? opt.fd : null
+ this[_path] = path
+ this[_readSize] = opt.readSize || 16*1024*1024
+ this[_reading] = false
+ this[_size] = typeof opt.size === 'number' ? opt.size : Infinity
+ this[_remain] = this[_size]
+ this[_autoClose] = typeof opt.autoClose === 'boolean' ?
+ opt.autoClose : true
+
+ if (typeof this[_fd] === 'number')
+ this[_read]()
+ else
+ this[_open]()
+ }
+
+ get fd () { return this[_fd] }
+ get path () { return this[_path] }
+
+ write () {
+ throw new TypeError('this is a readable stream')
+ }
+
+ end () {
+ throw new TypeError('this is a readable stream')
+ }
+
+ [_open] () {
+ fs.open(this[_path], 'r', (er, fd) => this[_onopen](er, fd))
+ }
+
+ [_onopen] (er, fd) {
+ if (er)
+ this[_onerror](er)
+ else {
+ this[_fd] = fd
+ this.emit('open', fd)
+ this[_read]()
+ }
+ }
+
+ [_makeBuf] () {
+ return Buffer.allocUnsafe(Math.min(this[_readSize], this[_remain]))
+ }
+
+ [_read] () {
+ if (!this[_reading]) {
+ this[_reading] = true
+ const buf = this[_makeBuf]()
+ /* istanbul ignore if */
+ if (buf.length === 0) return process.nextTick(() => this[_onread](null, 0, buf))
+ fs.read(this[_fd], buf, 0, buf.length, null, (er, br, buf) =>
+ this[_onread](er, br, buf))
+ }
+ }
+
+ [_onread] (er, br, buf) {
+ this[_reading] = false
+ if (er)
+ this[_onerror](er)
+ else if (this[_handleChunk](br, buf))
+ this[_read]()
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ fs.close(this[_fd], _ => this.emit('close'))
+ this[_fd] = null
+ }
+ }
+
+ [_onerror] (er) {
+ this[_reading] = true
+ this[_close]()
+ this.emit('error', er)
+ }
+
+ [_handleChunk] (br, buf) {
+ let ret = false
+ // no effect if infinite
+ this[_remain] -= br
+ if (br > 0)
+ ret = super.write(br < buf.length ? buf.slice(0, br) : buf)
+
+ if (br === 0 || this[_remain] <= 0) {
+ ret = false
+ this[_close]()
+ super.end()
+ }
+
+ return ret
+ }
+
+ emit (ev, data) {
+ switch (ev) {
+ case 'prefinish':
+ case 'finish':
+ break
+
+ case 'drain':
+ if (typeof this[_fd] === 'number')
+ this[_read]()
+ break
+
+ default:
+ return super.emit(ev, data)
+ }
+ }
+}
+
+class ReadStreamSync extends ReadStream {
+ [_open] () {
+ let threw = true
+ try {
+ this[_onopen](null, fs.openSync(this[_path], 'r'))
+ threw = false
+ } finally {
+ if (threw)
+ this[_close]()
+ }
+ }
+
+ [_read] () {
+ let threw = true
+ try {
+ if (!this[_reading]) {
+ this[_reading] = true
+ do {
+ const buf = this[_makeBuf]()
+ /* istanbul ignore next */
+ const br = buf.length === 0 ? 0 : fs.readSync(this[_fd], buf, 0, buf.length, null)
+ if (!this[_handleChunk](br, buf))
+ break
+ } while (true)
+ this[_reading] = false
+ }
+ threw = false
+ } finally {
+ if (threw)
+ this[_close]()
+ }
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ try {
+ fs.closeSync(this[_fd])
+ } catch (er) {}
+ this[_fd] = null
+ this.emit('close')
+ }
+ }
+}
+
+class WriteStream extends EE {
+ constructor (path, opt) {
+ opt = opt || {}
+ super(opt)
+ this.readable = false
+ this[_writing] = false
+ this[_ended] = false
+ this[_needDrain] = false
+ this[_queue] = []
+ this[_path] = path
+ this[_fd] = typeof opt.fd === 'number' ? opt.fd : null
+ this[_mode] = opt.mode === undefined ? 0o666 : opt.mode
+ this[_pos] = typeof opt.start === 'number' ? opt.start : null
+ this[_autoClose] = typeof opt.autoClose === 'boolean' ?
+ opt.autoClose : true
+
+ // truncating makes no sense when writing into the middle
+ const defaultFlag = this[_pos] !== null ? 'r+' : 'w'
+ this[_defaultFlag] = opt.flags === undefined
+ this[_flags] = this[_defaultFlag] ? defaultFlag : opt.flags
+
+ if (this[_fd] === null)
+ this[_open]()
+ }
+
+ get fd () { return this[_fd] }
+ get path () { return this[_path] }
+
+ [_onerror] (er) {
+ this[_close]()
+ this[_writing] = true
+ this.emit('error', er)
+ }
+
+ [_open] () {
+ fs.open(this[_path], this[_flags], this[_mode],
+ (er, fd) => this[_onopen](er, fd))
+ }
+
+ [_onopen] (er, fd) {
+ if (this[_defaultFlag] &&
+ this[_flags] === 'r+' &&
+ er && er.code === 'ENOENT') {
+ this[_flags] = 'w'
+ this[_open]()
+ } else if (er)
+ this[_onerror](er)
+ else {
+ this[_fd] = fd
+ this.emit('open', fd)
+ this[_flush]()
+ }
+ }
+
+ end (buf, enc) {
+ if (buf)
+ this.write(buf, enc)
+
+ this[_ended] = true
+
+ // synthetic after-write logic, where drain/finish live
+ if (!this[_writing] && !this[_queue].length &&
+ typeof this[_fd] === 'number')
+ this[_onwrite](null, 0)
+ }
+
+ write (buf, enc) {
+ if (typeof buf === 'string')
+ buf = new Buffer(buf, enc)
+
+ if (this[_ended]) {
+ this.emit('error', new Error('write() after end()'))
+ return false
+ }
+
+ if (this[_fd] === null || this[_writing] || this[_queue].length) {
+ this[_queue].push(buf)
+ this[_needDrain] = true
+ return false
+ }
+
+ this[_writing] = true
+ this[_write](buf)
+ return true
+ }
+
+ [_write] (buf) {
+ fs.write(this[_fd], buf, 0, buf.length, this[_pos], (er, bw) =>
+ this[_onwrite](er, bw))
+ }
+
+ [_onwrite] (er, bw) {
+ if (er)
+ this[_onerror](er)
+ else {
+ if (this[_pos] !== null)
+ this[_pos] += bw
+ if (this[_queue].length)
+ this[_flush]()
+ else {
+ this[_writing] = false
+
+ if (this[_ended] && !this[_finished]) {
+ this[_finished] = true
+ this[_close]()
+ this.emit('finish')
+ } else if (this[_needDrain]) {
+ this[_needDrain] = false
+ this.emit('drain')
+ }
+ }
+ }
+ }
+
+ [_flush] () {
+ if (this[_queue].length === 0) {
+ if (this[_ended])
+ this[_onwrite](null, 0)
+ } else if (this[_queue].length === 1)
+ this[_write](this[_queue].pop())
+ else {
+ const iovec = this[_queue]
+ this[_queue] = []
+ writev(this[_fd], iovec, this[_pos],
+ (er, bw) => this[_onwrite](er, bw))
+ }
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ fs.close(this[_fd], _ => this.emit('close'))
+ this[_fd] = null
+ }
+ }
+}
+
+class WriteStreamSync extends WriteStream {
+ [_open] () {
+ let fd
+ try {
+ fd = fs.openSync(this[_path], this[_flags], this[_mode])
+ } catch (er) {
+ if (this[_defaultFlag] &&
+ this[_flags] === 'r+' &&
+ er && er.code === 'ENOENT') {
+ this[_flags] = 'w'
+ return this[_open]()
+ } else
+ throw er
+ }
+ this[_onopen](null, fd)
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ try {
+ fs.closeSync(this[_fd])
+ } catch (er) {}
+ this[_fd] = null
+ this.emit('close')
+ }
+ }
+
+ [_write] (buf) {
+ try {
+ this[_onwrite](null,
+ fs.writeSync(this[_fd], buf, 0, buf.length, this[_pos]))
+ } catch (er) {
+ this[_onwrite](er, 0)
+ }
+ }
+}
+
+const writev = (fd, iovec, pos, cb) => {
+ const done = (er, bw) => cb(er, bw, iovec)
+ const req = new FSReqWrap()
+ req.oncomplete = done
+ binding.writeBuffers(fd, iovec, pos, req)
+}
+
+exports.ReadStream = ReadStream
+exports.ReadStreamSync = ReadStreamSync
+
+exports.WriteStream = WriteStream
+exports.WriteStreamSync = WriteStreamSync
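And a minimal read-side sketch against the classes above (the file name is a placeholder):

```js
const fsm = require('fs-minipass')

const rs = new fsm.ReadStream('file.txt', { readSize: 64 * 1024 })
rs.on('data', chunk => process.stdout.write(chunk))
rs.on('end', () => console.error('done'))
```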
diff --git a/node_modules/node-gyp/node_modules/fs-minipass/package.json b/node_modules/node-gyp/node_modules/fs-minipass/package.json
new file mode 100644
index 000000000..810dfab1c
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/fs-minipass/package.json
@@ -0,0 +1,65 @@
+{
+ "_from": "fs-minipass@^1.2.5",
+ "_id": "fs-minipass@1.2.7",
+ "_inBundle": false,
+ "_integrity": "sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA==",
+ "_location": "/node-gyp/fs-minipass",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "fs-minipass@^1.2.5",
+ "name": "fs-minipass",
+ "escapedName": "fs-minipass",
+ "rawSpec": "^1.2.5",
+ "saveSpec": null,
+ "fetchSpec": "^1.2.5"
+ },
+ "_requiredBy": [
+ "/node-gyp/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-1.2.7.tgz",
+ "_shasum": "ccff8570841e7fe4265693da88936c55aed7f7c7",
+ "_spec": "fs-minipass@^1.2.5",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/node-gyp/node_modules/tar",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/npm/fs-minipass/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "minipass": "^2.6.0"
+ },
+ "deprecated": false,
+ "description": "fs read and write streams based on minipass",
+ "devDependencies": {
+ "mutate-fs": "^2.0.1",
+ "tap": "^14.6.4"
+ },
+ "files": [
+ "index.js"
+ ],
+ "homepage": "https://github.com/npm/fs-minipass#readme",
+ "keywords": [],
+ "license": "ISC",
+ "main": "index.js",
+ "name": "fs-minipass",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/npm/fs-minipass.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap"
+ },
+ "tap": {
+ "check-coverage": true
+ },
+ "version": "1.2.7"
+}
diff --git a/node_modules/node-gyp/node_modules/minipass/LICENSE b/node_modules/node-gyp/node_modules/minipass/LICENSE
new file mode 100644
index 000000000..20a476254
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/minipass/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) npm, Inc. and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/node-gyp/node_modules/minipass/README.md b/node_modules/node-gyp/node_modules/minipass/README.md
new file mode 100644
index 000000000..c989beea0
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/minipass/README.md
@@ -0,0 +1,606 @@
+# minipass
+
+A _very_ minimal implementation of a [PassThrough
+stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)
+
+[It's very
+fast](https://docs.google.com/spreadsheets/d/1oObKSrVwLX_7Ut4Z6g3fZW-AX1j1-k6w-cDsrkaSbHM/edit#gid=0)
+for objects, strings, and buffers.
+
+Supports pipe()ing (including multi-pipe() and backpressure
+transmission), buffering data until either a `data` event handler or
+`pipe()` is added (so you don't lose the first chunk), and most other
+cases where PassThrough is a good idea.
+
+There is a `read()` method, but it's much more efficient to consume
+data from this stream via `'data'` events or by calling `pipe()` into
+some other stream. Calling `read()` requires the buffer to be
+flattened in some cases, which requires copying memory.
+
+There is also no `unpipe()` method. Once you start piping, there is
+no stopping it!
+
+If you set `objectMode: true` in the options, then whatever is written
+will be emitted. Otherwise, it'll do a minimal amount of Buffer
+copying to ensure proper Streams semantics when `read(n)` is called.
+
+`objectMode` can also be set by doing `stream.objectMode = true`, or by
+writing any non-string/non-buffer data. `objectMode` cannot be set to
+false once it is set.
+
+This is not a `through` or `through2` stream. It doesn't transform
+the data, it just passes it right through. If you want to transform
+the data, extend the class, and override the `write()` method. Once
+you're done transforming the data however you want, call
+`super.write()` with the transform output.
+
+For some examples of streams that extend Minipass in various ways, check
+out:
+
+- [minizlib](http://npm.im/minizlib)
+- [fs-minipass](http://npm.im/fs-minipass)
+- [tar](http://npm.im/tar)
+- [minipass-collect](http://npm.im/minipass-collect)
+- [minipass-flush](http://npm.im/minipass-flush)
+- [minipass-pipeline](http://npm.im/minipass-pipeline)
+- [tap](http://npm.im/tap)
+- [tap-parser](http://npm.im/tap-parser)
+- [treport](http://npm.im/treport)
+
+## Differences from Node.js Streams
+
+There are several things that make Minipass streams different from (and in
+some ways superior to) Node.js core streams.
+
+Please read these caveats if you are familiar with node-core streams and
+intend to use Minipass streams in your programs.
+
+### Timing
+
+Minipass streams are designed to support synchronous use-cases. Thus, data
+is emitted as soon as it is available, always. It is buffered until read,
+but no longer. Another way to look at it is that Minipass streams are
+exactly as synchronous as the logic that writes into them.
+
+This can be surprising if your code relies on `PassThrough.write()` always
+providing data on the next tick rather than the current one, or being able
+to call `resume()` and not have the entire buffer disappear immediately.
+
+However, without this synchronicity guarantee, there would be no way for
+Minipass to achieve the speeds it does, or support the synchronous use
+cases that it does. Simply put, waiting takes time.
+
+This non-deferring approach makes Minipass streams much easier to reason
+about, especially in the context of Promises and other flow-control
+mechanisms.
+
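+For example, a minimal sketch of this synchronous behavior:
+
+```js
+const mp = new Minipass()
+mp.on('data', c => console.log('got', c.toString()))
+const ok = mp.write('hi') // 'got hi' logs synchronously, on this line
+console.log(ok)           // true: a listener consumed the chunk at once
+```
+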
+### No High/Low Water Marks
+
+Node.js core streams will optimistically fill up a buffer, returning `true`
+on all writes until the limit is hit, even if the data has nowhere to go.
+Then, they will not attempt to draw more data in until the buffer size dips
+below a minimum value.
+
+Minipass streams are much simpler. The `write()` method will return `true`
+if the data has somewhere to go (which is to say, given the timing
+guarantees, that the data is already there by the time `write()` returns).
+
+If the data has nowhere to go, then `write()` returns false, and the data
+sits in a buffer, to be drained out immediately as soon as anyone consumes
+it.
+
+### Hazards of Buffering (or: Why Minipass Is So Fast)
+
+Since data written to a Minipass stream is immediately written all the way
+through the pipeline, and `write()` always returns true/false based on
+whether the data was fully flushed, backpressure is communicated
+immediately to the upstream caller. This minimizes buffering.
+
+Consider this case:
+
+```js
+const {PassThrough} = require('stream')
+const p1 = new PassThrough({ highWaterMark: 1024 })
+const p2 = new PassThrough({ highWaterMark: 1024 })
+const p3 = new PassThrough({ highWaterMark: 1024 })
+const p4 = new PassThrough({ highWaterMark: 1024 })
+
+p1.pipe(p2).pipe(p3).pipe(p4)
+p4.on('data', () => console.log('made it through'))
+
+// this returns false and buffers, then writes to p2 on next tick (1)
+// p2 returns false and buffers, pausing p1, then writes to p3 on next tick (2)
+// p3 returns false and buffers, pausing p2, then writes to p4 on next tick (3)
+// p4 returns false and buffers, pausing p3, then emits 'data' and 'drain'
+// on next tick (4)
+// p3 sees p4's 'drain' event, and calls resume(), emitting 'resume' and
+// 'drain' on next tick (5)
+// p2 sees p3's 'drain', calls resume(), emits 'resume' and 'drain' on next tick (6)
+// p1 sees p2's 'drain', calls resume(), emits 'resume' and 'drain' on next
+// tick (7)
+
+p1.write(Buffer.alloc(2048)) // returns false
+```
+
+Along the way, the data was buffered and deferred at each stage, and
+multiple event deferrals happened, for an unblocked pipeline where it was
+perfectly safe to write all the way through!
+
+Furthermore, setting a `highWaterMark` of `1024` might lead someone reading
+the code to think an advisory maximum of 1KiB is being set for the
+pipeline. However, the actual advisory buffering level is the _sum_ of
+`highWaterMark` values, since each one has its own bucket.
+
+Consider the Minipass case:
+
+```js
+const m1 = new Minipass()
+const m2 = new Minipass()
+const m3 = new Minipass()
+const m4 = new Minipass()
+
+m1.pipe(m2).pipe(m3).pipe(m4)
+m4.on('data', () => console.log('made it through'))
+
+// m1 is flowing, so it writes the data to m2 immediately
+// m2 is flowing, so it writes the data to m3 immediately
+// m3 is flowing, so it writes the data to m4 immediately
+// m4 is flowing, so it fires the 'data' event immediately, returns true
+// m4's write returned true, so m3 is still flowing, returns true
+// m3's write returned true, so m2 is still flowing, returns true
+// m2's write returned true, so m1 is still flowing, returns true
+// No event deferrals or buffering along the way!
+
+m1.write(Buffer.alloc(2048)) // returns true
+```
+
+It is extremely unlikely that you _don't_ want to buffer any data written,
+or _ever_ buffer data that can be flushed all the way through. Neither
+node-core streams nor Minipass ever fail to buffer written data, but
+node-core streams do a lot of unnecessary buffering and pausing.
+
+As always, the faster implementation is the one that does less stuff and
+waits less time to do it.
+
+### Immediately emit `end` for empty streams (when not paused)
+
+If a stream is not paused, and `end()` is called before writing any data
+into it, then it will emit `end` immediately.
+
+If you have logic that occurs on the `end` event which you don't want to
+potentially happen immediately (for example, closing file descriptors,
+moving on to the next entry in an archive parse stream, etc.) then be sure
+to call `stream.pause()` on creation, and then `stream.resume()` once you
+are ready to respond to the `end` event.
+
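+For instance, a quick sketch:
+
+```js
+const mp = new Minipass()
+mp.pause()  // hold 'end' until we are ready
+mp.end()    // empty stream; would otherwise emit 'end' immediately
+mp.on('end', () => console.log('ended'))
+mp.resume() // 'end' fires here
+```
+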
+### Emit `end` When Asked
+
+One hazard of immediately emitting `'end'` is that you may not yet have had
+a chance to add a listener. In order to avoid this hazard, Minipass
+streams safely re-emit the `'end'` event if a new listener is added after
+`'end'` has been emitted.
+
+Ie, if you do `stream.on('end', someFunction)`, and the stream has already
+emitted `end`, then it will call the handler right away. (You can think of
+this somewhat like attaching a new `.then(fn)` to a previously-resolved
+Promise.)
+
+To avoid calling handlers multiple times when they would not expect
+multiple ends to occur, all listeners are removed from the `'end'` event
+whenever it is emitted.
+
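+For instance, a listener added after the fact is still called:
+
+```js
+const mp = new Minipass()
+mp.end('foo')
+mp.concat().then(() => {
+  // 'end' has already been emitted, but a late listener still fires once
+  mp.on('end', () => console.log('told about the end anyway'))
+})
+```
+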
+### Impact of "immediate flow" on Tee-streams
+
+A "tee stream" is a stream piping to multiple destinations:
+
+```js
+const tee = new Minipass()
+tee.pipe(dest1)
+tee.pipe(dest2)
+tee.write('foo') // goes to both destinations
+```
+
+Since Minipass streams _immediately_ process any pending data through the
+pipeline when a new pipe destination is added, this can have surprising
+effects, especially when a stream comes in from some other function and may
+or may not have data in its buffer.
+
+```js
+// WARNING! WILL LOSE DATA!
+const src = new Minipass()
+src.write('foo')
+src.pipe(dest1) // 'foo' chunk flows to dest1 immediately, and is gone
+src.pipe(dest2) // gets nothing!
+```
+
+The solution is to create a dedicated tee-stream junction that pipes to
+both locations, and then pipe to _that_ instead.
+
+```js
+// Safe example: tee to both places
+const src = new Minipass()
+src.write('foo')
+const tee = new Minipass()
+tee.pipe(dest1)
+tee.pipe(dest2)
+src.pipe(tee) // tee gets 'foo', pipes to both locations
+```
+
+The same caveat applies to `on('data')` event listeners. The first one
+added will _immediately_ receive all of the data, leaving nothing for the
+second:
+
+```js
+// WARNING! WILL LOSE DATA!
+const src = new Minipass()
+src.write('foo')
+src.on('data', handler1) // receives 'foo' right away
+src.on('data', handler2) // nothing to see here!
+```
+
+A dedicated tee-stream can be used in this case as well:
+
+```js
+// Safe example: tee to both data handlers
+const src = new Minipass()
+src.write('foo')
+const tee = new Minipass()
+tee.on('data', handler1)
+tee.on('data', handler2)
+src.pipe(tee)
+```
+
+## USAGE
+
+It's a stream! Use it like a stream and it'll most likely do what you want.
+
+```js
+const Minipass = require('minipass')
+const mp = new Minipass(options) // optional: { encoding, objectMode }
+mp.write('foo')
+mp.pipe(someOtherStream)
+mp.end('bar')
+```
+
+### OPTIONS
+
+* `encoding` How would you like the data coming _out_ of the stream to be
+ encoded? Accepts any values that can be passed to `Buffer.toString()`.
+* `objectMode` Emit data exactly as it comes in. This will be flipped on
+ by default if you write() something other than a string or Buffer at any
+ point. Setting `objectMode: true` will prevent setting any encoding
+ value.
+
+### API
+
+Implements the user-facing portions of Node.js's `Readable` and `Writable`
+streams.
+
+### Methods
+
+* `write(chunk, [encoding], [callback])` - Put data in. (Note that, in the
+ base Minipass class, the same data will come out.) Returns `false` if
+  the stream will buffer the next write, or `true` if it's still in
+ "flowing" mode.
+* `end([chunk, [encoding]], [callback])` - Signal that you have no more
+ data to write. This will queue an `end` event to be fired when all the
+ data has been consumed.
+* `setEncoding(encoding)` - Set the encoding for data coming out of the
+ stream. This can only be done once.
+* `pause()` - No more data for a while, please. This also prevents `end`
+ from being emitted for empty streams until the stream is resumed.
+* `resume()` - Resume the stream. If there's data in the buffer, it is
+ all discarded. Any buffered events are immediately emitted.
+* `pipe(dest)` - Send all output to the stream provided. There is no way
+ to unpipe. When data is emitted, it is immediately written to any and
+ all pipe destinations.
+* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters.
+ Some events are given special treatment, however. (See below under
+ "events".)
+* `promise()` - Returns a Promise that resolves when the stream emits
+ `end`, or rejects if the stream emits `error`.
+* `collect()` - Return a Promise that resolves on `end` with an array
+ containing each chunk of data that was emitted, or rejects if the
+ stream emits `error`. Note that this consumes the stream data.
+* `concat()` - Same as `collect()`, but concatenates the data into a
+ single Buffer object. Will reject the returned promise if the stream is
+ in objectMode, or if it goes into objectMode by the end of the data.
+* `read(n)` - Consume `n` bytes of data out of the buffer. If `n` is not
+ provided, then consume all of it. If `n` bytes are not available, then
+ it returns null. **Note** consuming streams in this way is less
+ efficient, and can lead to unnecessary Buffer copying.
+* `destroy([er])` - Destroy the stream. If an error is provided, then an
+ `'error'` event is emitted. If the stream has a `close()` method, and
+ has not emitted a `'close'` event yet, then `stream.close()` will be
+ called. Any Promises returned by `.promise()`, `.collect()` or
+ `.concat()` will be rejected. After being destroyed, writing to the
+ stream will emit an error. No more data will be emitted if the stream is
+ destroyed, even if it was previously buffered.
+
+### Properties
+
+* `bufferLength` Read-only. Total number of bytes buffered, or in the case
+ of objectMode, the total number of objects.
+* `encoding` The encoding that has been set. (Setting this is equivalent
+ to calling `setEncoding(enc)` and has the same prohibition against
+ setting multiple times.)
+* `flowing` Read-only. Boolean indicating whether a chunk written to the
+ stream will be immediately emitted.
+* `emittedEnd` Read-only. Boolean indicating whether the end-ish events
+ (ie, `end`, `prefinish`, `finish`) have been emitted. Note that
+  listening on any end-ish event will immediately re-emit it if it has
+ already been emitted.
+* `writable` Whether the stream is writable. Default `true`. Set to
+  `false` when `end()` is called.
+* `readable` Whether the stream is readable. Default `true`.
+* `buffer` A [yallist](http://npm.im/yallist) linked list of chunks written
+ to the stream that have not yet been emitted. (It's probably a bad idea
+ to mess with this.)
+* `pipes` A [yallist](http://npm.im/yallist) linked list of streams that
+ this stream is piping into. (It's probably a bad idea to mess with
+ this.)
+* `destroyed` A getter that indicates whether the stream was destroyed.
+* `paused` True if the stream has been explicitly paused, otherwise false.
+* `objectMode` Indicates whether the stream is in `objectMode`. Once set
+ to `true`, it cannot be set to `false`.
+
+### Events
+
+* `data` Emitted when there's data to read. Argument is the data to read.
+ This is never emitted while not flowing. If a listener is attached, that
+ will resume the stream.
+* `end` Emitted when there's no more data to read. This will be emitted
+ immediately for empty streams when `end()` is called. If a listener is
+ attached, and `end` was already emitted, then it will be emitted again.
+ All listeners are removed when `end` is emitted.
+* `prefinish` An end-ish event that follows the same logic as `end` and is
+ emitted in the same conditions where `end` is emitted. Emitted after
+ `'end'`.
+* `finish` An end-ish event that follows the same logic as `end` and is
+ emitted in the same conditions where `end` is emitted. Emitted after
+ `'prefinish'`.
+* `close` An indication that an underlying resource has been released.
+ Minipass does not emit this event, but will defer it until after `end`
+ has been emitted, since it throws off some stream libraries otherwise.
+* `drain` Emitted when the internal buffer empties, and it is again
+ suitable to `write()` into the stream.
+* `readable` Emitted when data is buffered and ready to be read by a
+ consumer.
+* `resume` Emitted when stream changes state from buffering to flowing
+ mode. (Ie, when `resume` is called, `pipe` is called, or a `data` event
+ listener is added.)
+
+### Static Methods
+
+* `Minipass.isStream(stream)` Returns `true` if the argument is a stream,
+ and false otherwise. To be considered a stream, the object must be
+ either an instance of Minipass, or an EventEmitter that has either a
+ `pipe()` method, or both `write()` and `end()` methods. (Pretty much any
+ stream in node-land will return `true` for this.)
+
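+A quick sanity check, for instance:
+
+```js
+Minipass.isStream(new Minipass())  // true
+Minipass.isStream(process.stdout)  // true (a writable EventEmitter)
+Minipass.isStream({})              // false
+```
+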
+## EXAMPLES
+
+Here are some examples of things you can do with Minipass streams.
+
+### simple "are you done yet" promise
+
+```js
+mp.promise().then(() => {
+ // stream is finished
+}, er => {
+ // stream emitted an error
+})
+```
+
+### collecting
+
+```js
+mp.collect().then(all => {
+ // all is an array of all the data emitted
+ // encoding is supported in this case, so
+  // the result will be a collection of strings if
+ // an encoding is specified, or buffers/objects if not.
+ //
+ // In an async function, you may do
+ // const data = await stream.collect()
+})
+```
+
+### collecting into a single blob
+
+This is a bit slower because it concatenates the data into one chunk for
+you, but if you're going to do it yourself anyway, it's convenient this
+way:
+
+```js
+mp.concat().then(onebigchunk => {
+ // onebigchunk is a string if the stream
+ // had an encoding set, or a buffer otherwise.
+})
+```
+
+### iteration
+
+You can iterate over streams synchronously or asynchronously on
+platforms that support it.
+
+Synchronous iteration will end when the currently available data is
+consumed, even if the `end` event has not been reached. In string and
+buffer mode, the data is concatenated, so unless multiple writes are
+occurring in the same tick as the `read()`, sync iteration loops will
+generally only have a single iteration.
+
+To consume chunks in this way exactly as they have been written, with
+no flattening, create the stream with the `{ objectMode: true }`
+option.
+
+```js
+const mp = new Minipass({ objectMode: true })
+mp.write('a')
+mp.write('b')
+for (let letter of mp) {
+ console.log(letter) // a, b
+}
+mp.write('c')
+mp.write('d')
+for (let letter of mp) {
+ console.log(letter) // c, d
+}
+mp.write('e')
+mp.end()
+for (let letter of mp) {
+ console.log(letter) // e
+}
+for (let letter of mp) {
+ console.log(letter) // nothing
+}
+```
+
+Asynchronous iteration will continue until the end event is reached,
+consuming all of the data.
+
+```js
+const mp = new Minipass({ encoding: 'utf8' })
+
+// some source of some data
+let i = 5
+const inter = setInterval(() => {
+ if (i --> 0)
+ mp.write(Buffer.from('foo\n', 'utf8'))
+ else {
+ mp.end()
+ clearInterval(inter)
+ }
+}, 100)
+
+// consume the data with asynchronous iteration
+async function consume () {
+ for await (let chunk of mp) {
+ console.log(chunk)
+ }
+ return 'ok'
+}
+
+consume().then(res => console.log(res))
+// logs `foo\n` 5 times, and then `ok`
+```
+
+### subclass that `console.log()`s everything written into it
+
+```js
+class Logger extends Minipass {
+ write (chunk, encoding, callback) {
+ console.log('WRITE', chunk, encoding)
+ return super.write(chunk, encoding, callback)
+ }
+ end (chunk, encoding, callback) {
+ console.log('END', chunk, encoding)
+ return super.end(chunk, encoding, callback)
+ }
+}
+
+someSource.pipe(new Logger()).pipe(someDest)
+```
+
+### same thing, but using an inline anonymous class
+
+```js
+// js classes are fun
+someSource
+ .pipe(new (class extends Minipass {
+ emit (ev, ...data) {
+ // let's also log events, because debugging some weird thing
+ console.log('EMIT', ev)
+ return super.emit(ev, ...data)
+ }
+ write (chunk, encoding, callback) {
+ console.log('WRITE', chunk, encoding)
+ return super.write(chunk, encoding, callback)
+ }
+ end (chunk, encoding, callback) {
+ console.log('END', chunk, encoding)
+ return super.end(chunk, encoding, callback)
+ }
+ }))
+ .pipe(someDest)
+```
+
+### subclass that defers 'end' for some reason
+
+```js
+class SlowEnd extends Minipass {
+ emit (ev, ...args) {
+ if (ev === 'end') {
+ console.log('going to end, hold on a sec')
+ setTimeout(() => {
+ console.log('ok, ready to end now')
+ super.emit('end', ...args)
+ }, 100)
+ } else {
+ return super.emit(ev, ...args)
+ }
+ }
+}
+```
+
+### transform that creates newline-delimited JSON
+
+```js
+class NDJSONEncode extends Minipass {
+ write (obj, cb) {
+ try {
+ // JSON.stringify can throw, emit an error on that
+ return super.write(JSON.stringify(obj) + '\n', 'utf8', cb)
+ } catch (er) {
+ this.emit('error', er)
+ }
+ }
+ end (obj, cb) {
+ if (typeof obj === 'function') {
+ cb = obj
+ obj = undefined
+ }
+ if (obj !== undefined) {
+ this.write(obj)
+ }
+ return super.end(cb)
+ }
+}
+```
+
+### transform that parses newline-delimited JSON
+
+```js
+class NDJSONDecode extends Minipass {
+ constructor (options) {
+ // always be in object mode, as far as Minipass is concerned
+ super({ objectMode: true })
+ this._jsonBuffer = ''
+ }
+  write (chunk, encoding, cb) {
+    if (typeof chunk === 'string' &&
+        typeof encoding === 'string' &&
+        encoding !== 'utf8') {
+      chunk = Buffer.from(chunk, encoding).toString()
+    } else if (Buffer.isBuffer(chunk)) {
+      chunk = chunk.toString()
+    }
+    if (typeof encoding === 'function') {
+      cb = encoding
+    }
+    const jsonData = (this._jsonBuffer + chunk).split('\n')
+    this._jsonBuffer = jsonData.pop()
+    for (let i = 0; i < jsonData.length; i++) {
+      let parsed
+      try {
+        // JSON.parse can throw; emit an error on bad input and move on
+        parsed = JSON.parse(jsonData[i])
+      } catch (er) {
+        this.emit('error', er)
+        continue
+      }
+      super.write(parsed)
+    }
+    if (cb)
+      cb()
+  }
+}
+```
diff --git a/node_modules/node-gyp/node_modules/minipass/index.js b/node_modules/node-gyp/node_modules/minipass/index.js
new file mode 100644
index 000000000..c072352d4
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/minipass/index.js
@@ -0,0 +1,537 @@
+'use strict'
+const EE = require('events')
+const Yallist = require('yallist')
+const SD = require('string_decoder').StringDecoder
+
+const EOF = Symbol('EOF')
+const MAYBE_EMIT_END = Symbol('maybeEmitEnd')
+const EMITTED_END = Symbol('emittedEnd')
+const EMITTING_END = Symbol('emittingEnd')
+const CLOSED = Symbol('closed')
+const READ = Symbol('read')
+const FLUSH = Symbol('flush')
+const FLUSHCHUNK = Symbol('flushChunk')
+const ENCODING = Symbol('encoding')
+const DECODER = Symbol('decoder')
+const FLOWING = Symbol('flowing')
+const PAUSED = Symbol('paused')
+const RESUME = Symbol('resume')
+const BUFFERLENGTH = Symbol('bufferLength')
+const BUFFERPUSH = Symbol('bufferPush')
+const BUFFERSHIFT = Symbol('bufferShift')
+const OBJECTMODE = Symbol('objectMode')
+const DESTROYED = Symbol('destroyed')
+
+// TODO remove when Node v8 support drops
+const doIter = global._MP_NO_ITERATOR_SYMBOLS_ !== '1'
+const ASYNCITERATOR = doIter && Symbol.asyncIterator
+ || Symbol('asyncIterator not implemented')
+const ITERATOR = doIter && Symbol.iterator
+ || Symbol('iterator not implemented')
+
+// Buffer in node 4.x < 4.5.0 doesn't have working Buffer.from
+// or Buffer.alloc, and Buffer in node 10 deprecated the ctor.
+// .M, this is fine .\^/M..
+const B = Buffer.alloc ? Buffer
+ : /* istanbul ignore next */ require('safe-buffer').Buffer
+
+// events that mean 'the stream is over'
+// these are treated specially, and re-emitted
+// if they are listened for after emitting.
+const isEndish = ev =>
+ ev === 'end' ||
+ ev === 'finish' ||
+ ev === 'prefinish'
+
+const isArrayBuffer = b => b instanceof ArrayBuffer ||
+ typeof b === 'object' &&
+ b.constructor &&
+ b.constructor.name === 'ArrayBuffer' &&
+ b.byteLength >= 0
+
+const isArrayBufferView = b => !B.isBuffer(b) && ArrayBuffer.isView(b)
+
+module.exports = class Minipass extends EE {
+ constructor (options) {
+ super()
+ this[FLOWING] = false
+ // whether we're explicitly paused
+ this[PAUSED] = false
+ this.pipes = new Yallist()
+ this.buffer = new Yallist()
+ this[OBJECTMODE] = options && options.objectMode || false
+ if (this[OBJECTMODE])
+ this[ENCODING] = null
+ else
+ this[ENCODING] = options && options.encoding || null
+ if (this[ENCODING] === 'buffer')
+ this[ENCODING] = null
+ this[DECODER] = this[ENCODING] ? new SD(this[ENCODING]) : null
+ this[EOF] = false
+ this[EMITTED_END] = false
+ this[EMITTING_END] = false
+ this[CLOSED] = false
+ this.writable = true
+ this.readable = true
+ this[BUFFERLENGTH] = 0
+ this[DESTROYED] = false
+ }
+
+ get bufferLength () { return this[BUFFERLENGTH] }
+
+ get encoding () { return this[ENCODING] }
+ set encoding (enc) {
+ if (this[OBJECTMODE])
+ throw new Error('cannot set encoding in objectMode')
+
+ if (this[ENCODING] && enc !== this[ENCODING] &&
+ (this[DECODER] && this[DECODER].lastNeed || this[BUFFERLENGTH]))
+ throw new Error('cannot change encoding')
+
+ if (this[ENCODING] !== enc) {
+ this[DECODER] = enc ? new SD(enc) : null
+ if (this.buffer.length)
+ this.buffer = this.buffer.map(chunk => this[DECODER].write(chunk))
+ }
+
+ this[ENCODING] = enc
+ }
+
+ setEncoding (enc) {
+ this.encoding = enc
+ }
+
+ get objectMode () { return this[OBJECTMODE] }
+ set objectMode (ॐ ) { this[OBJECTMODE] = this[OBJECTMODE] || !!ॐ }
+
+ write (chunk, encoding, cb) {
+ if (this[EOF])
+ throw new Error('write after end')
+
+ if (this[DESTROYED]) {
+ this.emit('error', Object.assign(
+ new Error('Cannot call write after a stream was destroyed'),
+ { code: 'ERR_STREAM_DESTROYED' }
+ ))
+ return true
+ }
+
+ if (typeof encoding === 'function')
+ cb = encoding, encoding = 'utf8'
+
+ if (!encoding)
+ encoding = 'utf8'
+
+ // convert array buffers and typed array views into buffers
+ // at some point in the future, we may want to do the opposite!
+ // leave strings and buffers as-is
+ // anything else switches us into object mode
+ if (!this[OBJECTMODE] && !B.isBuffer(chunk)) {
+ if (isArrayBufferView(chunk))
+ chunk = B.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
+ else if (isArrayBuffer(chunk))
+ chunk = B.from(chunk)
+ else if (typeof chunk !== 'string')
+ // use the setter so we throw if we have encoding set
+ this.objectMode = true
+ }
+
+ // this ensures at this point that the chunk is a buffer or string
+ // don't buffer it up or send it to the decoder
+ if (!this.objectMode && !chunk.length) {
+ const ret = this.flowing
+ if (this[BUFFERLENGTH] !== 0)
+ this.emit('readable')
+ if (cb)
+ cb()
+ return ret
+ }
+
+ // fast-path writing strings of same encoding to a stream with
+ // an empty buffer, skipping the buffer/decoder dance
+ if (typeof chunk === 'string' && !this[OBJECTMODE] &&
+ // unless it is a string already ready for us to use
+ !(encoding === this[ENCODING] && !this[DECODER].lastNeed)) {
+ chunk = B.from(chunk, encoding)
+ }
+
+ if (B.isBuffer(chunk) && this[ENCODING])
+ chunk = this[DECODER].write(chunk)
+
+ try {
+ return this.flowing
+ ? (this.emit('data', chunk), this.flowing)
+ : (this[BUFFERPUSH](chunk), false)
+ } finally {
+ if (this[BUFFERLENGTH] !== 0)
+ this.emit('readable')
+ if (cb)
+ cb()
+ }
+ }
+
+ read (n) {
+ if (this[DESTROYED])
+ return null
+
+ try {
+ if (this[BUFFERLENGTH] === 0 || n === 0 || n > this[BUFFERLENGTH])
+ return null
+
+ if (this[OBJECTMODE])
+ n = null
+
+ if (this.buffer.length > 1 && !this[OBJECTMODE]) {
+ if (this.encoding)
+ this.buffer = new Yallist([
+ Array.from(this.buffer).join('')
+ ])
+ else
+ this.buffer = new Yallist([
+ B.concat(Array.from(this.buffer), this[BUFFERLENGTH])
+ ])
+ }
+
+ return this[READ](n || null, this.buffer.head.value)
+ } finally {
+ this[MAYBE_EMIT_END]()
+ }
+ }
+
+ [READ] (n, chunk) {
+ if (n === chunk.length || n === null)
+ this[BUFFERSHIFT]()
+ else {
+ this.buffer.head.value = chunk.slice(n)
+ chunk = chunk.slice(0, n)
+ this[BUFFERLENGTH] -= n
+ }
+
+ this.emit('data', chunk)
+
+ if (!this.buffer.length && !this[EOF])
+ this.emit('drain')
+
+ return chunk
+ }
+
+ end (chunk, encoding, cb) {
+ if (typeof chunk === 'function')
+ cb = chunk, chunk = null
+ if (typeof encoding === 'function')
+ cb = encoding, encoding = 'utf8'
+ if (chunk)
+ this.write(chunk, encoding)
+ if (cb)
+ this.once('end', cb)
+ this[EOF] = true
+ this.writable = false
+
+ // if we haven't written anything, then go ahead and emit,
+ // even if we're not reading.
+ // we'll re-emit if a new 'end' listener is added anyway.
+ // This makes MP more suitable to write-only use cases.
+ if (this.flowing || !this[PAUSED])
+ this[MAYBE_EMIT_END]()
+ return this
+ }
+
+ // don't let the internal resume be overwritten
+ [RESUME] () {
+ if (this[DESTROYED])
+ return
+
+ this[PAUSED] = false
+ this[FLOWING] = true
+ this.emit('resume')
+ if (this.buffer.length)
+ this[FLUSH]()
+ else if (this[EOF])
+ this[MAYBE_EMIT_END]()
+ else
+ this.emit('drain')
+ }
+
+ resume () {
+ return this[RESUME]()
+ }
+
+ pause () {
+ this[FLOWING] = false
+ this[PAUSED] = true
+ }
+
+ get destroyed () {
+ return this[DESTROYED]
+ }
+
+ get flowing () {
+ return this[FLOWING]
+ }
+
+ get paused () {
+ return this[PAUSED]
+ }
+
+ [BUFFERPUSH] (chunk) {
+ if (this[OBJECTMODE])
+ this[BUFFERLENGTH] += 1
+ else
+ this[BUFFERLENGTH] += chunk.length
+ return this.buffer.push(chunk)
+ }
+
+ [BUFFERSHIFT] () {
+ if (this.buffer.length) {
+ if (this[OBJECTMODE])
+ this[BUFFERLENGTH] -= 1
+ else
+ this[BUFFERLENGTH] -= this.buffer.head.value.length
+ }
+ return this.buffer.shift()
+ }
+
+ [FLUSH] () {
+ do {} while (this[FLUSHCHUNK](this[BUFFERSHIFT]()))
+
+ if (!this.buffer.length && !this[EOF])
+ this.emit('drain')
+ }
+
+ [FLUSHCHUNK] (chunk) {
+ return chunk ? (this.emit('data', chunk), this.flowing) : false
+ }
+
+ pipe (dest, opts) {
+ if (this[DESTROYED])
+ return
+
+ const ended = this[EMITTED_END]
+ opts = opts || {}
+ if (dest === process.stdout || dest === process.stderr)
+ opts.end = false
+ else
+ opts.end = opts.end !== false
+
+ const p = { dest: dest, opts: opts, ondrain: _ => this[RESUME]() }
+ this.pipes.push(p)
+
+ dest.on('drain', p.ondrain)
+ this[RESUME]()
+ // piping an ended stream ends immediately
+ if (ended && p.opts.end)
+ p.dest.end()
+ return dest
+ }
+
+ addListener (ev, fn) {
+ return this.on(ev, fn)
+ }
+
+ on (ev, fn) {
+ try {
+ return super.on(ev, fn)
+ } finally {
+ if (ev === 'data' && !this.pipes.length && !this.flowing)
+ this[RESUME]()
+ else if (isEndish(ev) && this[EMITTED_END]) {
+ super.emit(ev)
+ this.removeAllListeners(ev)
+ }
+ }
+ }
+
+ get emittedEnd () {
+ return this[EMITTED_END]
+ }
+
+ [MAYBE_EMIT_END] () {
+ if (!this[EMITTING_END] &&
+ !this[EMITTED_END] &&
+ !this[DESTROYED] &&
+ this.buffer.length === 0 &&
+ this[EOF]) {
+ this[EMITTING_END] = true
+ this.emit('end')
+ this.emit('prefinish')
+ this.emit('finish')
+ if (this[CLOSED])
+ this.emit('close')
+ this[EMITTING_END] = false
+ }
+ }
+
+ emit (ev, data) {
+ // error and close are only events allowed after calling destroy()
+ if (ev !== 'error' && ev !== 'close' && ev !== DESTROYED && this[DESTROYED])
+ return
+ else if (ev === 'data') {
+ if (!data)
+ return
+
+ if (this.pipes.length)
+ this.pipes.forEach(p =>
+ p.dest.write(data) === false && this.pause())
+ } else if (ev === 'end') {
+ // only actual end gets this treatment
+ if (this[EMITTED_END] === true)
+ return
+
+ this[EMITTED_END] = true
+ this.readable = false
+
+ if (this[DECODER]) {
+ data = this[DECODER].end()
+ if (data) {
+ this.pipes.forEach(p => p.dest.write(data))
+ super.emit('data', data)
+ }
+ }
+
+ this.pipes.forEach(p => {
+ p.dest.removeListener('drain', p.ondrain)
+ if (p.opts.end)
+ p.dest.end()
+ })
+ } else if (ev === 'close') {
+ this[CLOSED] = true
+ // don't emit close before 'end' and 'finish'
+ if (!this[EMITTED_END] && !this[DESTROYED])
+ return
+ }
+
+ // TODO: replace with a spread operator when Node v4 support drops
+ const args = new Array(arguments.length)
+ args[0] = ev
+ args[1] = data
+ if (arguments.length > 2) {
+ for (let i = 2; i < arguments.length; i++) {
+ args[i] = arguments[i]
+ }
+ }
+
+ try {
+ return super.emit.apply(this, args)
+ } finally {
+ if (!isEndish(ev))
+ this[MAYBE_EMIT_END]()
+ else
+ this.removeAllListeners(ev)
+ }
+ }
+
+ // const all = await stream.collect()
+ collect () {
+ const buf = []
+ buf.dataLength = 0
+ this.on('data', c => {
+ buf.push(c)
+ buf.dataLength += c.length
+ })
+ return this.promise().then(() => buf)
+ }
+
+ // const data = await stream.concat()
+ concat () {
+ return this[OBJECTMODE]
+ ? Promise.reject(new Error('cannot concat in objectMode'))
+ : this.collect().then(buf =>
+ this[OBJECTMODE]
+ ? Promise.reject(new Error('cannot concat in objectMode'))
+ : this[ENCODING] ? buf.join('') : B.concat(buf, buf.dataLength))
+ }
+
+ // stream.promise().then(() => done, er => emitted error)
+ promise () {
+ return new Promise((resolve, reject) => {
+ this.on(DESTROYED, () => reject(new Error('stream destroyed')))
+ this.on('end', () => resolve())
+ this.on('error', er => reject(er))
+ })
+ }
+
+ // for await (let chunk of stream)
+ [ASYNCITERATOR] () {
+ const next = () => {
+ const res = this.read()
+ if (res !== null)
+ return Promise.resolve({ done: false, value: res })
+
+ if (this[EOF])
+ return Promise.resolve({ done: true })
+
+ let resolve = null
+ let reject = null
+ const onerr = er => {
+ this.removeListener('data', ondata)
+ this.removeListener('end', onend)
+ reject(er)
+ }
+ const ondata = value => {
+ this.removeListener('error', onerr)
+ this.removeListener('end', onend)
+ this.pause()
+ resolve({ value: value, done: !!this[EOF] })
+ }
+ const onend = () => {
+ this.removeListener('error', onerr)
+ this.removeListener('data', ondata)
+ resolve({ done: true })
+ }
+ const ondestroy = () => onerr(new Error('stream destroyed'))
+ return new Promise((res, rej) => {
+ reject = rej
+ resolve = res
+ this.once(DESTROYED, ondestroy)
+ this.once('error', onerr)
+ this.once('end', onend)
+ this.once('data', ondata)
+ })
+ }
+
+ return { next }
+ }
+
+ // for (let chunk of stream)
+ [ITERATOR] () {
+ const next = () => {
+ const value = this.read()
+ const done = value === null
+ return { value, done }
+ }
+ return { next }
+ }
+
+ destroy (er) {
+ if (this[DESTROYED]) {
+ if (er)
+ this.emit('error', er)
+ else
+ this.emit(DESTROYED)
+ return this
+ }
+
+ this[DESTROYED] = true
+
+ // throw away all buffered data, it's never coming out
+ this.buffer = new Yallist()
+ this[BUFFERLENGTH] = 0
+
+ if (typeof this.close === 'function' && !this[CLOSED])
+ this.close()
+
+ if (er)
+ this.emit('error', er)
+ else // if no error to emit, still reject pending promises
+ this.emit(DESTROYED)
+
+ return this
+ }
+
+ static isStream (s) {
+ return !!s && (s instanceof Minipass || s instanceof EE && (
+ typeof s.pipe === 'function' || // readable
+ (typeof s.write === 'function' && typeof s.end === 'function') // writable
+ ))
+ }
+}
diff --git a/node_modules/node-gyp/node_modules/minipass/package.json b/node_modules/node-gyp/node_modules/minipass/package.json
new file mode 100644
index 000000000..c0c479817
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/minipass/package.json
@@ -0,0 +1,72 @@
+{
+ "_from": "minipass@^2.8.6",
+ "_id": "minipass@2.9.0",
+ "_inBundle": false,
+ "_integrity": "sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==",
+ "_location": "/node-gyp/minipass",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "minipass@^2.8.6",
+ "name": "minipass",
+ "escapedName": "minipass",
+ "rawSpec": "^2.8.6",
+ "saveSpec": null,
+ "fetchSpec": "^2.8.6"
+ },
+ "_requiredBy": [
+ "/node-gyp/fs-minipass",
+ "/node-gyp/minizlib",
+ "/node-gyp/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/minipass/-/minipass-2.9.0.tgz",
+ "_shasum": "e713762e7d3e32fed803115cf93e04bca9fcc9a6",
+ "_spec": "minipass@^2.8.6",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/node-gyp/node_modules/tar",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/minipass/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.0"
+ },
+ "deprecated": false,
+ "description": "minimal implementation of a PassThrough stream",
+ "devDependencies": {
+ "end-of-stream": "^1.4.0",
+ "tap": "^14.6.5",
+ "through2": "^2.0.3"
+ },
+ "files": [
+ "index.js"
+ ],
+ "homepage": "https://github.com/isaacs/minipass#readme",
+ "keywords": [
+ "passthrough",
+ "stream"
+ ],
+ "license": "ISC",
+ "main": "index.js",
+ "name": "minipass",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/minipass.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap"
+ },
+ "tap": {
+ "check-coverage": true
+ },
+ "version": "2.9.0"
+}
diff --git a/node_modules/node-gyp/node_modules/minizlib/LICENSE b/node_modules/node-gyp/node_modules/minizlib/LICENSE
new file mode 100644
index 000000000..ffce7383f
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/minizlib/LICENSE
@@ -0,0 +1,26 @@
+Minizlib was created by Isaac Z. Schlueter.
+It is a derivative work of the Node.js project.
+
+"""
+Copyright Isaac Z. Schlueter and Contributors
+Copyright Node.js contributors. All rights reserved.
+Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""
diff --git a/node_modules/node-gyp/node_modules/minizlib/README.md b/node_modules/node-gyp/node_modules/minizlib/README.md
new file mode 100644
index 000000000..4097b8522
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/minizlib/README.md
@@ -0,0 +1,53 @@
+# minizlib
+
+A fast zlib stream built on [minipass](http://npm.im/minipass) and
+Node.js's zlib binding.
+
+This module was created to serve the needs of
+[node-tar](http://npm.im/tar) and
+[minipass-fetch](http://npm.im/minipass-fetch).
+
+Brotli is supported in versions of node with a Brotli binding.
+
+## How does this differ from the streams in `require('zlib')`?
+
+First, there are no convenience methods to compress or decompress a
+buffer. If you want those, use the built-in `zlib` module. This is
+only streams. That being said, Minipass streams make it fairly easy to
+use as one-liners: `new zlib.Deflate().end(data).read()` will return the
+deflate compressed result.
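+
+For example, a synchronous round trip as one-liners (a minimal sketch;
+the input buffer is illustrative):
+
+```js
+const zlib = require('minizlib')
+const input = Buffer.from('hello, world')
+const compressed = new zlib.Deflate().end(input).read()
+const output = new zlib.Inflate().end(compressed).read()
+// output.equals(input) === true
+```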
+
+This module compresses and decompresses the data as fast as you feed
+it in. It is synchronous, and runs on the main process thread. Zlib
+and Brotli operations can be high CPU, but they're very fast, and doing it
+this way means much less bookkeeping and artificial deferral.
+
+Node's built in zlib streams are built on top of `stream.Transform`.
+They do the maximally safe thing with respect to consistent
+asynchrony, buffering, and backpressure.
+
+See [Minipass](http://npm.im/minipass) for more on the differences between
+Node.js core streams and Minipass streams, and the convenience methods
+provided by that class.
+
+## Classes
+
+- Deflate
+- Inflate
+- Gzip
+- Gunzip
+- DeflateRaw
+- InflateRaw
+- Unzip
+- BrotliCompress (Node v10 and higher)
+- BrotliDecompress (Node v10 and higher)
+
+## USAGE
+
+```js
+const zlib = require('minizlib')
+const input = sourceOfCompressedData()
+const decode = new zlib.BrotliDecompress()
+const output = whereToWriteTheDecodedData()
+input.pipe(decode).pipe(output)
+```
diff --git a/node_modules/node-gyp/node_modules/minizlib/constants.js b/node_modules/node-gyp/node_modules/minizlib/constants.js
new file mode 100644
index 000000000..641ebc731
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/minizlib/constants.js
@@ -0,0 +1,115 @@
+// Update with any zlib constants that are added or changed in the future.
+// Node v6 didn't export this, so we just hard code the version and rely
+// on all the other hard-coded values from zlib v4736. When node v6
+// support drops, we can just export the realZlibConstants object.
+const realZlibConstants = require('zlib').constants ||
+ /* istanbul ignore next */ { ZLIB_VERNUM: 4736 }
+
+module.exports = Object.freeze(Object.assign(Object.create(null), {
+ Z_NO_FLUSH: 0,
+ Z_PARTIAL_FLUSH: 1,
+ Z_SYNC_FLUSH: 2,
+ Z_FULL_FLUSH: 3,
+ Z_FINISH: 4,
+ Z_BLOCK: 5,
+ Z_OK: 0,
+ Z_STREAM_END: 1,
+ Z_NEED_DICT: 2,
+ Z_ERRNO: -1,
+ Z_STREAM_ERROR: -2,
+ Z_DATA_ERROR: -3,
+ Z_MEM_ERROR: -4,
+ Z_BUF_ERROR: -5,
+ Z_VERSION_ERROR: -6,
+ Z_NO_COMPRESSION: 0,
+ Z_BEST_SPEED: 1,
+ Z_BEST_COMPRESSION: 9,
+ Z_DEFAULT_COMPRESSION: -1,
+ Z_FILTERED: 1,
+ Z_HUFFMAN_ONLY: 2,
+ Z_RLE: 3,
+ Z_FIXED: 4,
+ Z_DEFAULT_STRATEGY: 0,
+ DEFLATE: 1,
+ INFLATE: 2,
+ GZIP: 3,
+ GUNZIP: 4,
+ DEFLATERAW: 5,
+ INFLATERAW: 6,
+ UNZIP: 7,
+ BROTLI_DECODE: 8,
+ BROTLI_ENCODE: 9,
+ Z_MIN_WINDOWBITS: 8,
+ Z_MAX_WINDOWBITS: 15,
+ Z_DEFAULT_WINDOWBITS: 15,
+ Z_MIN_CHUNK: 64,
+ Z_MAX_CHUNK: Infinity,
+ Z_DEFAULT_CHUNK: 16384,
+ Z_MIN_MEMLEVEL: 1,
+ Z_MAX_MEMLEVEL: 9,
+ Z_DEFAULT_MEMLEVEL: 8,
+ Z_MIN_LEVEL: -1,
+ Z_MAX_LEVEL: 9,
+ Z_DEFAULT_LEVEL: -1,
+ BROTLI_OPERATION_PROCESS: 0,
+ BROTLI_OPERATION_FLUSH: 1,
+ BROTLI_OPERATION_FINISH: 2,
+ BROTLI_OPERATION_EMIT_METADATA: 3,
+ BROTLI_MODE_GENERIC: 0,
+ BROTLI_MODE_TEXT: 1,
+ BROTLI_MODE_FONT: 2,
+ BROTLI_DEFAULT_MODE: 0,
+ BROTLI_MIN_QUALITY: 0,
+ BROTLI_MAX_QUALITY: 11,
+ BROTLI_DEFAULT_QUALITY: 11,
+ BROTLI_MIN_WINDOW_BITS: 10,
+ BROTLI_MAX_WINDOW_BITS: 24,
+ BROTLI_LARGE_MAX_WINDOW_BITS: 30,
+ BROTLI_DEFAULT_WINDOW: 22,
+ BROTLI_MIN_INPUT_BLOCK_BITS: 16,
+ BROTLI_MAX_INPUT_BLOCK_BITS: 24,
+ BROTLI_PARAM_MODE: 0,
+ BROTLI_PARAM_QUALITY: 1,
+ BROTLI_PARAM_LGWIN: 2,
+ BROTLI_PARAM_LGBLOCK: 3,
+ BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: 4,
+ BROTLI_PARAM_SIZE_HINT: 5,
+ BROTLI_PARAM_LARGE_WINDOW: 6,
+ BROTLI_PARAM_NPOSTFIX: 7,
+ BROTLI_PARAM_NDIRECT: 8,
+ BROTLI_DECODER_RESULT_ERROR: 0,
+ BROTLI_DECODER_RESULT_SUCCESS: 1,
+ BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: 2,
+ BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: 3,
+ BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: 0,
+ BROTLI_DECODER_PARAM_LARGE_WINDOW: 1,
+ BROTLI_DECODER_NO_ERROR: 0,
+ BROTLI_DECODER_SUCCESS: 1,
+ BROTLI_DECODER_NEEDS_MORE_INPUT: 2,
+ BROTLI_DECODER_NEEDS_MORE_OUTPUT: 3,
+ BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: -1,
+ BROTLI_DECODER_ERROR_FORMAT_RESERVED: -2,
+ BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: -3,
+ BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: -4,
+ BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: -5,
+ BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: -6,
+ BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: -7,
+ BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: -8,
+ BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: -9,
+ BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: -10,
+ BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: -11,
+ BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: -12,
+ BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: -13,
+ BROTLI_DECODER_ERROR_FORMAT_PADDING_1: -14,
+ BROTLI_DECODER_ERROR_FORMAT_PADDING_2: -15,
+ BROTLI_DECODER_ERROR_FORMAT_DISTANCE: -16,
+ BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: -19,
+ BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: -20,
+ BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: -21,
+ BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: -22,
+ BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: -25,
+ BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: -26,
+ BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: -27,
+ BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: -30,
+ BROTLI_DECODER_ERROR_UNREACHABLE: -31,
+}, realZlibConstants))
diff --git a/node_modules/node-gyp/node_modules/minizlib/index.js b/node_modules/node-gyp/node_modules/minizlib/index.js
new file mode 100644
index 000000000..295047b9c
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/minizlib/index.js
@@ -0,0 +1,320 @@
+'use strict'
+
+const assert = require('assert')
+const Buffer = require('buffer').Buffer
+const realZlib = require('zlib')
+
+const constants = exports.constants = require('./constants.js')
+const Minipass = require('minipass')
+
+const OriginalBufferConcat = Buffer.concat
+
+class ZlibError extends Error {
+ constructor (err) {
+ super('zlib: ' + err.message)
+ this.code = err.code
+ this.errno = err.errno
+ /* istanbul ignore if */
+ if (!this.code)
+ this.code = 'ZLIB_ERROR'
+
+ this.message = 'zlib: ' + err.message
+ Error.captureStackTrace(this, this.constructor)
+ }
+
+ get name () {
+ return 'ZlibError'
+ }
+}
+
+// the Zlib class they all inherit from
+// This thing manages the queue of requests, and returns
+// true or false if there is anything in the queue when
+// you call the .write() method.
+const _opts = Symbol('opts')
+const _flushFlag = Symbol('flushFlag')
+const _finishFlushFlag = Symbol('finishFlushFlag')
+const _fullFlushFlag = Symbol('fullFlushFlag')
+const _handle = Symbol('handle')
+const _onError = Symbol('onError')
+const _sawError = Symbol('sawError')
+const _level = Symbol('level')
+const _strategy = Symbol('strategy')
+const _ended = Symbol('ended')
+const _defaultFullFlush = Symbol('_defaultFullFlush')
+
+class ZlibBase extends Minipass {
+ constructor (opts, mode) {
+ if (!opts || typeof opts !== 'object')
+ throw new TypeError('invalid options for ZlibBase constructor')
+
+ super(opts)
+ this[_ended] = false
+ this[_opts] = opts
+
+ this[_flushFlag] = opts.flush
+ this[_finishFlushFlag] = opts.finishFlush
+ // this will throw if any options are invalid for the class selected
+ try {
+ this[_handle] = new realZlib[mode](opts)
+ } catch (er) {
+ // make sure that all errors get decorated properly
+ throw new ZlibError(er)
+ }
+
+ this[_onError] = (err) => {
+ this[_sawError] = true
+ // there is no way to cleanly recover.
+ // continuing only obscures problems.
+ this.close()
+ this.emit('error', err)
+ }
+
+ this[_handle].on('error', er => this[_onError](new ZlibError(er)))
+ this.once('end', () => this.close)
+ }
+
+ close () {
+ if (this[_handle]) {
+ this[_handle].close()
+ this[_handle] = null
+ this.emit('close')
+ }
+ }
+
+ reset () {
+ if (!this[_sawError]) {
+ assert(this[_handle], 'zlib binding closed')
+ return this[_handle].reset()
+ }
+ }
+
+ flush (flushFlag) {
+ if (this.ended)
+ return
+
+ if (typeof flushFlag !== 'number')
+ flushFlag = this[_fullFlushFlag]
+ this.write(Object.assign(Buffer.alloc(0), { [_flushFlag]: flushFlag }))
+ }
+
+ end (chunk, encoding, cb) {
+ if (chunk)
+ this.write(chunk, encoding)
+ this.flush(this[_finishFlushFlag])
+ this[_ended] = true
+ return super.end(null, null, cb)
+ }
+
+ get ended () {
+ return this[_ended]
+ }
+
+ write (chunk, encoding, cb) {
+ // process the chunk using the sync process
+ // then super.write() all the outputted chunks
+ if (typeof encoding === 'function')
+ cb = encoding, encoding = 'utf8'
+
+ if (typeof chunk === 'string')
+ chunk = Buffer.from(chunk, encoding)
+
+ if (this[_sawError])
+ return
+ assert(this[_handle], 'zlib binding closed')
+
+ // _processChunk tries to .close() the native handle after it's done, so we
+ // intercept that by temporarily making it a no-op.
+ const nativeHandle = this[_handle]._handle
+ const originalNativeClose = nativeHandle.close
+ nativeHandle.close = () => {}
+ const originalClose = this[_handle].close
+ this[_handle].close = () => {}
+ // It also calls `Buffer.concat()` at the end, which may be convenient
+ // for some, but which we are not interested in as it slows us down.
+ Buffer.concat = (args) => args
+ let result
+ try {
+ const flushFlag = typeof chunk[_flushFlag] === 'number'
+ ? chunk[_flushFlag] : this[_flushFlag]
+ result = this[_handle]._processChunk(chunk, flushFlag)
+ // if we don't throw, reset it back how it was
+ Buffer.concat = OriginalBufferConcat
+ } catch (err) {
+ // or if we do, put Buffer.concat() back before we emit error
+ // Error events call into user code, which may call Buffer.concat()
+ Buffer.concat = OriginalBufferConcat
+ this[_onError](new ZlibError(err))
+ } finally {
+ if (this[_handle]) {
+ // Core zlib resets `_handle` to null after attempting to close the
+ // native handle. Our no-op handler prevented actual closure, but we
+ // need to restore the `._handle` property.
+ this[_handle]._handle = nativeHandle
+ nativeHandle.close = originalNativeClose
+ this[_handle].close = originalClose
+ // `_processChunk()` adds an 'error' listener. If we don't remove it
+ // after each call, these handlers start piling up.
+ this[_handle].removeAllListeners('error')
+ }
+ }
+
+ let writeReturn
+ if (result) {
+ if (Array.isArray(result) && result.length > 0) {
+ // The first buffer is always `handle._outBuffer`, which would be
+ // re-used for later invocations; so, we always have to copy that one.
+ writeReturn = super.write(Buffer.from(result[0]))
+ for (let i = 1; i < result.length; i++) {
+ writeReturn = super.write(result[i])
+ }
+ } else {
+ writeReturn = super.write(Buffer.from(result))
+ }
+ }
+
+ if (cb)
+ cb()
+ return writeReturn
+ }
+}
+
+class Zlib extends ZlibBase {
+ constructor (opts, mode) {
+ opts = opts || {}
+
+ opts.flush = opts.flush || constants.Z_NO_FLUSH
+ opts.finishFlush = opts.finishFlush || constants.Z_FINISH
+ super(opts, mode)
+
+ this[_fullFlushFlag] = constants.Z_FULL_FLUSH
+ this[_level] = opts.level
+ this[_strategy] = opts.strategy
+ }
+
+ params (level, strategy) {
+ if (this[_sawError])
+ return
+
+ if (!this[_handle])
+ throw new Error('cannot switch params when binding is closed')
+
+ // no way to test this without also not supporting params at all
+ /* istanbul ignore if */
+ if (!this[_handle].params)
+ throw new Error('not supported in this implementation')
+
+ if (this[_level] !== level || this[_strategy] !== strategy) {
+ this.flush(constants.Z_SYNC_FLUSH)
+ assert(this[_handle], 'zlib binding closed')
+ // .params() calls .flush(), but the latter is always async in the
+ // core zlib. We override .flush() temporarily to intercept that and
+ // flush synchronously.
+ const origFlush = this[_handle].flush
+ this[_handle].flush = (flushFlag, cb) => {
+ this.flush(flushFlag)
+ cb()
+ }
+ try {
+ this[_handle].params(level, strategy)
+ } finally {
+ this[_handle].flush = origFlush
+ }
+ /* istanbul ignore else */
+ if (this[_handle]) {
+ this[_level] = level
+ this[_strategy] = strategy
+ }
+ }
+ }
+}
+
+// minimal 2-byte header
+class Deflate extends Zlib {
+ constructor (opts) {
+ super(opts, 'Deflate')
+ }
+}
+
+class Inflate extends Zlib {
+ constructor (opts) {
+ super(opts, 'Inflate')
+ }
+}
+
+// gzip - bigger header, same deflate compression
+class Gzip extends Zlib {
+ constructor (opts) {
+ super(opts, 'Gzip')
+ }
+}
+
+class Gunzip extends Zlib {
+ constructor (opts) {
+ super(opts, 'Gunzip')
+ }
+}
+
+// raw - no header
+class DeflateRaw extends Zlib {
+ constructor (opts) {
+ super(opts, 'DeflateRaw')
+ }
+}
+
+class InflateRaw extends Zlib {
+ constructor (opts) {
+ super(opts, 'InflateRaw')
+ }
+}
+
+// auto-detect header.
+class Unzip extends Zlib {
+ constructor (opts) {
+ super(opts, 'Unzip')
+ }
+}
+
+class Brotli extends ZlibBase {
+ constructor (opts, mode) {
+ opts = opts || {}
+
+ opts.flush = opts.flush || constants.BROTLI_OPERATION_PROCESS
+ opts.finishFlush = opts.finishFlush || constants.BROTLI_OPERATION_FINISH
+
+ super(opts, mode)
+
+ this[_fullFlushFlag] = constants.BROTLI_OPERATION_FLUSH
+ }
+}
+
+class BrotliCompress extends Brotli {
+ constructor (opts) {
+ super(opts, 'BrotliCompress')
+ }
+}
+
+class BrotliDecompress extends Brotli {
+ constructor (opts) {
+ super(opts, 'BrotliDecompress')
+ }
+}
+
+exports.Deflate = Deflate
+exports.Inflate = Inflate
+exports.Gzip = Gzip
+exports.Gunzip = Gunzip
+exports.DeflateRaw = DeflateRaw
+exports.InflateRaw = InflateRaw
+exports.Unzip = Unzip
+/* istanbul ignore else */
+if (typeof realZlib.BrotliCompress === 'function') {
+ exports.BrotliCompress = BrotliCompress
+ exports.BrotliDecompress = BrotliDecompress
+} else {
+ exports.BrotliCompress = exports.BrotliDecompress = class {
+ constructor () {
+ throw new Error('Brotli is not supported in this version of Node.js')
+ }
+ }
+}
diff --git a/node_modules/node-gyp/node_modules/minizlib/package.json b/node_modules/node-gyp/node_modules/minizlib/package.json
new file mode 100644
index 000000000..cb750be92
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/minizlib/package.json
@@ -0,0 +1,71 @@
+{
+ "_from": "minizlib@^1.2.1",
+ "_id": "minizlib@1.3.3",
+ "_inBundle": false,
+ "_integrity": "sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==",
+ "_location": "/node-gyp/minizlib",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "minizlib@^1.2.1",
+ "name": "minizlib",
+ "escapedName": "minizlib",
+ "rawSpec": "^1.2.1",
+ "saveSpec": null,
+ "fetchSpec": "^1.2.1"
+ },
+ "_requiredBy": [
+ "/node-gyp/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.3.3.tgz",
+ "_shasum": "2290de96818a34c29551c8a8d301216bd65a861d",
+ "_spec": "minizlib@^1.2.1",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/node-gyp/node_modules/tar",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/minizlib/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "minipass": "^2.9.0"
+ },
+ "deprecated": false,
+ "description": "A small fast zlib stream built on [minipass](http://npm.im/minipass) and Node.js's zlib binding.",
+ "devDependencies": {
+ "tap": "^12.0.1"
+ },
+ "files": [
+ "index.js",
+ "constants.js"
+ ],
+ "homepage": "https://github.com/isaacs/minizlib#readme",
+ "keywords": [
+ "zlib",
+ "gzip",
+ "gunzip",
+ "deflate",
+ "inflate",
+ "compression",
+ "zip",
+ "unzip"
+ ],
+ "license": "MIT",
+ "main": "index.js",
+ "name": "minizlib",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/minizlib.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --all; git push origin --tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap test/*.js --100 -J"
+ },
+ "version": "1.3.3"
+}
diff --git a/node_modules/node-gyp/node_modules/tar/LICENSE b/node_modules/node-gyp/node_modules/tar/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/node-gyp/node_modules/tar/README.md b/node_modules/node-gyp/node_modules/tar/README.md
new file mode 100644
index 000000000..034e4865c
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/README.md
@@ -0,0 +1,954 @@
+# node-tar
+
+[![Build Status](https://travis-ci.org/npm/node-tar.svg?branch=master)](https://travis-ci.org/npm/node-tar)
+
+[Fast](./benchmarks) and full-featured Tar for Node.js
+
+The API is designed to mimic the behavior of `tar(1)` on unix systems.
+If you are familiar with how tar works, most of this will hopefully be
+straightforward for you. If not, then hopefully this module can teach
+you useful unix skills that may come in handy someday :)
+
+## Background
+
+A "tar file" or "tarball" is an archive of file system entries
+(directories, files, links, etc.) The name comes from "tape archive".
+If you run `man tar` on almost any Unix command line, you'll learn
+quite a bit about what it can do, and its history.
+
+Tar has 5 main top-level commands:
+
+* `c` Create an archive
+* `r` Replace entries within an archive
+* `u` Update entries within an archive (ie, replace if they're newer)
+* `t` List out the contents of an archive
+* `x` Extract an archive to disk
+
+The other flags and options modify how this top level function works.
+
+## High-Level API
+
+These 5 functions are the high-level API. All of them have a
+single-character name (for unix nerds familiar with `tar(1)`) as well
+as a long name (for everyone else).
+
+All the high-level functions take the following arguments, all three
+of which are optional and may be omitted.
+
+1. `options` - An optional object specifying various options
+2. `paths` - An array of paths to add or extract
+3. `callback` - Called when the command is completed, if async. (If
+ sync or no file specified, providing a callback throws a
+ `TypeError`.)
+
+If the command is sync (ie, if `options.sync=true`), then the
+callback is not allowed, since the action will be completed immediately.
+
+If a `file` argument is specified, and the command is async, then a
+`Promise` is returned. In this case, a callback may also be
+provided, which is called when the command is completed.
+
+If a `file` option is not specified, then a stream is returned. For
+`create`, this is a readable stream of the generated archive. For
+`list` and `extract` this is a writable stream that an archive should
+be written into. If a file is not specified, then a callback is not
+allowed, because you're already getting a stream to work with.
+
+`replace` and `update` only work on existing archives, and so require
+a `file` argument.
+
+Sync commands without a file argument return a stream that acts on its
+input immediately in the same tick. For readable streams, this means
+that all of the data is immediately available by calling
+`stream.read()`. For writable streams, it will be acted upon as soon
+as it is provided, but this can be at any time.
+
+### Warnings
+
+Some things cause tar to emit a warning, but should usually not cause
+the entire operation to fail. There are three ways to handle
+warnings:
+
+1. **Ignore them** (default) Invalid entries won't be put in the
+ archive, and invalid entries won't be unpacked. This is usually
+ fine, but can hide failures that you might care about.
+2. **Notice them** Add an `onwarn` function to the options, or listen
+ to the `'warn'` event on any tar stream. The function will get
+   called as `onwarn(message, data)`. Handle as appropriate (see the
+   sketch after this list).
+3. **Explode them.** Set `strict: true` in the options object, and
+ `warn` messages will be emitted as `'error'` events instead. If
+ there's no `error` handler, this causes the program to crash. If
+ used with a promise-returning/callback-taking method, then it'll
+ send the error to the promise/callback.
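+
+A sketch of the second and third strategies above (assuming a local
+`my-tarball.tgz`):
+
+```js
+// notice warnings without failing the operation
+tar.x({
+  file: 'my-tarball.tgz',
+  onwarn: (message, data) => console.error('tar warning:', message)
+})
+
+// or explode: warnings become errors that reject the promise
+tar.x({ file: 'my-tarball.tgz', strict: true })
+  .catch(er => console.error('extraction failed:', er))
+```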
+
+### Examples
+
+The API mimics the `tar(1)` command line functionality, with aliases
+for more human-readable option and function names. The goal is that
+if you know how to use `tar(1)` in Unix, then you know how to use
+`require('tar')` in JavaScript.
+
+To replicate `tar czf my-tarball.tgz files and folders`, you'd do:
+
+```js
+tar.c(
+ {
+ gzip: <true|gzip options>,
+ file: 'my-tarball.tgz'
+ },
+ ['some', 'files', 'and', 'folders']
+).then(_ => { .. tarball has been created .. })
+```
+
+To replicate `tar cz files and folders > my-tarball.tgz`, you'd do:
+
+```js
+tar.c( // or tar.create
+ {
+ gzip: <true|gzip options>
+ },
+ ['some', 'files', 'and', 'folders']
+).pipe(fs.createWriteStream('my-tarball.tgz'))
+```
+
+To replicate `tar xf my-tarball.tgz` you'd do:
+
+```js
+tar.x( // or tar.extract(
+ {
+ file: 'my-tarball.tgz'
+ }
+).then(_=> { .. tarball has been dumped in cwd .. })
+```
+
+To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`:
+
+```js
+fs.createReadStream('my-tarball.tgz').pipe(
+ tar.x({
+ strip: 1,
+ C: 'some-dir' // alias for cwd:'some-dir', also ok
+ })
+)
+```
+
+To replicate `tar tf my-tarball.tgz`, do this:
+
+```js
+tar.t({
+ file: 'my-tarball.tgz',
+ onentry: entry => { .. do whatever with it .. }
+})
+```
+
+To replicate `cat my-tarball.tgz | tar t` do:
+
+```js
+fs.createReadStream('my-tarball.tgz')
+ .pipe(tar.t())
+ .on('entry', entry => { .. do whatever with it .. })
+```
+
+To do anything synchronous, add `sync: true` to the options. Note
+that sync functions don't take a callback and don't return a promise.
+When the function returns, it's already done. Sync methods without a
+file argument return a sync stream, which flushes immediately. But,
+of course, it still won't be done until you `.end()` it.
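+
+For example (a sketch):
+
+```js
+// sync creation without a file: the archive data is ready immediately
+const data = tar.c({ sync: true, gzip: true }, ['some', 'files']).read()
+```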
+
+To filter entries, add `filter: <function>` to the options.
+Tar-creating methods call the filter with `filter(path, stat)`.
+Tar-reading methods (including extraction) call the filter with
+`filter(path, entry)`. The filter is called in the `this`-context of
+the `Pack` or `Unpack` stream object.
+
+The arguments list to `tar t` and `tar x` specify a list of filenames
+to extract or list, so they're equivalent to a filter that tests if
+the file is in the list.
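+
+For example, to extract only the JavaScript files from an archive (a
+sketch; the path test is illustrative):
+
+```js
+tar.x({
+  file: 'my-tarball.tgz',
+  filter: (path, entry) => path.endsWith('.js')
+})
+```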
+
+For those who _aren't_ fans of tar's single-character command names:
+
+```
+tar.c === tar.create
+tar.r === tar.replace (appends to archive, file is required)
+tar.u === tar.update (appends if newer, file is required)
+tar.x === tar.extract
+tar.t === tar.list
+```
+
+Keep reading for all the command descriptions and options, as well as
+the low-level API that they are built on.
+
+### tar.c(options, fileList, callback) [alias: tar.create]
+
+Create a tarball archive.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Write the tarball archive to the specified filename. If this
+ is specified, then the callback will be fired when the file has been
+ written, and a promise will be returned that resolves when the file
+ is written. If a filename is not specified, then a Readable Stream
+ will be returned which will emit the file data. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+ will be fully written after the call to `tar.c`. If this is set,
+ and a file is not provided, then the resulting stream will already
+ have the data ready to `read` or `emit('data')` as soon as you
+ request it.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths. [Alias: `P`]
+- `mode` The mode to set on the created file archive
+- `noDirRecurse` Do not recursively archive the contents of
+ directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+ [Alias: `m`, `no-mtime`]
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+  everything added to the archive. Overridden by `noMtime`. (See the
+  sketch at the end of this section.)
+
+
+The following options are mostly internal, but can be modified in some
+advanced use cases, such as re-using caches between runs.
+
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls `lstat`.
+- `readdirCache` A Map object that caches calls to `readdir`.
+- `jobs` A number specifying how many concurrent jobs to run.
+ Defaults to 4.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
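+
+For example, combining the `portable` and `mtime` options described
+above yields archives whose entry metadata does not vary across
+machines or runs (a sketch; the paths are illustrative):
+
+```js
+tar.c({
+  file: 'out.tgz',
+  gzip: true,
+  portable: true,
+  mtime: new Date('2020-01-01T00:00:00Z')
+}, ['src']).then(() => {
+  // out.tgz omits system-specific metadata and has a fixed mtime
+})
+```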
+
+### tar.x(options, fileList, callback) [alias: tar.extract]
+
+Extract a tarball archive.
+
+The `fileList` is an array of paths to extract from the tarball. If
+no paths are provided, then all the entries are extracted.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Note that all directories that are created will be forced to be
+writable, readable, and listable by their owner, to avoid cases where
+a directory prevents extraction of child entries by virtue of its
+mode.
+
+Most extraction errors will cause a `warn` event to be emitted. If
+the `cwd` is missing, or not a directory, then the extraction will
+fail completely.
+
+The following options are supported:
+
+- `cwd` Extract files relative to the specified directory. Defaults
+ to `process.cwd()`. If provided, this must exist and must be a
+ directory. [Alias: `C`]
+- `file` The archive file to extract. If not specified, then a
+ Writable stream is returned where the archive data should be
+ written. [Alias: `f`]
+- `sync` Create files and directories synchronously.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being unpacked. Return `true` to unpack the entry from the
+ archive, or `false` to skip it.
+- `newer` Set to true to keep the existing file on disk if it's newer
+ than the file in the archive. [Alias: `keep-newer`,
+ `keep-newer-files`]
+- `keep` Do not overwrite existing files. In particular, if a file
+ appears more than once in an archive, later copies will not
+ overwrite earlier copies. [Alias: `k`, `keep-existing`]
+- `preservePaths` Allow absolute paths, paths containing `..`, and
+ extracting through symbolic links. By default, `/` is stripped from
+ absolute paths, `..` paths are not extracted, and any file whose
+ location would be modified by a symbolic link is not extracted.
+ [Alias: `P`]
+- `unlink` Unlink files before creating them. Without this option,
+ tar overwrites existing files, which preserves existing hardlinks.
+ With this option, existing hardlinks will be broken, as will any
+ symlink that would affect the location of an extracted file. [Alias:
+ `U`]
+- `strip` Remove the specified number of leading path elements.
+ Pathnames with fewer elements will be silently skipped. Note that
+ the pathname is edited after applying the filter, but before
+ security checks. [Alias: `strip-components`, `stripComponents`]
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `preserveOwner` If true, tar will set the `uid` and `gid` of
+ extracted entries to the `uid` and `gid` fields in the archive.
+ This defaults to true when run as root, and false otherwise. If
+ false, then files and directories will be set with the owner and
+ group of the user running the process. This is similar to `-p` in
+ `tar(1)`, but ACLs and other system-specific data is never unpacked
+ in this implementation, and modes are set by default already.
+ [Alias: `p`]
+- `uid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified user id, regardless of the `uid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `gid` option.
+- `gid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified group id, regardless of the `gid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `uid` option.
+- `noMtime` Set to true to omit writing `mtime` value for extracted
+ entries. [Alias: `m`, `no-mtime`]
+- `transform` Provide a function that takes an `entry` object, and
+ returns a stream, or any falsey value. If a stream is provided,
+ then that stream's data will be written instead of the contents of
+ the archive entry. If a falsey value is provided, then the entry is
+ written to disk as normal. (To exclude items from extraction, use
+ the `filter` option described above.)
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter.
+
+The following options are mostly internal, but can be modified in some
+advanced use cases, such as re-using caches between runs.
+
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `umask` Filter the modes of entries like `process.umask()`.
+- `dmode` Default mode for directories
+- `fmode` Default mode for files
+- `dirCache` A Map object of which directories exist.
+- `maxMetaEntrySize` The maximum size of meta entries that is
+ supported. Defaults to 1 MB.
+
+Note that using an asynchronous stream type with the `transform`
+option will cause undefined behavior in sync extractions.
+[MiniPass](http://npm.im/minipass)-based streams are designed for this
+use case.
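+
+A sketch of a `transform` that upper-cases file contents on their way
+to disk (illustrative only):
+
+```js
+const Minipass = require('minipass')
+
+class Upper extends Minipass {
+  write (chunk, encoding, cb) {
+    return super.write(chunk.toString().toUpperCase(), 'utf8', cb)
+  }
+}
+
+tar.x({
+  file: 'my-tarball.tgz',
+  // transform file entries; a falsey return writes the entry as normal
+  transform: entry => entry.type === 'File' ? new Upper() : null
+})
+```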
+
+### tar.t(options, fileList, callback) [alias: tar.list]
+
+List the contents of a tarball archive.
+
+The `fileList` is an array of paths to list from the tarball. If
+no paths are provided, then all the entries are listed.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Returns an event emitter that emits `entry` events with
+`tar.ReadEntry` objects. However, they don't emit `'data'` or `'end'`
+events. (If you want to get actual readable entries, use the
+`tar.Parse` class instead.)
+
+The following options are supported:
+
+- `cwd` Extract files relative to the specified directory. Defaults
+ to `process.cwd()`. [Alias: `C`]
+- `file` The archive file to list. If not specified, then a
+ Writable stream is returned where the archive data should be
+ written. [Alias: `f`]
+- `sync` Read the specified file synchronously. (This has no effect
+ when a file option isn't specified, because entries are emitted as
+ fast as they are parsed from the stream anyway.)
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being listed. Return `true` to emit the entry from the
+ archive, or `false` to skip it.
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter. This is important for when both `file` and
+ `sync` are set, because it will be called synchronously.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noResume` By default, `entry` streams are resumed immediately after
+ the call to `onentry`. Set `noResume: true` to suppress this
+ behavior. Note that by opting into this, the stream will never
+ complete until the entry data is consumed.
+
+### tar.u(options, fileList, callback) [alias: tar.update]
+
+Add files to an archive if they are newer than the entry already in
+the tarball archive.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Required. Write the tarball archive to the specified
+ filename. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+ will be fully written after the call to `tar.c`.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for adding entries to the
+ archive. Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths. [Alias: `P`]
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+ directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+ [Alias: `m`, `no-mtime`]
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
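+For example, a minimal sketch that refreshes an entry if the on-disk
+file is newer (both paths are placeholders):
+
+```js
+const tar = require('tar')
+
+tar.u({ file: 'my-archive.tar' }, ['src/index.js'])
+  .then(() => console.log('updated'))
+```
+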
+### tar.r(options, fileList, callback) [alias: tar.replace]
+
+Add files to an existing archive. Because later entries override
+earlier entries, this effectively replaces any existing entries.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Required. Write the tarball archive to the specified
+ filename. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+  will be fully written after the call to `tar.r`.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for adding entries to the
+ archive. Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+  object with settings for `zlib.Gzip()`. [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths. [Alias: `P`]
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+ directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+ [Alias: `m`, `no-mtime`]
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
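+For example, a minimal sketch using sync mode, so the archive is
+fully written by the time the call returns (paths are placeholders):
+
+```js
+const tar = require('tar')
+
+tar.r({ file: 'my-archive.tar', sync: true }, ['docs/README.md'])
+```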
+
+## Low-Level API
+
+### class tar.Pack
+
+A readable tar stream.
+
+Has all the standard readable stream interface stuff. `'data'` and
+`'end'` events, `read()` method, `pause()` and `resume()`, etc.
+
+#### constructor(options)
+
+The following options are supported:
+
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`.
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+  object with settings for `zlib.Gzip()`.
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `readdirCache` A Map object that caches calls to `readdir`.
+- `jobs` A number specifying how many concurrent jobs to run.
+ Defaults to 4.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+ directories.
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such.
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
+#### add(path)
+
+Adds an entry to the archive. Returns the Pack stream.
+
+#### write(path)
+
+Adds an entry to the archive. Returns true if flushed.
+
+#### end()
+
+Finishes the archive.
+
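+For example, a minimal sketch that streams a gzipped archive to disk
+(`out.tgz` is a placeholder, and `package.json` is assumed to exist
+in the cwd):
+
+```js
+const tar = require('tar')
+const fs = require('fs')
+
+new tar.Pack({ gzip: true })
+  .add('package.json')
+  .end()
+  .pipe(fs.createWriteStream('out.tgz'))
+```
+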
+### class tar.Pack.Sync
+
+Synchronous version of `tar.Pack`.
+
+### class tar.Unpack
+
+A writable stream that unpacks a tar archive onto the file system.
+
+All the normal writable stream stuff is supported. `write()` and
+`end()` methods, `'drain'` events, etc.
+
+Note that all directories that are created will be forced to be
+writable, readable, and listable by their owner, to avoid cases where
+a directory prevents extraction of child entries by virtue of its
+mode.
+
+`'close'` is emitted when it's done writing stuff to the file system.
+
+Most unpack errors will cause a `warn` event to be emitted. If the
+`cwd` is missing, or not a directory, then an error will be emitted.
+
+#### constructor(options)
+
+- `cwd` Extract files relative to the specified directory. Defaults
+ to `process.cwd()`. If provided, this must exist and must be a
+ directory.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being unpacked. Return `true` to unpack the entry from the
+ archive, or `false` to skip it.
+- `newer` Set to true to keep the existing file on disk if it's newer
+ than the file in the archive.
+- `keep` Do not overwrite existing files. In particular, if a file
+ appears more than once in an archive, later copies will not
+ overwrite earlier copies.
+- `preservePaths` Allow absolute paths, paths containing `..`, and
+ extracting through symbolic links. By default, `/` is stripped from
+ absolute paths, `..` paths are not extracted, and any file whose
+ location would be modified by a symbolic link is not extracted.
+- `unlink` Unlink files before creating them. Without this option,
+ tar overwrites existing files, which preserves existing hardlinks.
+ With this option, existing hardlinks will be broken, as will any
+ symlink that would affect the location of an extracted file.
+- `strip` Remove the specified number of leading path elements.
+ Pathnames with fewer elements will be silently skipped. Note that
+ the pathname is edited after applying the filter, but before
+ security checks.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `umask` Filter the modes of entries like `process.umask()`.
+- `dmode` Default mode for directories
+- `fmode` Default mode for files
+- `dirCache` A Map object of which directories exist.
+- `maxMetaEntrySize` The maximum supported size of meta entries.
+  Defaults to 1 MB.
+- `preserveOwner` If true, tar will set the `uid` and `gid` of
+ extracted entries to the `uid` and `gid` fields in the archive.
+ This defaults to true when run as root, and false otherwise. If
+ false, then files and directories will be set with the owner and
+ group of the user running the process. This is similar to `-p` in
+ `tar(1)`, but ACLs and other system-specific data is never unpacked
+ in this implementation, and modes are set by default already.
+- `win32` True if on a windows platform. Causes behavior where
+ filenames containing `<|>?` chars are converted to
+ windows-compatible values while being unpacked.
+- `uid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified user id, regardless of the `uid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `gid` option.
+- `gid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified group id, regardless of the `gid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `uid` option.
+- `noMtime` Set to true to omit writing `mtime` value for extracted
+ entries.
+- `transform` Provide a function that takes an `entry` object, and
+ returns a stream, or any falsey value. If a stream is provided,
+ then that stream's data will be written instead of the contents of
+ the archive entry. If a falsey value is provided, then the entry is
+ written to disk as normal. (To exclude items from extraction, use
+ the `filter` option described above.)
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter.
+
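+For example, a minimal sketch that pipes an archive from disk into an
+Unpack stream (paths are placeholders; `./output` must already
+exist):
+
+```js
+const tar = require('tar')
+const fs = require('fs')
+
+fs.createReadStream('my-archive.tar')
+  .pipe(new tar.Unpack({ cwd: './output', strip: 1 }))
+  .on('close', () => console.log('done writing to disk'))
+```
+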
+### class tar.Unpack.Sync
+
+Synchronous version of `tar.Unpack`.
+
+Note that using an asynchronous stream type with the `transform`
+option will cause undefined behavior in sync unpack streams.
+[MiniPass](http://npm.im/minipass)-based streams are designed for this
+use case.
+
+### class tar.Parse
+
+A writable stream that parses a tar archive stream. All the standard
+writable stream stuff is supported.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Emits `'entry'` events with `tar.ReadEntry` objects, which are
+themselves readable streams that you can pipe wherever.
+
+Each `entry` will not emit until the one before it is flushed through,
+so make sure to either consume the data (with `on('data', ...)` or
+`.pipe(...)`) or throw it away with `.resume()` to keep the stream
+flowing.
+
+#### constructor(options)
+
+Returns an event emitter that emits `entry` events with
+`tar.ReadEntry` objects.
+
+The following options are supported:
+
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being listed. Return `true` to emit the entry from the
+ archive, or `false` to skip it.
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+
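+For example, a minimal sketch that lists only `.js` entries and
+discards their bodies to keep the stream flowing (`my-archive.tgz`
+is a placeholder):
+
+```js
+const tar = require('tar')
+const fs = require('fs')
+
+const parser = new tar.Parse({
+  filter: path => path.endsWith('.js')
+})
+parser.on('entry', entry => {
+  console.log(entry.path, entry.size)
+  entry.resume() // discard the body so the next entry can emit
+})
+fs.createReadStream('my-archive.tgz').pipe(parser)
+```
+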
+#### abort(message, error)
+
+Stop all parsing activities. This is called when there are zlib
+errors. It also emits a warning with the message and error provided.
+
+### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass)
+
+A representation of an entry that is being read out of a tar archive.
+
+It has the following fields:
+
+- `extended` The extended metadata object provided to the constructor.
+- `globalExtended` The global extended metadata object provided to the
+ constructor.
+- `remain` The number of bytes remaining to be written into the
+ stream.
+- `blockRemain` The number of 512-byte blocks remaining to be written
+ into the stream.
+- `ignore` Whether this entry should be ignored.
+- `meta` True if this represents metadata about the next entry, false
+ if it represents a filesystem object.
+- All the fields from the header, extended header, and global extended
+ header are added to the ReadEntry object. So it has `path`, `type`,
+  `size`, `mode`, and so on.
+
+#### constructor(header, extended, globalExtended)
+
+Create a new ReadEntry object with the specified header, extended
+header, and global extended header values.
+
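+For example, a minimal sketch constructing a ReadEntry by hand from a
+synthetic header (normally `tar.Parse` does this for you):
+
+```js
+const tar = require('tar')
+
+const header = new tar.Header({
+  path: 'hello.txt',
+  type: 'File',
+  size: 0,
+  mtime: new Date()
+})
+const entry = new tar.ReadEntry(header)
+console.log(entry.path, entry.remain) // hello.txt 0
+```
+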
+### class tar.WriteEntry extends [MiniPass](http://npm.im/minipass)
+
+A representation of an entry that is being written from the file
+system into a tar archive.
+
+Emits data for the Header, and for the Pax Extended Header if one is
+required, as well as any body data.
+
+Creating a WriteEntry for a directory does not also create
+WriteEntry objects for all of the directory contents.
+
+It has the following fields:
+
+- `path` The path field that will be written to the archive. By
+ default, this is also the path from the cwd to the file system
+ object.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `myuid` If supported, the uid of the user running the current
+ process.
+- `myuser` The `env.USER` string if set, or `''`. Set as the entry
+ `uname` field if the file's `uid` matches `this.myuid`.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 1 MB.
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`.
+- `absolute` The absolute path to the entry on the filesystem. By
+ default, this is `path.resolve(this.cwd, this.path)`, but it can be
+ overridden explicitly.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `win32` True if on a windows platform. Causes behavior where paths
+ replace `\` with `/` and filenames containing the windows-compatible
+ forms of `<|>?:` characters are converted to actual `<|>?:` characters
+ in the archive.
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+
+
+#### constructor(path, options)
+
+`path` is the path of the entry as it is written in the archive.
+
+The following options are supported:
+
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 1 MB.
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`.
+- `absolute` The absolute path to the entry on the filesystem. By
+ default, this is `path.resolve(this.cwd, this.path)`, but it can be
+ overridden explicitly.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `win32` True if on a windows platform. Causes behavior where paths
+ replace `\` with `/`.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+- `umask` Set to restrict the modes on the entries in the archive,
+ somewhat like how umask works on file creation. Defaults to
+ `process.umask()` on unix systems, or `0o22` on Windows.
+
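+For example, a minimal sketch streaming a single file from disk as
+raw tar bytes (`package.json` is assumed to exist relative to the
+cwd):
+
+```js
+const tar = require('tar')
+
+const entry = new tar.WriteEntry('package.json', { portable: true })
+entry.on('data', chunk => { /* header block, then body blocks */ })
+entry.on('end', () => console.log('entry fully written'))
+```
+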
+#### warn(message, data)
+
+If strict, emit an error with the provided message.
+
+Otherwise, emit a `'warn'` event with the provided message and data.
+
+### class tar.WriteEntry.Sync
+
+Synchronous version of `tar.WriteEntry`.
+
+### class tar.WriteEntry.Tar
+
+A version of tar.WriteEntry that gets its data from a tar.ReadEntry
+instead of from the filesystem.
+
+#### constructor(readEntry, options)
+
+`readEntry` is the entry being read out of another archive.
+
+The following options are supported:
+
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+
+### class tar.Header
+
+A class for reading and writing header blocks.
+
+It has the following fields:
+
+- `nullBlock` True if decoding a block which is entirely composed of
+ `0x00` null bytes. (Useful because tar files are terminated by
+ at least 2 null blocks.)
+- `cksumValid` True if the checksum in the header is valid, false
+ otherwise.
+- `needPax` True if the values, as encoded, will require a Pax
+ extended header.
+- `path` The path of the entry.
+- `mode` The 4 lowest-order octal digits of the file mode. That is,
+ read/write/execute permissions for world, group, and owner, and the
+ setuid, setgid, and sticky bits.
+- `uid` Numeric user id of the file owner
+- `gid` Numeric group id of the file owner
+- `size` Size of the file in bytes
+- `mtime` Modified time of the file
+- `cksum` The checksum of the header. This is generated by adding all
+ the bytes of the header block, treating the checksum field itself as
+ all ascii space characters (that is, `0x20`).
+- `type` The human-readable name of the type of entry this represents,
+ or the alphanumeric key if unknown.
+- `typeKey` The alphanumeric key for the type of entry this header
+ represents.
+- `linkpath` The target of Link and SymbolicLink entries.
+- `uname` Human-readable user name of the file owner
+- `gname` Human-readable group name of the file owner
+- `devmaj` The major portion of the device number. Always `0` for
+ files, directories, and links.
+- `devmin` The minor portion of the device number. Always `0` for
+ files, directories, and links.
+- `atime` File access time.
+- `ctime` File change time.
+
+#### constructor(data, [offset=0])
+
+`data` is optional. It is either a Buffer that should be interpreted
+as a tar Header starting at the specified offset and continuing for
+512 bytes, or a data object of keys and values to set on the header
+object, and eventually encode as a tar Header.
+
+#### decode(block, offset)
+
+Decode the provided buffer starting at the specified offset.
+
+The buffer must contain at least 512 bytes past the specified offset.
+
+#### set(data)
+
+Set the fields in the data object.
+
+#### encode(buffer, offset)
+
+Encode the header fields into the buffer at the specified offset.
+
+Returns `this.needPax` to indicate whether a Pax Extended Header is
+required to properly encode the specified data.
+
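+For example, a minimal sketch that encodes a synthetic header into
+its 512-byte block:
+
+```js
+const tar = require('tar')
+
+const h = new tar.Header({
+  path: 'hello.txt',
+  mode: 0o644,
+  size: 5,
+  type: 'File',
+  mtime: new Date()
+})
+h.encode()             // fills h.block, a freshly allocated 512-byte Buffer
+console.log(h.needPax) // false: everything fits in the ustar format
+```
+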
+### class tar.Pax
+
+An object representing a set of key-value pairs in a Pax extended
+header entry.
+
+It has the following fields. Where the same name is used, they have
+the same semantics as the tar.Header field of the same name.
+
+- `global` True if this represents a global extended header, or false
+ if it is for a single entry.
+- `atime`
+- `charset`
+- `comment`
+- `ctime`
+- `gid`
+- `gname`
+- `linkpath`
+- `mtime`
+- `path`
+- `size`
+- `uid`
+- `uname`
+- `dev`
+- `ino`
+- `nlink`
+
+#### constructor(object, global)
+
+Set the fields from the provided object. `global` is a boolean that
+defaults to false.
+
+#### encode()
+
+Return a Buffer containing the header and body for the Pax extended
+header entry, or `null` if there is nothing to encode.
+
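+For example, a minimal sketch that encodes a couple of fields as a
+pax extended header entry:
+
+```js
+const tar = require('tar')
+
+const pax = new tar.Pax({ path: 'some/long/path.txt', mtime: new Date() })
+const buf = pax.encode() // header block plus body blocks
+```
+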
+#### encodeBody()
+
+Return a string representing the body of the pax extended header
+entry.
+
+#### encodeField(fieldName)
+
+Return a string representing the key/value encoding for the specified
+fieldName, or `''` if the field is unset.
+
+### tar.Pax.parse(string, extended, global)
+
+Return a new Pax object created by parsing the contents of the string
+provided.
+
+If the `extended` object is set, then also add the fields from that
+object. (This is necessary because multiple metadata entries can
+occur in sequence.)
+
+### tar.types
+
+A translation table for the `type` field in tar headers.
+
+#### tar.types.name.get(code)
+
+Get the human-readable name for a given alphanumeric code.
+
+#### tar.types.code.get(name)
+
+Get the alphanumeric code for a given human-readable name.
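+
+For example:
+
+```js
+const tar = require('tar')
+
+console.log(tar.types.name.get('0'))    // 'File'
+console.log(tar.types.code.get('File')) // '0'
+```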
diff --git a/node_modules/node-gyp/node_modules/tar/index.js b/node_modules/node-gyp/node_modules/tar/index.js
new file mode 100644
index 000000000..c9ae06e79
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/index.js
@@ -0,0 +1,18 @@
+'use strict'
+
+// high-level commands
+exports.c = exports.create = require('./lib/create.js')
+exports.r = exports.replace = require('./lib/replace.js')
+exports.t = exports.list = require('./lib/list.js')
+exports.u = exports.update = require('./lib/update.js')
+exports.x = exports.extract = require('./lib/extract.js')
+
+// classes
+exports.Pack = require('./lib/pack.js')
+exports.Unpack = require('./lib/unpack.js')
+exports.Parse = require('./lib/parse.js')
+exports.ReadEntry = require('./lib/read-entry.js')
+exports.WriteEntry = require('./lib/write-entry.js')
+exports.Header = require('./lib/header.js')
+exports.Pax = require('./lib/pax.js')
+exports.types = require('./lib/types.js')
diff --git a/node_modules/tar/lib/buffer.js b/node_modules/node-gyp/node_modules/tar/lib/buffer.js
index 7876d5b3e..7876d5b3e 100644
--- a/node_modules/tar/lib/buffer.js
+++ b/node_modules/node-gyp/node_modules/tar/lib/buffer.js
diff --git a/node_modules/node-gyp/node_modules/tar/lib/create.js b/node_modules/node-gyp/node_modules/tar/lib/create.js
new file mode 100644
index 000000000..a37aa52e6
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/create.js
@@ -0,0 +1,105 @@
+'use strict'
+
+// tar -c
+const hlo = require('./high-level-opt.js')
+
+const Pack = require('./pack.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const t = require('./list.js')
+const path = require('path')
+
+const c = module.exports = (opt_, files, cb) => {
+ if (typeof files === 'function')
+ cb = files
+
+ if (Array.isArray(opt_))
+ files = opt_, opt_ = {}
+
+ if (!files || !Array.isArray(files) || !files.length)
+ throw new TypeError('no files or directories specified')
+
+ files = Array.from(files)
+
+ const opt = hlo(opt_)
+
+ if (opt.sync && typeof cb === 'function')
+ throw new TypeError('callback not supported for sync tar functions')
+
+ if (!opt.file && typeof cb === 'function')
+ throw new TypeError('callback only supported with file option')
+
+ return opt.file && opt.sync ? createFileSync(opt, files)
+ : opt.file ? createFile(opt, files, cb)
+ : opt.sync ? createSync(opt, files)
+ : create(opt, files)
+}
+
+const createFileSync = (opt, files) => {
+ const p = new Pack.Sync(opt)
+ const stream = new fsm.WriteStreamSync(opt.file, {
+ mode: opt.mode || 0o666
+ })
+ p.pipe(stream)
+ addFilesSync(p, files)
+}
+
+const createFile = (opt, files, cb) => {
+ const p = new Pack(opt)
+ const stream = new fsm.WriteStream(opt.file, {
+ mode: opt.mode || 0o666
+ })
+ p.pipe(stream)
+
+ const promise = new Promise((res, rej) => {
+ stream.on('error', rej)
+ stream.on('close', res)
+ p.on('error', rej)
+ })
+
+ addFilesAsync(p, files)
+
+ return cb ? promise.then(cb, cb) : promise
+}
+
+const addFilesSync = (p, files) => {
+ files.forEach(file => {
+ if (file.charAt(0) === '@')
+ t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ sync: true,
+ noResume: true,
+ onentry: entry => p.add(entry)
+ })
+ else
+ p.add(file)
+ })
+ p.end()
+}
+
+const addFilesAsync = (p, files) => {
+ while (files.length) {
+ const file = files.shift()
+ if (file.charAt(0) === '@')
+ return t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ noResume: true,
+ onentry: entry => p.add(entry)
+ }).then(_ => addFilesAsync(p, files))
+ else
+ p.add(file)
+ }
+ p.end()
+}
+
+const createSync = (opt, files) => {
+ const p = new Pack.Sync(opt)
+ addFilesSync(p, files)
+ return p
+}
+
+const create = (opt, files) => {
+ const p = new Pack(opt)
+ addFilesAsync(p, files)
+ return p
+}
diff --git a/node_modules/node-gyp/node_modules/tar/lib/extract.js b/node_modules/node-gyp/node_modules/tar/lib/extract.js
new file mode 100644
index 000000000..cbb458a0a
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/extract.js
@@ -0,0 +1,112 @@
+'use strict'
+
+// tar -x
+const hlo = require('./high-level-opt.js')
+const Unpack = require('./unpack.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const path = require('path')
+
+const x = module.exports = (opt_, files, cb) => {
+ if (typeof opt_ === 'function')
+ cb = opt_, files = null, opt_ = {}
+ else if (Array.isArray(opt_))
+ files = opt_, opt_ = {}
+
+ if (typeof files === 'function')
+ cb = files, files = null
+
+ if (!files)
+ files = []
+ else
+ files = Array.from(files)
+
+ const opt = hlo(opt_)
+
+ if (opt.sync && typeof cb === 'function')
+ throw new TypeError('callback not supported for sync tar functions')
+
+ if (!opt.file && typeof cb === 'function')
+ throw new TypeError('callback only supported with file option')
+
+ if (files.length)
+ filesFilter(opt, files)
+
+ return opt.file && opt.sync ? extractFileSync(opt)
+ : opt.file ? extractFile(opt, cb)
+ : opt.sync ? extractSync(opt)
+ : extract(opt)
+}
+
+// construct a filter that limits the file entries listed
+// include child entries if a dir is included
+const filesFilter = (opt, files) => {
+ const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
+ const filter = opt.filter
+
+ const mapHas = (file, r) => {
+ const root = r || path.parse(file).root || '.'
+ const ret = file === root ? false
+ : map.has(file) ? map.get(file)
+ : mapHas(path.dirname(file), root)
+
+ map.set(file, ret)
+ return ret
+ }
+
+ opt.filter = filter
+ ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
+ : file => mapHas(file.replace(/\/+$/, ''))
+}
+
+const extractFileSync = opt => {
+ const u = new Unpack.Sync(opt)
+
+ const file = opt.file
+ const stat = fs.statSync(file)
+ // This trades a zero-byte read() syscall for a stat
+ // However, it will usually result in less memory allocation
+ const readSize = opt.maxReadSize || 16*1024*1024
+ const stream = new fsm.ReadStreamSync(file, {
+ readSize: readSize,
+ size: stat.size
+ })
+ stream.pipe(u)
+}
+
+const extractFile = (opt, cb) => {
+ const u = new Unpack(opt)
+ const readSize = opt.maxReadSize || 16*1024*1024
+
+ const file = opt.file
+ const p = new Promise((resolve, reject) => {
+ u.on('error', reject)
+ u.on('close', resolve)
+
+ // This trades a zero-byte read() syscall for a stat
+ // However, it will usually result in less memory allocation
+ fs.stat(file, (er, stat) => {
+ if (er)
+ reject(er)
+ else {
+ const stream = new fsm.ReadStream(file, {
+ readSize: readSize,
+ size: stat.size
+ })
+ stream.on('error', reject)
+ stream.pipe(u)
+ }
+ })
+ })
+ return cb ? p.then(cb, cb) : p
+}
+
+const extractSync = opt => {
+ return new Unpack.Sync(opt)
+}
+
+const extract = opt => {
+ return new Unpack(opt)
+}
diff --git a/node_modules/node-gyp/node_modules/tar/lib/header.js b/node_modules/node-gyp/node_modules/tar/lib/header.js
new file mode 100644
index 000000000..d29c3b990
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/header.js
@@ -0,0 +1,289 @@
+'use strict'
+// parse a 512-byte header block to a data object, or vice-versa
+// encode returns `true` if a pax extended header is needed, because
+// the data could not be faithfully encoded in a simple header.
+// (Also, check header.needPax to see if it needs a pax header.)
+
+const Buffer = require('./buffer.js')
+const types = require('./types.js')
+const pathModule = require('path').posix
+const large = require('./large-numbers.js')
+
+const SLURP = Symbol('slurp')
+const TYPE = Symbol('type')
+
+class Header {
+ constructor (data, off, ex, gex) {
+ this.cksumValid = false
+ this.needPax = false
+ this.nullBlock = false
+
+ this.block = null
+ this.path = null
+ this.mode = null
+ this.uid = null
+ this.gid = null
+ this.size = null
+ this.mtime = null
+ this.cksum = null
+ this[TYPE] = '0'
+ this.linkpath = null
+ this.uname = null
+ this.gname = null
+ this.devmaj = 0
+ this.devmin = 0
+ this.atime = null
+ this.ctime = null
+
+ if (Buffer.isBuffer(data))
+ this.decode(data, off || 0, ex, gex)
+ else if (data)
+ this.set(data)
+ }
+
+ decode (buf, off, ex, gex) {
+ if (!off)
+ off = 0
+
+ if (!buf || !(buf.length >= off + 512))
+ throw new Error('need 512 bytes for header')
+
+ this.path = decString(buf, off, 100)
+ this.mode = decNumber(buf, off + 100, 8)
+ this.uid = decNumber(buf, off + 108, 8)
+ this.gid = decNumber(buf, off + 116, 8)
+ this.size = decNumber(buf, off + 124, 12)
+ this.mtime = decDate(buf, off + 136, 12)
+ this.cksum = decNumber(buf, off + 148, 12)
+
+ // if we have extended or global extended headers, apply them now
+ // See https://github.com/npm/node-tar/pull/187
+ this[SLURP](ex)
+ this[SLURP](gex, true)
+
+ // old tar versions marked dirs as a file with a trailing /
+ this[TYPE] = decString(buf, off + 156, 1)
+ if (this[TYPE] === '')
+ this[TYPE] = '0'
+ if (this[TYPE] === '0' && this.path.substr(-1) === '/')
+ this[TYPE] = '5'
+
+ // tar implementations sometimes incorrectly put the stat(dir).size
+ // as the size in the tarball, even though Directory entries are
+ // not able to have any body at all. In the very rare chance that
+ // it actually DOES have a body, we weren't going to do anything with
+ // it anyway, and it'll just be a warning about an invalid header.
+ if (this[TYPE] === '5')
+ this.size = 0
+
+ this.linkpath = decString(buf, off + 157, 100)
+ if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') {
+ this.uname = decString(buf, off + 265, 32)
+ this.gname = decString(buf, off + 297, 32)
+ this.devmaj = decNumber(buf, off + 329, 8)
+ this.devmin = decNumber(buf, off + 337, 8)
+ if (buf[off + 475] !== 0) {
+ // definitely a prefix, definitely >130 chars.
+ const prefix = decString(buf, off + 345, 155)
+ this.path = prefix + '/' + this.path
+ } else {
+ const prefix = decString(buf, off + 345, 130)
+ if (prefix)
+ this.path = prefix + '/' + this.path
+ this.atime = decDate(buf, off + 476, 12)
+ this.ctime = decDate(buf, off + 488, 12)
+ }
+ }
+
+ let sum = 8 * 0x20
+ for (let i = off; i < off + 148; i++) {
+ sum += buf[i]
+ }
+ for (let i = off + 156; i < off + 512; i++) {
+ sum += buf[i]
+ }
+ this.cksumValid = sum === this.cksum
+ if (this.cksum === null && sum === 8 * 0x20)
+ this.nullBlock = true
+ }
+
+ [SLURP] (ex, global) {
+ for (let k in ex) {
+ // we slurp in everything except for the path attribute in
+ // a global extended header, because that's weird.
+ if (ex[k] !== null && ex[k] !== undefined &&
+ !(global && k === 'path'))
+ this[k] = ex[k]
+ }
+ }
+
+ encode (buf, off) {
+ if (!buf) {
+ buf = this.block = Buffer.alloc(512)
+ off = 0
+ }
+
+ if (!off)
+ off = 0
+
+ if (!(buf.length >= off + 512))
+ throw new Error('need 512 bytes for header')
+
+ const prefixSize = this.ctime || this.atime ? 130 : 155
+ const split = splitPrefix(this.path || '', prefixSize)
+ const path = split[0]
+ const prefix = split[1]
+ this.needPax = split[2]
+
+ this.needPax = encString(buf, off, 100, path) || this.needPax
+ this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax
+ this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax
+ this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax
+ this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax
+ this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax
+ buf[off + 156] = this[TYPE].charCodeAt(0)
+ this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax
+ buf.write('ustar\u000000', off + 257, 8)
+ this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax
+ this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax
+ this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax
+ this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax
+ this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax
+ if (buf[off + 475] !== 0)
+ this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax
+ else {
+ this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax
+ this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax
+ this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax
+ }
+
+ let sum = 8 * 0x20
+ for (let i = off; i < off + 148; i++) {
+ sum += buf[i]
+ }
+ for (let i = off + 156; i < off + 512; i++) {
+ sum += buf[i]
+ }
+ this.cksum = sum
+ encNumber(buf, off + 148, 8, this.cksum)
+ this.cksumValid = true
+
+ return this.needPax
+ }
+
+ set (data) {
+ for (let i in data) {
+ if (data[i] !== null && data[i] !== undefined)
+ this[i] = data[i]
+ }
+ }
+
+ get type () {
+ return types.name.get(this[TYPE]) || this[TYPE]
+ }
+
+ get typeKey () {
+ return this[TYPE]
+ }
+
+ set type (type) {
+ if (types.code.has(type))
+ this[TYPE] = types.code.get(type)
+ else
+ this[TYPE] = type
+ }
+}
+
+const splitPrefix = (p, prefixSize) => {
+ const pathSize = 100
+ let pp = p
+ let prefix = ''
+ let ret
+ const root = pathModule.parse(p).root || '.'
+
+ if (Buffer.byteLength(pp) < pathSize)
+ ret = [pp, prefix, false]
+ else {
+ // first set prefix to the dir, and path to the base
+ prefix = pathModule.dirname(pp)
+ pp = pathModule.basename(pp)
+
+ do {
+ // both fit!
+ if (Buffer.byteLength(pp) <= pathSize &&
+ Buffer.byteLength(prefix) <= prefixSize)
+ ret = [pp, prefix, false]
+
+ // prefix fits in prefix, but path doesn't fit in path
+ else if (Buffer.byteLength(pp) > pathSize &&
+ Buffer.byteLength(prefix) <= prefixSize)
+ ret = [pp.substr(0, pathSize - 1), prefix, true]
+
+ else {
+ // make path take a bit from prefix
+ pp = pathModule.join(pathModule.basename(prefix), pp)
+ prefix = pathModule.dirname(prefix)
+ }
+ } while (prefix !== root && !ret)
+
+ // at this point, found no resolution, just truncate
+ if (!ret)
+ ret = [p.substr(0, pathSize - 1), '', true]
+ }
+ return ret
+}
+
+const decString = (buf, off, size) =>
+ buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '')
+
+const decDate = (buf, off, size) =>
+ numToDate(decNumber(buf, off, size))
+
+const numToDate = num => num === null ? null : new Date(num * 1000)
+
+const decNumber = (buf, off, size) =>
+ buf[off] & 0x80 ? large.parse(buf.slice(off, off + size))
+ : decSmallNumber(buf, off, size)
+
+const nanNull = value => isNaN(value) ? null : value
+
+const decSmallNumber = (buf, off, size) =>
+ nanNull(parseInt(
+ buf.slice(off, off + size)
+ .toString('utf8').replace(/\0.*$/, '').trim(), 8))
+
+// the maximum encodable as a null-terminated octal, by field size
+const MAXNUM = {
+ 12: 0o77777777777,
+ 8 : 0o7777777
+}
+
+const encNumber = (buf, off, size, number) =>
+ number === null ? false :
+ number > MAXNUM[size] || number < 0
+ ? (large.encode(number, buf.slice(off, off + size)), true)
+ : (encSmallNumber(buf, off, size, number), false)
+
+const encSmallNumber = (buf, off, size, number) =>
+ buf.write(octalString(number, size), off, size, 'ascii')
+
+const octalString = (number, size) =>
+ padOctal(Math.floor(number).toString(8), size)
+
+const padOctal = (string, size) =>
+ (string.length === size - 1 ? string
+ : new Array(size - string.length - 1).join('0') + string + ' ') + '\0'
+
+const encDate = (buf, off, size, date) =>
+ date === null ? false :
+ encNumber(buf, off, size, date.getTime() / 1000)
+
+// enough to fill the longest string we've got
+const NULLS = new Array(156).join('\0')
+// pad with nulls, return true if it's longer or non-ascii
+const encString = (buf, off, size, string) =>
+ string === null ? false :
+ (buf.write(string + NULLS, off, size, 'utf8'),
+ string.length !== Buffer.byteLength(string) || string.length > size)
+
+module.exports = Header
diff --git a/node_modules/node-gyp/node_modules/tar/lib/high-level-opt.js b/node_modules/node-gyp/node_modules/tar/lib/high-level-opt.js
new file mode 100644
index 000000000..7333db915
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/high-level-opt.js
@@ -0,0 +1,29 @@
+'use strict'
+
+// turn tar(1) style args like `C` into the more verbose things like `cwd`
+
+const argmap = new Map([
+ ['C', 'cwd'],
+ ['f', 'file'],
+ ['z', 'gzip'],
+ ['P', 'preservePaths'],
+ ['U', 'unlink'],
+ ['strip-components', 'strip'],
+ ['stripComponents', 'strip'],
+ ['keep-newer', 'newer'],
+ ['keepNewer', 'newer'],
+ ['keep-newer-files', 'newer'],
+ ['keepNewerFiles', 'newer'],
+ ['k', 'keep'],
+ ['keep-existing', 'keep'],
+ ['keepExisting', 'keep'],
+ ['m', 'noMtime'],
+ ['no-mtime', 'noMtime'],
+ ['p', 'preserveOwner'],
+ ['L', 'follow'],
+ ['h', 'follow']
+])
+
+const parse = module.exports = opt => opt ? Object.keys(opt).map(k => [
+ argmap.has(k) ? argmap.get(k) : k, opt[k]
+]).reduce((set, kv) => (set[kv[0]] = kv[1], set), Object.create(null)) : {}
diff --git a/node_modules/node-gyp/node_modules/tar/lib/large-numbers.js b/node_modules/node-gyp/node_modules/tar/lib/large-numbers.js
new file mode 100644
index 000000000..3e5c99255
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/large-numbers.js
@@ -0,0 +1,97 @@
+'use strict'
+// Tar can encode large and negative numbers using a leading byte of
+// 0xff for negative, and 0x80 for positive.
+
+const encode = exports.encode = (num, buf) => {
+ if (!Number.isSafeInteger(num))
+ // The number is so large that javascript cannot represent it with integer
+ // precision.
+ throw TypeError('cannot encode number outside of javascript safe integer range')
+ else if (num < 0)
+ encodeNegative(num, buf)
+ else
+ encodePositive(num, buf)
+ return buf
+}
+
+const encodePositive = (num, buf) => {
+ buf[0] = 0x80
+
+ for (var i = buf.length; i > 1; i--) {
+ buf[i-1] = num & 0xff
+ num = Math.floor(num / 0x100)
+ }
+}
+
+const encodeNegative = (num, buf) => {
+ buf[0] = 0xff
+ var flipped = false
+ num = num * -1
+ for (var i = buf.length; i > 1; i--) {
+ var byte = num & 0xff
+ num = Math.floor(num / 0x100)
+ if (flipped)
+ buf[i-1] = onesComp(byte)
+ else if (byte === 0)
+ buf[i-1] = 0
+ else {
+ flipped = true
+ buf[i-1] = twosComp(byte)
+ }
+ }
+}
+
+const parse = exports.parse = (buf) => {
+  var pre = buf[0]
+  var value
+ if (pre === 0x80)
+ value = pos(buf.slice(1, buf.length))
+ else if (pre === 0xff)
+ value = twos(buf)
+ else
+ throw TypeError('invalid base256 encoding')
+
+ if (!Number.isSafeInteger(value))
+ // The number is so large that javascript cannot represent it with integer
+ // precision.
+ throw TypeError('parsed number outside of javascript safe integer range')
+
+ return value
+}
+
+const twos = (buf) => {
+ var len = buf.length
+ var sum = 0
+ var flipped = false
+ for (var i = len - 1; i > -1; i--) {
+ var byte = buf[i]
+ var f
+ if (flipped)
+ f = onesComp(byte)
+ else if (byte === 0)
+ f = byte
+ else {
+ flipped = true
+ f = twosComp(byte)
+ }
+ if (f !== 0)
+ sum -= f * Math.pow(256, len - i - 1)
+ }
+ return sum
+}
+
+const pos = (buf) => {
+ var len = buf.length
+ var sum = 0
+ for (var i = len - 1; i > -1; i--) {
+ var byte = buf[i]
+ if (byte !== 0)
+ sum += byte * Math.pow(256, len - i - 1)
+ }
+ return sum
+}
+
+const onesComp = byte => (0xff ^ byte) & 0xff
+
+const twosComp = byte => ((0xff ^ byte) + 1) & 0xff
diff --git a/node_modules/node-gyp/node_modules/tar/lib/list.js b/node_modules/node-gyp/node_modules/tar/lib/list.js
new file mode 100644
index 000000000..250ebe001
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/list.js
@@ -0,0 +1,130 @@
+'use strict'
+
+const Buffer = require('./buffer.js')
+
+// XXX: This shares a lot in common with extract.js
+// maybe some DRY opportunity here?
+
+// tar -t
+const hlo = require('./high-level-opt.js')
+const Parser = require('./parse.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const path = require('path')
+
+const t = module.exports = (opt_, files, cb) => {
+ if (typeof opt_ === 'function')
+ cb = opt_, files = null, opt_ = {}
+ else if (Array.isArray(opt_))
+ files = opt_, opt_ = {}
+
+ if (typeof files === 'function')
+ cb = files, files = null
+
+ if (!files)
+ files = []
+ else
+ files = Array.from(files)
+
+ const opt = hlo(opt_)
+
+ if (opt.sync && typeof cb === 'function')
+ throw new TypeError('callback not supported for sync tar functions')
+
+ if (!opt.file && typeof cb === 'function')
+ throw new TypeError('callback only supported with file option')
+
+ if (files.length)
+ filesFilter(opt, files)
+
+ if (!opt.noResume)
+ onentryFunction(opt)
+
+ return opt.file && opt.sync ? listFileSync(opt)
+ : opt.file ? listFile(opt, cb)
+ : list(opt)
+}
+
+const onentryFunction = opt => {
+ const onentry = opt.onentry
+ opt.onentry = onentry ? e => {
+ onentry(e)
+ e.resume()
+ } : e => e.resume()
+}
+
+// construct a filter that limits the file entries listed
+// include child entries if a dir is included
+const filesFilter = (opt, files) => {
+ const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
+ const filter = opt.filter
+
+ const mapHas = (file, r) => {
+ const root = r || path.parse(file).root || '.'
+ const ret = file === root ? false
+ : map.has(file) ? map.get(file)
+ : mapHas(path.dirname(file), root)
+
+ map.set(file, ret)
+ return ret
+ }
+
+ opt.filter = filter
+ ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
+ : file => mapHas(file.replace(/\/+$/, ''))
+}
+
+const listFileSync = opt => {
+ const p = list(opt)
+ const file = opt.file
+ let threw = true
+ let fd
+ try {
+ const stat = fs.statSync(file)
+ const readSize = opt.maxReadSize || 16*1024*1024
+ if (stat.size < readSize) {
+ p.end(fs.readFileSync(file))
+ } else {
+ let pos = 0
+ const buf = Buffer.allocUnsafe(readSize)
+ fd = fs.openSync(file, 'r')
+ while (pos < stat.size) {
+ let bytesRead = fs.readSync(fd, buf, 0, readSize, pos)
+ pos += bytesRead
+ p.write(buf.slice(0, bytesRead))
+ }
+ p.end()
+ }
+ threw = false
+ } finally {
+ if (threw && fd)
+ try { fs.closeSync(fd) } catch (er) {}
+ }
+}
+
+const listFile = (opt, cb) => {
+ const parse = new Parser(opt)
+ const readSize = opt.maxReadSize || 16*1024*1024
+
+ const file = opt.file
+ const p = new Promise((resolve, reject) => {
+ parse.on('error', reject)
+ parse.on('end', resolve)
+
+ fs.stat(file, (er, stat) => {
+ if (er)
+ reject(er)
+ else {
+ const stream = new fsm.ReadStream(file, {
+ readSize: readSize,
+ size: stat.size
+ })
+ stream.on('error', reject)
+ stream.pipe(parse)
+ }
+ })
+ })
+ return cb ? p.then(cb, cb) : p
+}
+
+const list = opt => new Parser(opt)
diff --git a/node_modules/node-gyp/node_modules/tar/lib/mkdir.js b/node_modules/node-gyp/node_modules/tar/lib/mkdir.js
new file mode 100644
index 000000000..c6a154c24
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/mkdir.js
@@ -0,0 +1,206 @@
+'use strict'
+// wrapper around mkdirp for tar's needs.
+
+// TODO: This should probably be a class, not functionally
+// passing around state in a gazillion args.
+
+const mkdirp = require('mkdirp')
+const fs = require('fs')
+const path = require('path')
+const chownr = require('chownr')
+
+class SymlinkError extends Error {
+ constructor (symlink, path) {
+ super('Cannot extract through symbolic link')
+ this.path = path
+ this.symlink = symlink
+ }
+
+ get name () {
+    return 'SymlinkError'
+ }
+}
+
+class CwdError extends Error {
+ constructor (path, code) {
+ super(code + ': Cannot cd into \'' + path + '\'')
+ this.path = path
+ this.code = code
+ }
+
+ get name () {
+ return 'CwdError'
+ }
+}
+
+const mkdir = module.exports = (dir, opt, cb) => {
+ // if there's any overlap between mask and mode,
+ // then we'll need an explicit chmod
+ const umask = opt.umask
+ const mode = opt.mode | 0o0700
+ const needChmod = (mode & umask) !== 0
+
+ const uid = opt.uid
+ const gid = opt.gid
+ const doChown = typeof uid === 'number' &&
+ typeof gid === 'number' &&
+ ( uid !== opt.processUid || gid !== opt.processGid )
+
+ const preserve = opt.preserve
+ const unlink = opt.unlink
+ const cache = opt.cache
+ const cwd = opt.cwd
+
+ const done = (er, created) => {
+ if (er)
+ cb(er)
+ else {
+ cache.set(dir, true)
+ if (created && doChown)
+ chownr(created, uid, gid, er => done(er))
+ else if (needChmod)
+ fs.chmod(dir, mode, cb)
+ else
+ cb()
+ }
+ }
+
+ if (cache && cache.get(dir) === true)
+ return done()
+
+ if (dir === cwd)
+ return fs.stat(dir, (er, st) => {
+ if (er || !st.isDirectory())
+ er = new CwdError(dir, er && er.code || 'ENOTDIR')
+ done(er)
+ })
+
+ if (preserve)
+ return mkdirp(dir, mode, done)
+
+ const sub = path.relative(cwd, dir)
+ const parts = sub.split(/\/|\\/)
+ mkdir_(cwd, parts, mode, cache, unlink, cwd, null, done)
+}
+
+const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => {
+ if (!parts.length)
+ return cb(null, created)
+ const p = parts.shift()
+ const part = base + '/' + p
+ if (cache.get(part))
+ return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
+ fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
+}
+
+const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => er => {
+ if (er) {
+ if (er.path && path.dirname(er.path) === cwd &&
+ (er.code === 'ENOTDIR' || er.code === 'ENOENT'))
+ return cb(new CwdError(cwd, er.code))
+
+ fs.lstat(part, (statEr, st) => {
+ if (statEr)
+ cb(statEr)
+ else if (st.isDirectory())
+ mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
+ else if (unlink)
+ fs.unlink(part, er => {
+ if (er)
+ return cb(er)
+ fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
+ })
+ else if (st.isSymbolicLink())
+ return cb(new SymlinkError(part, part + '/' + parts.join('/')))
+ else
+ cb(er)
+ })
+ } else {
+ created = created || part
+ mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
+ }
+}
+
+const mkdirSync = module.exports.sync = (dir, opt) => {
+ // if there's any overlap between mask and mode,
+ // then we'll need an explicit chmod
+ const umask = opt.umask
+ const mode = opt.mode | 0o0700
+ const needChmod = (mode & umask) !== 0
+
+ const uid = opt.uid
+ const gid = opt.gid
+ const doChown = typeof uid === 'number' &&
+ typeof gid === 'number' &&
+ ( uid !== opt.processUid || gid !== opt.processGid )
+
+ const preserve = opt.preserve
+ const unlink = opt.unlink
+ const cache = opt.cache
+ const cwd = opt.cwd
+
+ const done = (created) => {
+ cache.set(dir, true)
+ if (created && doChown)
+ chownr.sync(created, uid, gid)
+ if (needChmod)
+ fs.chmodSync(dir, mode)
+ }
+
+ if (cache && cache.get(dir) === true)
+ return done()
+
+ if (dir === cwd) {
+ let ok = false
+ let code = 'ENOTDIR'
+ try {
+ ok = fs.statSync(dir).isDirectory()
+ } catch (er) {
+ code = er.code
+ } finally {
+ if (!ok)
+ throw new CwdError(dir, code)
+ }
+ done()
+ return
+ }
+
+ if (preserve)
+ return done(mkdirp.sync(dir, mode))
+
+ const sub = path.relative(cwd, dir)
+ const parts = sub.split(/\/|\\/)
+ let created = null
+ for (let p = parts.shift(), part = cwd;
+ p && (part += '/' + p);
+ p = parts.shift()) {
+
+ if (cache.get(part))
+ continue
+
+ try {
+ fs.mkdirSync(part, mode)
+ created = created || part
+ cache.set(part, true)
+ } catch (er) {
+ if (er.path && path.dirname(er.path) === cwd &&
+ (er.code === 'ENOTDIR' || er.code === 'ENOENT'))
+ return new CwdError(cwd, er.code)
+
+ const st = fs.lstatSync(part)
+ if (st.isDirectory()) {
+ cache.set(part, true)
+ continue
+ } else if (unlink) {
+ fs.unlinkSync(part)
+ fs.mkdirSync(part, mode)
+ created = created || part
+ cache.set(part, true)
+ continue
+ } else if (st.isSymbolicLink())
+ return new SymlinkError(part, part + '/' + parts.join('/'))
+ }
+ }
+
+ return done(created)
+}
diff --git a/node_modules/node-gyp/node_modules/tar/lib/mode-fix.js b/node_modules/node-gyp/node_modules/tar/lib/mode-fix.js
new file mode 100644
index 000000000..3363a3b15
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/mode-fix.js
@@ -0,0 +1,14 @@
+'use strict'
+module.exports = (mode, isDir) => {
+ mode &= 0o7777
+ // if dirs are readable, then they should be listable
+ if (isDir) {
+ if (mode & 0o400)
+ mode |= 0o100
+ if (mode & 0o40)
+ mode |= 0o10
+ if (mode & 0o4)
+ mode |= 0o1
+ }
+ return mode
+}
diff --git a/node_modules/node-gyp/node_modules/tar/lib/pack.js b/node_modules/node-gyp/node_modules/tar/lib/pack.js
new file mode 100644
index 000000000..857cea910
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/pack.js
@@ -0,0 +1,404 @@
+'use strict'
+
+const Buffer = require('./buffer.js')
+
+// A readable tar stream creator
+// Technically, this is a transform stream that you write paths into,
+// and tar format comes out of.
+// The `add()` method is like `write()` but returns this,
+// and end() returns `this` as well, so you can
+// do `new Pack(opt).add('files').add('dir').end().pipe(output)`
+// You could also do something like:
+// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
+
+class PackJob {
+ constructor (path, absolute) {
+ this.path = path || './'
+ this.absolute = absolute
+ this.entry = null
+ this.stat = null
+ this.readdir = null
+ this.pending = false
+ this.ignore = false
+ this.piped = false
+ }
+}
+
+const MiniPass = require('minipass')
+const zlib = require('minizlib')
+const ReadEntry = require('./read-entry.js')
+const WriteEntry = require('./write-entry.js')
+const WriteEntrySync = WriteEntry.Sync
+const WriteEntryTar = WriteEntry.Tar
+const Yallist = require('yallist')
+const EOF = Buffer.alloc(1024)
+const ONSTAT = Symbol('onStat')
+const ENDED = Symbol('ended')
+const QUEUE = Symbol('queue')
+const CURRENT = Symbol('current')
+const PROCESS = Symbol('process')
+const PROCESSING = Symbol('processing')
+const PROCESSJOB = Symbol('processJob')
+const JOBS = Symbol('jobs')
+const JOBDONE = Symbol('jobDone')
+const ADDFSENTRY = Symbol('addFSEntry')
+const ADDTARENTRY = Symbol('addTarEntry')
+const STAT = Symbol('stat')
+const READDIR = Symbol('readdir')
+const ONREADDIR = Symbol('onreaddir')
+const PIPE = Symbol('pipe')
+const ENTRY = Symbol('entry')
+const ENTRYOPT = Symbol('entryOpt')
+const WRITEENTRYCLASS = Symbol('writeEntryClass')
+const WRITE = Symbol('write')
+const ONDRAIN = Symbol('ondrain')
+
+const fs = require('fs')
+const path = require('path')
+const warner = require('./warn-mixin.js')
+
+const Pack = warner(class Pack extends MiniPass {
+ constructor (opt) {
+ super(opt)
+ opt = opt || Object.create(null)
+ this.opt = opt
+ this.cwd = opt.cwd || process.cwd()
+ this.maxReadSize = opt.maxReadSize
+ this.preservePaths = !!opt.preservePaths
+ this.strict = !!opt.strict
+ this.noPax = !!opt.noPax
+ this.prefix = (opt.prefix || '').replace(/(\\|\/)+$/, '')
+ this.linkCache = opt.linkCache || new Map()
+ this.statCache = opt.statCache || new Map()
+ this.readdirCache = opt.readdirCache || new Map()
+
+ this[WRITEENTRYCLASS] = WriteEntry
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+
+ this.zip = null
+ if (opt.gzip) {
+ if (typeof opt.gzip !== 'object')
+ opt.gzip = {}
+ this.zip = new zlib.Gzip(opt.gzip)
+ this.zip.on('data', chunk => super.write(chunk))
+ this.zip.on('end', _ => super.end())
+ this.zip.on('drain', _ => this[ONDRAIN]())
+ this.on('resume', _ => this.zip.resume())
+ } else
+ this.on('drain', this[ONDRAIN])
+
+ this.portable = !!opt.portable
+ this.noDirRecurse = !!opt.noDirRecurse
+ this.follow = !!opt.follow
+ this.noMtime = !!opt.noMtime
+ this.mtime = opt.mtime || null
+
+ this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true
+
+ this[QUEUE] = new Yallist
+ this[JOBS] = 0
+ this.jobs = +opt.jobs || 4
+ this[PROCESSING] = false
+ this[ENDED] = false
+ }
+
+ [WRITE] (chunk) {
+ return super.write(chunk)
+ }
+
+ add (path) {
+ this.write(path)
+ return this
+ }
+
+ end (path) {
+ if (path)
+ this.write(path)
+ this[ENDED] = true
+ this[PROCESS]()
+ return this
+ }
+
+ write (path) {
+ if (this[ENDED])
+ throw new Error('write after end')
+
+ if (path instanceof ReadEntry)
+ this[ADDTARENTRY](path)
+ else
+ this[ADDFSENTRY](path)
+ return this.flowing
+ }
+
+ [ADDTARENTRY] (p) {
+ const absolute = path.resolve(this.cwd, p.path)
+ if (this.prefix)
+ p.path = this.prefix + '/' + p.path.replace(/^\.(\/+|$)/, '')
+
+ // in this case, we don't have to wait for the stat
+ if (!this.filter(p.path, p))
+ p.resume()
+ else {
+ const job = new PackJob(p.path, absolute, false)
+ job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
+ job.entry.on('end', _ => this[JOBDONE](job))
+ this[JOBS] += 1
+ this[QUEUE].push(job)
+ }
+
+ this[PROCESS]()
+ }
+
+ [ADDFSENTRY] (p) {
+ const absolute = path.resolve(this.cwd, p)
+ if (this.prefix)
+ p = this.prefix + '/' + p.replace(/^\.(\/+|$)/, '')
+
+ this[QUEUE].push(new PackJob(p, absolute))
+ this[PROCESS]()
+ }
+
+ [STAT] (job) {
+ job.pending = true
+ this[JOBS] += 1
+ const stat = this.follow ? 'stat' : 'lstat'
+ fs[stat](job.absolute, (er, stat) => {
+ job.pending = false
+ this[JOBS] -= 1
+ if (er)
+ this.emit('error', er)
+ else
+ this[ONSTAT](job, stat)
+ })
+ }
+
+ [ONSTAT] (job, stat) {
+ this.statCache.set(job.absolute, stat)
+ job.stat = stat
+
+ // now we have the stat, we can filter it.
+ if (!this.filter(job.path, stat))
+ job.ignore = true
+
+ this[PROCESS]()
+ }
+
+ [READDIR] (job) {
+ job.pending = true
+ this[JOBS] += 1
+ fs.readdir(job.absolute, (er, entries) => {
+ job.pending = false
+ this[JOBS] -= 1
+ if (er)
+ return this.emit('error', er)
+ this[ONREADDIR](job, entries)
+ })
+ }
+
+ [ONREADDIR] (job, entries) {
+ this.readdirCache.set(job.absolute, entries)
+ job.readdir = entries
+ this[PROCESS]()
+ }
+
+ [PROCESS] () {
+ if (this[PROCESSING])
+ return
+
+ this[PROCESSING] = true
+ for (let w = this[QUEUE].head;
+ w !== null && this[JOBS] < this.jobs;
+ w = w.next) {
+ this[PROCESSJOB](w.value)
+ if (w.value.ignore) {
+ const p = w.next
+ this[QUEUE].removeNode(w)
+ w.next = p
+ }
+ }
+
+ this[PROCESSING] = false
+
+ if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
+ if (this.zip)
+ this.zip.end(EOF)
+ else {
+ super.write(EOF)
+ super.end()
+ }
+ }
+ }
+
+ get [CURRENT] () {
+ return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
+ }
+
+ [JOBDONE] (job) {
+ this[QUEUE].shift()
+ this[JOBS] -= 1
+ this[PROCESS]()
+ }
+
+ [PROCESSJOB] (job) {
+ if (job.pending)
+ return
+
+ if (job.entry) {
+ if (job === this[CURRENT] && !job.piped)
+ this[PIPE](job)
+ return
+ }
+
+ if (!job.stat) {
+ if (this.statCache.has(job.absolute))
+ this[ONSTAT](job, this.statCache.get(job.absolute))
+ else
+ this[STAT](job)
+ }
+ if (!job.stat)
+ return
+
+ // filtered out!
+ if (job.ignore)
+ return
+
+ if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
+ if (this.readdirCache.has(job.absolute))
+ this[ONREADDIR](job, this.readdirCache.get(job.absolute))
+ else
+ this[READDIR](job)
+ if (!job.readdir)
+ return
+ }
+
+ // we know it doesn't have an entry, because that got checked above
+ job.entry = this[ENTRY](job)
+ if (!job.entry) {
+ job.ignore = true
+ return
+ }
+
+ if (job === this[CURRENT] && !job.piped)
+ this[PIPE](job)
+ }
+
+ [ENTRYOPT] (job) {
+ return {
+ onwarn: (msg, data) => {
+ this.warn(msg, data)
+ },
+ noPax: this.noPax,
+ cwd: this.cwd,
+ absolute: job.absolute,
+ preservePaths: this.preservePaths,
+ maxReadSize: this.maxReadSize,
+ strict: this.strict,
+ portable: this.portable,
+ linkCache: this.linkCache,
+ statCache: this.statCache,
+ noMtime: this.noMtime,
+ mtime: this.mtime
+ }
+ }
+
+ [ENTRY] (job) {
+ this[JOBS] += 1
+ try {
+ return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
+ .on('end', () => this[JOBDONE](job))
+ .on('error', er => this.emit('error', er))
+ } catch (er) {
+ this.emit('error', er)
+ }
+ }
+
+ [ONDRAIN] () {
+ if (this[CURRENT] && this[CURRENT].entry)
+ this[CURRENT].entry.resume()
+ }
+
+ // like .pipe() but using super, because our write() is special
+ [PIPE] (job) {
+ job.piped = true
+
+ if (job.readdir)
+ job.readdir.forEach(entry => {
+ const p = this.prefix ?
+ job.path.slice(this.prefix.length + 1) || './'
+ : job.path
+
+ const base = p === './' ? '' : p.replace(/\/*$/, '/')
+ this[ADDFSENTRY](base + entry)
+ })
+
+ const source = job.entry
+ const zip = this.zip
+
+ if (zip)
+ source.on('data', chunk => {
+ if (!zip.write(chunk))
+ source.pause()
+ })
+ else
+ source.on('data', chunk => {
+ if (!super.write(chunk))
+ source.pause()
+ })
+ }
+
+ pause () {
+ if (this.zip)
+ this.zip.pause()
+ return super.pause()
+ }
+})
+
+class PackSync extends Pack {
+ constructor (opt) {
+ super(opt)
+ this[WRITEENTRYCLASS] = WriteEntrySync
+ }
+
+ // pause/resume are no-ops in sync streams.
+ pause () {}
+ resume () {}
+
+ [STAT] (job) {
+ const stat = this.follow ? 'statSync' : 'lstatSync'
+ this[ONSTAT](job, fs[stat](job.absolute))
+ }
+
+ [READDIR] (job, stat) {
+ this[ONREADDIR](job, fs.readdirSync(job.absolute))
+ }
+
+ // gotta get it all in this tick
+ [PIPE] (job) {
+ const source = job.entry
+ const zip = this.zip
+
+ if (job.readdir)
+ job.readdir.forEach(entry => {
+ const p = this.prefix ?
+ job.path.slice(this.prefix.length + 1) || './'
+ : job.path
+
+ const base = p === './' ? '' : p.replace(/\/*$/, '/')
+ this[ADDFSENTRY](base + entry)
+ })
+
+ if (zip)
+ source.on('data', chunk => {
+ zip.write(chunk)
+ })
+ else
+ source.on('data', chunk => {
+ super[WRITE](chunk)
+ })
+ }
+}
+
+Pack.Sync = PackSync
+
+module.exports = Pack
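A minimal sketch of driving the Pack stream above directly, assuming the package index exposes it as tar.Pack; the directory and file names are hypothetical:

const tar = require('tar')
const fs = require('fs')

// Stream two paths into a gzipped tarball.
const pack = new tar.Pack({ cwd: '/some/project', gzip: true })
pack.pipe(fs.createWriteStream('out.tgz'))
pack.add('lib')        // directories recurse unless noDirRecurse is set
pack.add('README.md')  // each add() queues a PackJob for [PROCESS]
pack.end()             // drains the queue, then writes the EOF blocks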
diff --git a/node_modules/node-gyp/node_modules/tar/lib/parse.js b/node_modules/node-gyp/node_modules/tar/lib/parse.js
new file mode 100644
index 000000000..43d4383dd
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/parse.js
@@ -0,0 +1,428 @@
+'use strict'
+
+// this[BUFFER] is the remainder of a chunk if we're waiting for
+// the full 512 bytes of a header to come in. We will Buffer.concat()
+// it to the next write(), which is a mem copy, but a small one.
+//
+// this[QUEUE] is a Yallist of entries that haven't been emitted
+// yet. This can only get filled up if the user keeps write()ing after
+// a write() returns false, or does a write() with more than one entry
+//
+// We don't buffer chunks; we always parse them and either create an
+// entry, or push it into the active entry. The ReadEntry class knows
+// to throw data away if .ignore=true
+//
+// Shift entry off the buffer when it emits 'end', and emit 'entry' for
+// the next one in the list.
+//
+// At any time, we're pushing body chunks into the entry at WRITEENTRY,
+// and waiting for 'end' on the entry at READENTRY
+//
+// ignored entries get .resume() called on them straight away
+
+const warner = require('./warn-mixin.js')
+const path = require('path')
+const Header = require('./header.js')
+const EE = require('events')
+const Yallist = require('yallist')
+const maxMetaEntrySize = 1024 * 1024
+const Entry = require('./read-entry.js')
+const Pax = require('./pax.js')
+const zlib = require('minizlib')
+const Buffer = require('./buffer.js')
+
+const gzipHeader = Buffer.from([0x1f, 0x8b])
+const STATE = Symbol('state')
+const WRITEENTRY = Symbol('writeEntry')
+const READENTRY = Symbol('readEntry')
+const NEXTENTRY = Symbol('nextEntry')
+const PROCESSENTRY = Symbol('processEntry')
+const EX = Symbol('extendedHeader')
+const GEX = Symbol('globalExtendedHeader')
+const META = Symbol('meta')
+const EMITMETA = Symbol('emitMeta')
+const BUFFER = Symbol('buffer')
+const QUEUE = Symbol('queue')
+const ENDED = Symbol('ended')
+const EMITTEDEND = Symbol('emittedEnd')
+const EMIT = Symbol('emit')
+const UNZIP = Symbol('unzip')
+const CONSUMECHUNK = Symbol('consumeChunk')
+const CONSUMECHUNKSUB = Symbol('consumeChunkSub')
+const CONSUMEBODY = Symbol('consumeBody')
+const CONSUMEMETA = Symbol('consumeMeta')
+const CONSUMEHEADER = Symbol('consumeHeader')
+const CONSUMING = Symbol('consuming')
+const BUFFERCONCAT = Symbol('bufferConcat')
+const MAYBEEND = Symbol('maybeEnd')
+const WRITING = Symbol('writing')
+const ABORTED = Symbol('aborted')
+const DONE = Symbol('onDone')
+
+const noop = _ => true
+
+module.exports = warner(class Parser extends EE {
+ constructor (opt) {
+ opt = opt || {}
+ super(opt)
+
+ if (opt.ondone)
+ this.on(DONE, opt.ondone)
+ else
+ this.on(DONE, _ => {
+ this.emit('prefinish')
+ this.emit('finish')
+ this.emit('end')
+ this.emit('close')
+ })
+
+ this.strict = !!opt.strict
+ this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize
+ this.filter = typeof opt.filter === 'function' ? opt.filter : noop
+
+ // have to set this so that streams are ok piping into it
+ this.writable = true
+ this.readable = false
+
+ this[QUEUE] = new Yallist()
+ this[BUFFER] = null
+ this[READENTRY] = null
+ this[WRITEENTRY] = null
+ this[STATE] = 'begin'
+ this[META] = ''
+ this[EX] = null
+ this[GEX] = null
+ this[ENDED] = false
+ this[UNZIP] = null
+ this[ABORTED] = false
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+ if (typeof opt.onentry === 'function')
+ this.on('entry', opt.onentry)
+ }
+
+ [CONSUMEHEADER] (chunk, position) {
+ let header
+ try {
+ header = new Header(chunk, position, this[EX], this[GEX])
+ } catch (er) {
+ return this.warn('invalid entry', er)
+ }
+
+ if (header.nullBlock)
+ this[EMIT]('nullBlock')
+ else if (!header.cksumValid)
+ this.warn('invalid entry', header)
+ else if (!header.path)
+ this.warn('invalid: path is required', header)
+ else {
+ const type = header.type
+ if (/^(Symbolic)?Link$/.test(type) && !header.linkpath)
+ this.warn('invalid: linkpath required', header)
+ else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath)
+ this.warn('invalid: linkpath forbidden', header)
+ else {
+ const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])
+
+ if (entry.meta) {
+ if (entry.size > this.maxMetaEntrySize) {
+ entry.ignore = true
+ this[EMIT]('ignoredEntry', entry)
+ this[STATE] = 'ignore'
+ } else if (entry.size > 0) {
+ this[META] = ''
+ entry.on('data', c => this[META] += c)
+ this[STATE] = 'meta'
+ }
+ } else {
+        this[EX] = null
+ entry.ignore = entry.ignore || !this.filter(entry.path, entry)
+ if (entry.ignore) {
+ this[EMIT]('ignoredEntry', entry)
+ this[STATE] = entry.remain ? 'ignore' : 'begin'
+ } else {
+ if (entry.remain)
+ this[STATE] = 'body'
+ else {
+ this[STATE] = 'begin'
+ entry.end()
+ }
+
+ if (!this[READENTRY]) {
+ this[QUEUE].push(entry)
+ this[NEXTENTRY]()
+ } else
+ this[QUEUE].push(entry)
+ }
+ }
+ }
+ }
+ }
+
+ [PROCESSENTRY] (entry) {
+ let go = true
+
+ if (!entry) {
+ this[READENTRY] = null
+ go = false
+ } else if (Array.isArray(entry))
+ this.emit.apply(this, entry)
+ else {
+ this[READENTRY] = entry
+ this.emit('entry', entry)
+ if (!entry.emittedEnd) {
+ entry.on('end', _ => this[NEXTENTRY]())
+ go = false
+ }
+ }
+
+ return go
+ }
+
+ [NEXTENTRY] () {
+ do {} while (this[PROCESSENTRY](this[QUEUE].shift()))
+
+ if (!this[QUEUE].length) {
+ // At this point, there's nothing in the queue, but we may have an
+ // entry which is being consumed (readEntry).
+ // If we don't, then we definitely can handle more data.
+ // If we do, and either it's flowing, or it has never had any data
+ // written to it, then it needs more.
+ // The only other possibility is that it has returned false from a
+ // write() call, so we wait for the next drain to continue.
+ const re = this[READENTRY]
+ const drainNow = !re || re.flowing || re.size === re.remain
+ if (drainNow) {
+ if (!this[WRITING])
+ this.emit('drain')
+ } else
+ re.once('drain', _ => this.emit('drain'))
+ }
+ }
+
+ [CONSUMEBODY] (chunk, position) {
+ // write up to but no more than writeEntry.blockRemain
+ const entry = this[WRITEENTRY]
+ const br = entry.blockRemain
+ const c = (br >= chunk.length && position === 0) ? chunk
+ : chunk.slice(position, position + br)
+
+ entry.write(c)
+
+ if (!entry.blockRemain) {
+ this[STATE] = 'begin'
+ this[WRITEENTRY] = null
+ entry.end()
+ }
+
+ return c.length
+ }
+
+ [CONSUMEMETA] (chunk, position) {
+ const entry = this[WRITEENTRY]
+ const ret = this[CONSUMEBODY](chunk, position)
+
+ // if we finished, then the entry is reset
+ if (!this[WRITEENTRY])
+ this[EMITMETA](entry)
+
+ return ret
+ }
+
+ [EMIT] (ev, data, extra) {
+ if (!this[QUEUE].length && !this[READENTRY])
+ this.emit(ev, data, extra)
+ else
+ this[QUEUE].push([ev, data, extra])
+ }
+
+ [EMITMETA] (entry) {
+ this[EMIT]('meta', this[META])
+ switch (entry.type) {
+ case 'ExtendedHeader':
+ case 'OldExtendedHeader':
+ this[EX] = Pax.parse(this[META], this[EX], false)
+ break
+
+ case 'GlobalExtendedHeader':
+ this[GEX] = Pax.parse(this[META], this[GEX], true)
+ break
+
+ case 'NextFileHasLongPath':
+ case 'OldGnuLongPath':
+ this[EX] = this[EX] || Object.create(null)
+ this[EX].path = this[META].replace(/\0.*/, '')
+ break
+
+ case 'NextFileHasLongLinkpath':
+ this[EX] = this[EX] || Object.create(null)
+ this[EX].linkpath = this[META].replace(/\0.*/, '')
+ break
+
+ /* istanbul ignore next */
+ default: throw new Error('unknown meta: ' + entry.type)
+ }
+ }
+
+ abort (msg, error) {
+ this[ABORTED] = true
+ this.warn(msg, error)
+ this.emit('abort', error)
+ this.emit('error', error)
+ }
+
+ write (chunk) {
+ if (this[ABORTED])
+ return
+
+ // first write, might be gzipped
+ if (this[UNZIP] === null && chunk) {
+ if (this[BUFFER]) {
+ chunk = Buffer.concat([this[BUFFER], chunk])
+ this[BUFFER] = null
+ }
+ if (chunk.length < gzipHeader.length) {
+ this[BUFFER] = chunk
+ return true
+ }
+ for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) {
+ if (chunk[i] !== gzipHeader[i])
+ this[UNZIP] = false
+ }
+ if (this[UNZIP] === null) {
+ const ended = this[ENDED]
+ this[ENDED] = false
+ this[UNZIP] = new zlib.Unzip()
+ this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk))
+ this[UNZIP].on('error', er =>
+ this.abort(er.message, er))
+ this[UNZIP].on('end', _ => {
+ this[ENDED] = true
+ this[CONSUMECHUNK]()
+ })
+ this[WRITING] = true
+        const ret = this[UNZIP][ended ? 'end' : 'write'](chunk)
+ this[WRITING] = false
+ return ret
+ }
+ }
+
+ this[WRITING] = true
+ if (this[UNZIP])
+ this[UNZIP].write(chunk)
+ else
+ this[CONSUMECHUNK](chunk)
+ this[WRITING] = false
+
+ // return false if there's a queue, or if the current entry isn't flowing
+ const ret =
+ this[QUEUE].length ? false :
+ this[READENTRY] ? this[READENTRY].flowing :
+ true
+
+    // if ret is false but the queue is empty, the READENTRY is clogged
+ if (!ret && !this[QUEUE].length)
+ this[READENTRY].once('drain', _ => this.emit('drain'))
+
+ return ret
+ }
+
+ [BUFFERCONCAT] (c) {
+ if (c && !this[ABORTED])
+ this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c
+ }
+
+ [MAYBEEND] () {
+ if (this[ENDED] &&
+ !this[EMITTEDEND] &&
+ !this[ABORTED] &&
+ !this[CONSUMING]) {
+ this[EMITTEDEND] = true
+ const entry = this[WRITEENTRY]
+ if (entry && entry.blockRemain) {
+ const have = this[BUFFER] ? this[BUFFER].length : 0
+ this.warn('Truncated input (needed ' + entry.blockRemain +
+ ' more bytes, only ' + have + ' available)', entry)
+ if (this[BUFFER])
+ entry.write(this[BUFFER])
+ entry.end()
+ }
+ this[EMIT](DONE)
+ }
+ }
+
+ [CONSUMECHUNK] (chunk) {
+ if (this[CONSUMING]) {
+ this[BUFFERCONCAT](chunk)
+ } else if (!chunk && !this[BUFFER]) {
+ this[MAYBEEND]()
+ } else {
+ this[CONSUMING] = true
+ if (this[BUFFER]) {
+ this[BUFFERCONCAT](chunk)
+ const c = this[BUFFER]
+ this[BUFFER] = null
+ this[CONSUMECHUNKSUB](c)
+ } else {
+ this[CONSUMECHUNKSUB](chunk)
+ }
+
+ while (this[BUFFER] && this[BUFFER].length >= 512 && !this[ABORTED]) {
+ const c = this[BUFFER]
+ this[BUFFER] = null
+ this[CONSUMECHUNKSUB](c)
+ }
+ this[CONSUMING] = false
+ }
+
+ if (!this[BUFFER] || this[ENDED])
+ this[MAYBEEND]()
+ }
+
+ [CONSUMECHUNKSUB] (chunk) {
+ // we know that we are in CONSUMING mode, so anything written goes into
+ // the buffer. Advance the position and put any remainder in the buffer.
+ let position = 0
+ let length = chunk.length
+ while (position + 512 <= length && !this[ABORTED]) {
+ switch (this[STATE]) {
+ case 'begin':
+ this[CONSUMEHEADER](chunk, position)
+ position += 512
+ break
+
+ case 'ignore':
+ case 'body':
+ position += this[CONSUMEBODY](chunk, position)
+ break
+
+ case 'meta':
+ position += this[CONSUMEMETA](chunk, position)
+ break
+
+ /* istanbul ignore next */
+ default:
+ throw new Error('invalid state: ' + this[STATE])
+ }
+ }
+
+ if (position < length) {
+ if (this[BUFFER])
+ this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]])
+ else
+ this[BUFFER] = chunk.slice(position)
+ }
+ }
+
+ end (chunk) {
+ if (!this[ABORTED]) {
+ if (this[UNZIP])
+ this[UNZIP].end(chunk)
+ else {
+ this[ENDED] = true
+ this.write(chunk)
+ }
+ }
+ }
+})
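A sketch of feeding a tarball through this Parser, assuming the index exposes it as tar.Parse; the archive name is hypothetical. The magic-byte check in write() means gzipped input needs no special handling:

const tar = require('tar')
const fs = require('fs')

const parser = new tar.Parse({
  filter: (path, entry) => !path.endsWith('.map'), // rejects emit 'ignoredEntry'
  onentry: entry => {
    console.log(entry.path, entry.size)
    entry.resume() // drain the body so [NEXTENTRY] can emit the next one
  }
})
fs.createReadStream('archive.tgz').pipe(parser)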
diff --git a/node_modules/node-gyp/node_modules/tar/lib/pax.js b/node_modules/node-gyp/node_modules/tar/lib/pax.js
new file mode 100644
index 000000000..9d7e4aba5
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/pax.js
@@ -0,0 +1,146 @@
+'use strict'
+const Buffer = require('./buffer.js')
+const Header = require('./header.js')
+const path = require('path')
+
+class Pax {
+ constructor (obj, global) {
+ this.atime = obj.atime || null
+ this.charset = obj.charset || null
+ this.comment = obj.comment || null
+ this.ctime = obj.ctime || null
+ this.gid = obj.gid || null
+ this.gname = obj.gname || null
+ this.linkpath = obj.linkpath || null
+ this.mtime = obj.mtime || null
+ this.path = obj.path || null
+ this.size = obj.size || null
+ this.uid = obj.uid || null
+ this.uname = obj.uname || null
+ this.dev = obj.dev || null
+ this.ino = obj.ino || null
+ this.nlink = obj.nlink || null
+ this.global = global || false
+ }
+
+ encode () {
+ const body = this.encodeBody()
+ if (body === '')
+ return null
+
+ const bodyLen = Buffer.byteLength(body)
+ // round up to 512 bytes
+ // add 512 for header
+ const bufLen = 512 * Math.ceil(1 + bodyLen / 512)
+ const buf = Buffer.allocUnsafe(bufLen)
+
+    // 0-fill the header section, since encoding might not hit every field
+ for (let i = 0; i < 512; i++) {
+ buf[i] = 0
+ }
+
+ new Header({
+ // XXX split the path
+ // then the path should be PaxHeader + basename, but less than 99,
+ // prepend with the dirname
+ path: ('PaxHeader/' + path.basename(this.path)).slice(0, 99),
+ mode: this.mode || 0o644,
+ uid: this.uid || null,
+ gid: this.gid || null,
+ size: bodyLen,
+ mtime: this.mtime || null,
+ type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader',
+ linkpath: '',
+ uname: this.uname || '',
+ gname: this.gname || '',
+ devmaj: 0,
+ devmin: 0,
+ atime: this.atime || null,
+ ctime: this.ctime || null
+ }).encode(buf)
+
+ buf.write(body, 512, bodyLen, 'utf8')
+
+ // null pad after the body
+ for (let i = bodyLen + 512; i < buf.length; i++) {
+ buf[i] = 0
+ }
+
+ return buf
+ }
+
+ encodeBody () {
+ return (
+ this.encodeField('path') +
+ this.encodeField('ctime') +
+ this.encodeField('atime') +
+ this.encodeField('dev') +
+ this.encodeField('ino') +
+ this.encodeField('nlink') +
+ this.encodeField('charset') +
+ this.encodeField('comment') +
+ this.encodeField('gid') +
+ this.encodeField('gname') +
+ this.encodeField('linkpath') +
+ this.encodeField('mtime') +
+ this.encodeField('size') +
+ this.encodeField('uid') +
+ this.encodeField('uname')
+ )
+ }
+
+ encodeField (field) {
+ if (this[field] === null || this[field] === undefined)
+ return ''
+ const v = this[field] instanceof Date ? this[field].getTime() / 1000
+ : this[field]
+ const s = ' ' +
+ (field === 'dev' || field === 'ino' || field === 'nlink'
+ ? 'SCHILY.' : '') +
+ field + '=' + v + '\n'
+ const byteLen = Buffer.byteLength(s)
+    // the length prefix counts its own digits: a 9-byte body plus a
+    // 1-digit prefix totals 10 bytes, which needs a 2-digit prefix,
+    // making the whole record 11 bytes.
+ let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1
+ if (byteLen + digits >= Math.pow(10, digits))
+ digits += 1
+ const len = digits + byteLen
+ return len + s
+ }
+}
+
+Pax.parse = (string, ex, g) => new Pax(merge(parseKV(string), ex), g)
+
+const merge = (a, b) =>
+ b ? Object.keys(a).reduce((s, k) => (s[k] = a[k], s), b) : a
+
+const parseKV = string =>
+ string
+ .replace(/\n$/, '')
+ .split('\n')
+ .reduce(parseKVLine, Object.create(null))
+
+const parseKVLine = (set, line) => {
+ const n = parseInt(line, 10)
+
+ // XXX Values with \n in them will fail this.
+ // Refactor to not be a naive line-by-line parse.
+ if (n !== Buffer.byteLength(line) + 1)
+ return set
+
+ line = line.substr((n + ' ').length)
+ const kv = line.split('=')
+ const k = kv.shift().replace(/^SCHILY\.(dev|ino|nlink)/, '$1')
+ if (!k)
+ return set
+
+ const v = kv.join('=')
+ set[k] = /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k)
+ ? new Date(v * 1000)
+ : /^[0-9]+$/.test(v) ? +v
+ : v
+ return set
+}
+
+module.exports = Pax
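The self-counting length prefix in encodeField is easiest to see on a concrete record. A sketch, assuming the deep require works because the published package includes its lib/ directory (per the package.json later in this diff):

// ' path=a\n' is 8 bytes; a 1-digit prefix brings the record to 9 bytes,
// and 9 < 10, so one digit suffices. A 9-byte body would need a 2-digit
// prefix ('11'), since 9 + 1 crosses a power of ten.
const Pax = require('tar/lib/pax.js')
console.log(JSON.stringify(new Pax({ path: 'a' }).encodeBody()))
// => "9 path=a\n"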
diff --git a/node_modules/node-gyp/node_modules/tar/lib/read-entry.js b/node_modules/node-gyp/node_modules/tar/lib/read-entry.js
new file mode 100644
index 000000000..8acee94ba
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/read-entry.js
@@ -0,0 +1,98 @@
+'use strict'
+const types = require('./types.js')
+const MiniPass = require('minipass')
+
+const SLURP = Symbol('slurp')
+module.exports = class ReadEntry extends MiniPass {
+ constructor (header, ex, gex) {
+ super()
+    // read entries always start life paused. This is to avoid the
+    // situation where Minipass's auto-ending of empty streams results
+    // in an entry ending before we're ready for it.
+ this.pause()
+ this.extended = ex
+ this.globalExtended = gex
+ this.header = header
+ this.startBlockSize = 512 * Math.ceil(header.size / 512)
+ this.blockRemain = this.startBlockSize
+ this.remain = header.size
+ this.type = header.type
+ this.meta = false
+ this.ignore = false
+ switch (this.type) {
+ case 'File':
+ case 'OldFile':
+ case 'Link':
+ case 'SymbolicLink':
+ case 'CharacterDevice':
+ case 'BlockDevice':
+ case 'Directory':
+ case 'FIFO':
+ case 'ContiguousFile':
+ case 'GNUDumpDir':
+ break
+
+ case 'NextFileHasLongLinkpath':
+ case 'NextFileHasLongPath':
+ case 'OldGnuLongPath':
+ case 'GlobalExtendedHeader':
+ case 'ExtendedHeader':
+ case 'OldExtendedHeader':
+ this.meta = true
+ break
+
+ // NOTE: gnutar and bsdtar treat unrecognized types as 'File'
+ // it may be worth doing the same, but with a warning.
+ default:
+ this.ignore = true
+ }
+
+ this.path = header.path
+ this.mode = header.mode
+ if (this.mode)
+ this.mode = this.mode & 0o7777
+ this.uid = header.uid
+ this.gid = header.gid
+ this.uname = header.uname
+ this.gname = header.gname
+ this.size = header.size
+ this.mtime = header.mtime
+ this.atime = header.atime
+ this.ctime = header.ctime
+ this.linkpath = header.linkpath
+ this.uname = header.uname
+ this.gname = header.gname
+
+ if (ex) this[SLURP](ex)
+ if (gex) this[SLURP](gex, true)
+ }
+
+ write (data) {
+ const writeLen = data.length
+ if (writeLen > this.blockRemain)
+ throw new Error('writing more to entry than is appropriate')
+
+ const r = this.remain
+ const br = this.blockRemain
+ this.remain = Math.max(0, r - writeLen)
+ this.blockRemain = Math.max(0, br - writeLen)
+ if (this.ignore)
+ return true
+
+ if (r >= writeLen)
+ return super.write(data)
+
+ // r < writeLen
+ return super.write(data.slice(0, r))
+ }
+
+ [SLURP] (ex, global) {
+ for (let k in ex) {
+ // we slurp in everything except for the path attribute in
+ // a global extended header, because that's weird.
+ if (ex[k] !== null && ex[k] !== undefined &&
+ !(global && k === 'path'))
+ this[k] = ex[k]
+ }
+ }
+}
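ReadEntry instances are what the parser hands to 'entry' listeners, with the header fields copied onto the instance. A sketch via the high-level list function (the archive name is hypothetical):

const tar = require('tar')
tar.t({
  file: 'archive.tgz',
  onentry: entry => console.log(entry.type, entry.path, entry.size)
})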
diff --git a/node_modules/node-gyp/node_modules/tar/lib/replace.js b/node_modules/node-gyp/node_modules/tar/lib/replace.js
new file mode 100644
index 000000000..571cee94a
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/replace.js
@@ -0,0 +1,220 @@
+'use strict'
+const Buffer = require('./buffer.js')
+
+// tar -r
+const hlo = require('./high-level-opt.js')
+const Pack = require('./pack.js')
+const Parse = require('./parse.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const t = require('./list.js')
+const path = require('path')
+
+// Starting at the head of the file, read a Header.
+// If the checksum is invalid, that's our position to start writing.
+// If it is valid, jump forward by the entry's size (rounded up to 512)
+// and try again.
+// Write the new Pack stream starting there.
+
+const Header = require('./header.js')
+
+const r = module.exports = (opt_, files, cb) => {
+ const opt = hlo(opt_)
+
+ if (!opt.file)
+ throw new TypeError('file is required')
+
+ if (opt.gzip)
+ throw new TypeError('cannot append to compressed archives')
+
+ if (!files || !Array.isArray(files) || !files.length)
+ throw new TypeError('no files or directories specified')
+
+ files = Array.from(files)
+
+ return opt.sync ? replaceSync(opt, files)
+ : replace(opt, files, cb)
+}
+
+const replaceSync = (opt, files) => {
+ const p = new Pack.Sync(opt)
+
+ let threw = true
+ let fd
+ let position
+
+ try {
+ try {
+ fd = fs.openSync(opt.file, 'r+')
+ } catch (er) {
+ if (er.code === 'ENOENT')
+ fd = fs.openSync(opt.file, 'w+')
+ else
+ throw er
+ }
+
+ const st = fs.fstatSync(fd)
+ const headBuf = Buffer.alloc(512)
+
+ POSITION: for (position = 0; position < st.size; position += 512) {
+ for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
+ bytes = fs.readSync(
+ fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
+ )
+
+ if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
+ throw new Error('cannot append to compressed archives')
+
+ if (!bytes)
+ break POSITION
+ }
+
+ let h = new Header(headBuf)
+ if (!h.cksumValid)
+ break
+ let entryBlockSize = 512 * Math.ceil(h.size / 512)
+ if (position + entryBlockSize + 512 > st.size)
+ break
+      // jump ahead over all the blocks for the body; the loop's
+      // position += 512 accounts for the header we just parsed
+ position += entryBlockSize
+ if (opt.mtimeCache)
+ opt.mtimeCache.set(h.path, h.mtime)
+ }
+ threw = false
+
+ streamSync(opt, p, position, fd, files)
+ } finally {
+ if (threw)
+ try { fs.closeSync(fd) } catch (er) {}
+ }
+}
+
+const streamSync = (opt, p, position, fd, files) => {
+ const stream = new fsm.WriteStreamSync(opt.file, {
+ fd: fd,
+ start: position
+ })
+ p.pipe(stream)
+ addFilesSync(p, files)
+}
+
+const replace = (opt, files, cb) => {
+ files = Array.from(files)
+ const p = new Pack(opt)
+
+ const getPos = (fd, size, cb_) => {
+ const cb = (er, pos) => {
+ if (er)
+ fs.close(fd, _ => cb_(er))
+ else
+ cb_(null, pos)
+ }
+
+ let position = 0
+ if (size === 0)
+ return cb(null, 0)
+
+ let bufPos = 0
+ const headBuf = Buffer.alloc(512)
+ const onread = (er, bytes) => {
+ if (er)
+ return cb(er)
+ bufPos += bytes
+ if (bufPos < 512 && bytes)
+ return fs.read(
+ fd, headBuf, bufPos, headBuf.length - bufPos,
+ position + bufPos, onread
+ )
+
+ if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
+ return cb(new Error('cannot append to compressed archives'))
+
+ // truncated header
+ if (bufPos < 512)
+ return cb(null, position)
+
+ const h = new Header(headBuf)
+ if (!h.cksumValid)
+ return cb(null, position)
+
+ const entryBlockSize = 512 * Math.ceil(h.size / 512)
+ if (position + entryBlockSize + 512 > size)
+ return cb(null, position)
+
+ position += entryBlockSize + 512
+ if (position >= size)
+ return cb(null, position)
+
+ if (opt.mtimeCache)
+ opt.mtimeCache.set(h.path, h.mtime)
+ bufPos = 0
+ fs.read(fd, headBuf, 0, 512, position, onread)
+ }
+ fs.read(fd, headBuf, 0, 512, position, onread)
+ }
+
+ const promise = new Promise((resolve, reject) => {
+ p.on('error', reject)
+ let flag = 'r+'
+ const onopen = (er, fd) => {
+ if (er && er.code === 'ENOENT' && flag === 'r+') {
+ flag = 'w+'
+ return fs.open(opt.file, flag, onopen)
+ }
+
+ if (er)
+ return reject(er)
+
+ fs.fstat(fd, (er, st) => {
+ if (er)
+ return reject(er)
+ getPos(fd, st.size, (er, position) => {
+ if (er)
+ return reject(er)
+ const stream = new fsm.WriteStream(opt.file, {
+ fd: fd,
+ start: position
+ })
+ p.pipe(stream)
+ stream.on('error', reject)
+ stream.on('close', resolve)
+ addFilesAsync(p, files)
+ })
+ })
+ }
+ fs.open(opt.file, flag, onopen)
+ })
+
+ return cb ? promise.then(cb, cb) : promise
+}
+
+const addFilesSync = (p, files) => {
+ files.forEach(file => {
+ if (file.charAt(0) === '@')
+ t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ sync: true,
+ noResume: true,
+ onentry: entry => p.add(entry)
+ })
+ else
+ p.add(file)
+ })
+ p.end()
+}
+
+const addFilesAsync = (p, files) => {
+ while (files.length) {
+ const file = files.shift()
+ if (file.charAt(0) === '@')
+ return t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ noResume: true,
+ onentry: entry => p.add(entry)
+ }).then(_ => addFilesAsync(p, files))
+ else
+ p.add(file)
+ }
+ p.end()
+}
diff --git a/node_modules/node-gyp/node_modules/tar/lib/types.js b/node_modules/node-gyp/node_modules/tar/lib/types.js
new file mode 100644
index 000000000..df425652b
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/types.js
@@ -0,0 +1,44 @@
+'use strict'
+// map types from key to human-friendly name
+exports.name = new Map([
+ ['0', 'File'],
+ // same as File
+ ['', 'OldFile'],
+ ['1', 'Link'],
+ ['2', 'SymbolicLink'],
+ // Devices and FIFOs aren't fully supported
+ // they are parsed, but skipped when unpacking
+ ['3', 'CharacterDevice'],
+ ['4', 'BlockDevice'],
+ ['5', 'Directory'],
+ ['6', 'FIFO'],
+ // same as File
+ ['7', 'ContiguousFile'],
+ // pax headers
+ ['g', 'GlobalExtendedHeader'],
+ ['x', 'ExtendedHeader'],
+ // vendor-specific stuff
+ // skip
+ ['A', 'SolarisACL'],
+ // like 5, but with data, which should be skipped
+ ['D', 'GNUDumpDir'],
+ // metadata only, skip
+ ['I', 'Inode'],
+ // data = link path of next file
+ ['K', 'NextFileHasLongLinkpath'],
+ // data = path of next file
+ ['L', 'NextFileHasLongPath'],
+ // skip
+ ['M', 'ContinuationFile'],
+ // like L
+ ['N', 'OldGnuLongPath'],
+ // skip
+ ['S', 'SparseFile'],
+ // skip
+ ['V', 'TapeVolumeHeader'],
+ // like x
+ ['X', 'OldExtendedHeader']
+])
+
+// map the other direction
+exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]]))
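The two maps invert each other, so lookups go both ways:

const types = require('tar').types
types.name.get('5')         // => 'Directory'
types.code.get('Directory') // => '5'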
diff --git a/node_modules/node-gyp/node_modules/tar/lib/unpack.js b/node_modules/node-gyp/node_modules/tar/lib/unpack.js
new file mode 100644
index 000000000..fc765096e
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/unpack.js
@@ -0,0 +1,621 @@
+'use strict'
+
+const assert = require('assert')
+const EE = require('events').EventEmitter
+const Parser = require('./parse.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const path = require('path')
+const mkdir = require('./mkdir.js')
+const mkdirSync = mkdir.sync
+const wc = require('./winchars.js')
+
+const ONENTRY = Symbol('onEntry')
+const CHECKFS = Symbol('checkFs')
+const ISREUSABLE = Symbol('isReusable')
+const MAKEFS = Symbol('makeFs')
+const FILE = Symbol('file')
+const DIRECTORY = Symbol('directory')
+const LINK = Symbol('link')
+const SYMLINK = Symbol('symlink')
+const HARDLINK = Symbol('hardlink')
+const UNSUPPORTED = Symbol('unsupported')
+const UNKNOWN = Symbol('unknown')
+const CHECKPATH = Symbol('checkPath')
+const MKDIR = Symbol('mkdir')
+const ONERROR = Symbol('onError')
+const PENDING = Symbol('pending')
+const PEND = Symbol('pend')
+const UNPEND = Symbol('unpend')
+const ENDED = Symbol('ended')
+const MAYBECLOSE = Symbol('maybeClose')
+const SKIP = Symbol('skip')
+const DOCHOWN = Symbol('doChown')
+const UID = Symbol('uid')
+const GID = Symbol('gid')
+const crypto = require('crypto')
+
+// Unlinks on Windows are not atomic.
+//
+// This means that if you have a file entry, followed by another
+// file entry with an identical name, and you cannot re-use the file
+// (because it's a hardlink, or because unlink:true is set, or it's
+// Windows, which does not have useful nlink values), then the unlink
+// will be committed to the disk AFTER the new file has been written
+// over the old one, deleting the new file.
+//
+// To work around this, on Windows systems, we rename the file and then
+// delete the renamed file. It's a sloppy kludge, but frankly, I do not
+// know of a better way to do this, given windows' non-atomic unlink
+// semantics.
+//
+// See: https://github.com/npm/node-tar/issues/183
+/* istanbul ignore next */
+const unlinkFile = (path, cb) => {
+ if (process.platform !== 'win32')
+ return fs.unlink(path, cb)
+
+ const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
+ fs.rename(path, name, er => {
+ if (er)
+ return cb(er)
+ fs.unlink(name, cb)
+ })
+}
+
+/* istanbul ignore next */
+const unlinkFileSync = path => {
+ if (process.platform !== 'win32')
+ return fs.unlinkSync(path)
+
+ const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
+ fs.renameSync(path, name)
+ fs.unlinkSync(name)
+}
+
+// pick the first valid uint32, e.g. this.uid, entry.uid, this.processUid
+const uint32 = (a, b, c) =>
+ a === a >>> 0 ? a
+ : b === b >>> 0 ? b
+ : c
+
+class Unpack extends Parser {
+ constructor (opt) {
+ if (!opt)
+ opt = {}
+
+ opt.ondone = _ => {
+ this[ENDED] = true
+ this[MAYBECLOSE]()
+ }
+
+ super(opt)
+
+ this.transform = typeof opt.transform === 'function' ? opt.transform : null
+
+ this.writable = true
+ this.readable = false
+
+ this[PENDING] = 0
+ this[ENDED] = false
+
+ this.dirCache = opt.dirCache || new Map()
+
+ if (typeof opt.uid === 'number' || typeof opt.gid === 'number') {
+ // need both or neither
+ if (typeof opt.uid !== 'number' || typeof opt.gid !== 'number')
+ throw new TypeError('cannot set owner without number uid and gid')
+ if (opt.preserveOwner)
+ throw new TypeError(
+ 'cannot preserve owner in archive and also set owner explicitly')
+ this.uid = opt.uid
+ this.gid = opt.gid
+ this.setOwner = true
+ } else {
+ this.uid = null
+ this.gid = null
+ this.setOwner = false
+ }
+
+ // default true for root
+ if (opt.preserveOwner === undefined && typeof opt.uid !== 'number')
+ this.preserveOwner = process.getuid && process.getuid() === 0
+ else
+ this.preserveOwner = !!opt.preserveOwner
+
+ this.processUid = (this.preserveOwner || this.setOwner) && process.getuid ?
+ process.getuid() : null
+ this.processGid = (this.preserveOwner || this.setOwner) && process.getgid ?
+ process.getgid() : null
+
+ // mostly just for testing, but useful in some cases.
+ // Forcibly trigger a chown on every entry, no matter what
+ this.forceChown = opt.forceChown === true
+
+    // turn <>?:| in filenames into 0xf000-higher encoded forms
+ this.win32 = !!opt.win32 || process.platform === 'win32'
+
+ // do not unpack over files that are newer than what's in the archive
+ this.newer = !!opt.newer
+
+ // do not unpack over ANY files
+ this.keep = !!opt.keep
+
+ // do not set mtime/atime of extracted entries
+ this.noMtime = !!opt.noMtime
+
+    // allow .., absolute path entries, and unpacking through symlinks.
+    // Without this, warn and skip .., relativize absolutes, and error
+    // on symlinks in the extraction path
+ this.preservePaths = !!opt.preservePaths
+
+ // unlink files and links before writing. This breaks existing hard
+ // links, and removes symlink directories rather than erroring
+ this.unlink = !!opt.unlink
+
+ this.cwd = path.resolve(opt.cwd || process.cwd())
+ this.strip = +opt.strip || 0
+ this.processUmask = process.umask()
+ this.umask = typeof opt.umask === 'number' ? opt.umask : this.processUmask
+ // default mode for dirs created as parents
+ this.dmode = opt.dmode || (0o0777 & (~this.umask))
+ this.fmode = opt.fmode || (0o0666 & (~this.umask))
+ this.on('entry', entry => this[ONENTRY](entry))
+ }
+
+ [MAYBECLOSE] () {
+ if (this[ENDED] && this[PENDING] === 0) {
+ this.emit('prefinish')
+ this.emit('finish')
+ this.emit('end')
+ this.emit('close')
+ }
+ }
+
+ [CHECKPATH] (entry) {
+ if (this.strip) {
+ const parts = entry.path.split(/\/|\\/)
+ if (parts.length < this.strip)
+ return false
+ entry.path = parts.slice(this.strip).join('/')
+
+ if (entry.type === 'Link') {
+ const linkparts = entry.linkpath.split(/\/|\\/)
+ if (linkparts.length >= this.strip)
+ entry.linkpath = linkparts.slice(this.strip).join('/')
+ }
+ }
+
+ if (!this.preservePaths) {
+ const p = entry.path
+ if (p.match(/(^|\/|\\)\.\.(\\|\/|$)/)) {
+ this.warn('path contains \'..\'', p)
+ return false
+ }
+
+ // absolutes on posix are also absolutes on win32
+ // so we only need to test this one to get both
+ if (path.win32.isAbsolute(p)) {
+ const parsed = path.win32.parse(p)
+ this.warn('stripping ' + parsed.root + ' from absolute path', p)
+ entry.path = p.substr(parsed.root.length)
+ }
+ }
+
+ // only encode : chars that aren't drive letter indicators
+ if (this.win32) {
+ const parsed = path.win32.parse(entry.path)
+ entry.path = parsed.root === '' ? wc.encode(entry.path)
+ : parsed.root + wc.encode(entry.path.substr(parsed.root.length))
+ }
+
+ if (path.isAbsolute(entry.path))
+ entry.absolute = entry.path
+ else
+ entry.absolute = path.resolve(this.cwd, entry.path)
+
+ return true
+ }
+
+ [ONENTRY] (entry) {
+ if (!this[CHECKPATH](entry))
+ return entry.resume()
+
+ assert.equal(typeof entry.absolute, 'string')
+
+ switch (entry.type) {
+ case 'Directory':
+ case 'GNUDumpDir':
+ if (entry.mode)
+ entry.mode = entry.mode | 0o700
+
+ case 'File':
+ case 'OldFile':
+ case 'ContiguousFile':
+ case 'Link':
+ case 'SymbolicLink':
+ return this[CHECKFS](entry)
+
+ case 'CharacterDevice':
+ case 'BlockDevice':
+ case 'FIFO':
+ return this[UNSUPPORTED](entry)
+ }
+ }
+
+ [ONERROR] (er, entry) {
+ // Cwd has to exist, or else nothing works. That's serious.
+ // Other errors are warnings, which raise the error in strict
+ // mode, but otherwise continue on.
+ if (er.name === 'CwdError')
+ this.emit('error', er)
+ else {
+ this.warn(er.message, er)
+ this[UNPEND]()
+ entry.resume()
+ }
+ }
+
+ [MKDIR] (dir, mode, cb) {
+ mkdir(dir, {
+ uid: this.uid,
+ gid: this.gid,
+ processUid: this.processUid,
+ processGid: this.processGid,
+ umask: this.processUmask,
+ preserve: this.preservePaths,
+ unlink: this.unlink,
+ cache: this.dirCache,
+ cwd: this.cwd,
+ mode: mode
+ }, cb)
+ }
+
+ [DOCHOWN] (entry) {
+ // in preserve owner mode, chown if the entry doesn't match process
+ // in set owner mode, chown if setting doesn't match process
+ return this.forceChown ||
+ this.preserveOwner &&
+ ( typeof entry.uid === 'number' && entry.uid !== this.processUid ||
+ typeof entry.gid === 'number' && entry.gid !== this.processGid )
+ ||
+ ( typeof this.uid === 'number' && this.uid !== this.processUid ||
+ typeof this.gid === 'number' && this.gid !== this.processGid )
+ }
+
+ [UID] (entry) {
+ return uint32(this.uid, entry.uid, this.processUid)
+ }
+
+ [GID] (entry) {
+ return uint32(this.gid, entry.gid, this.processGid)
+ }
+
+ [FILE] (entry) {
+ const mode = entry.mode & 0o7777 || this.fmode
+ const stream = new fsm.WriteStream(entry.absolute, {
+ mode: mode,
+ autoClose: false
+ })
+ stream.on('error', er => this[ONERROR](er, entry))
+
+ let actions = 1
+ const done = er => {
+ if (er)
+ return this[ONERROR](er, entry)
+
+ if (--actions === 0)
+ fs.close(stream.fd, _ => this[UNPEND]())
+ }
+
+ stream.on('finish', _ => {
+ // if futimes fails, try utimes
+ // if utimes fails, fail with the original error
+ // same for fchown/chown
+ const abs = entry.absolute
+ const fd = stream.fd
+
+ if (entry.mtime && !this.noMtime) {
+ actions++
+ const atime = entry.atime || new Date()
+ const mtime = entry.mtime
+ fs.futimes(fd, atime, mtime, er =>
+ er ? fs.utimes(abs, atime, mtime, er2 => done(er2 && er))
+ : done())
+ }
+
+ if (this[DOCHOWN](entry)) {
+ actions++
+ const uid = this[UID](entry)
+ const gid = this[GID](entry)
+ fs.fchown(fd, uid, gid, er =>
+ er ? fs.chown(abs, uid, gid, er2 => done(er2 && er))
+ : done())
+ }
+
+ done()
+ })
+
+ const tx = this.transform ? this.transform(entry) || entry : entry
+ if (tx !== entry) {
+ tx.on('error', er => this[ONERROR](er, entry))
+ entry.pipe(tx)
+ }
+ tx.pipe(stream)
+ }
+
+ [DIRECTORY] (entry) {
+ const mode = entry.mode & 0o7777 || this.dmode
+ this[MKDIR](entry.absolute, mode, er => {
+ if (er)
+ return this[ONERROR](er, entry)
+
+ let actions = 1
+ const done = _ => {
+ if (--actions === 0) {
+ this[UNPEND]()
+ entry.resume()
+ }
+ }
+
+ if (entry.mtime && !this.noMtime) {
+ actions++
+ fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, done)
+ }
+
+ if (this[DOCHOWN](entry)) {
+ actions++
+ fs.chown(entry.absolute, this[UID](entry), this[GID](entry), done)
+ }
+
+ done()
+ })
+ }
+
+ [UNSUPPORTED] (entry) {
+ this.warn('unsupported entry type: ' + entry.type, entry)
+ entry.resume()
+ }
+
+ [SYMLINK] (entry) {
+ this[LINK](entry, entry.linkpath, 'symlink')
+ }
+
+ [HARDLINK] (entry) {
+ this[LINK](entry, path.resolve(this.cwd, entry.linkpath), 'link')
+ }
+
+ [PEND] () {
+ this[PENDING]++
+ }
+
+ [UNPEND] () {
+ this[PENDING]--
+ this[MAYBECLOSE]()
+ }
+
+ [SKIP] (entry) {
+ this[UNPEND]()
+ entry.resume()
+ }
+
+  // Check if we can reuse an existing filesystem entry safely and
+  // overwrite it, rather than unlinking and recreating it.
+  // Windows doesn't report a useful nlink, so we just never reuse entries.
+ [ISREUSABLE] (entry, st) {
+ return entry.type === 'File' &&
+ !this.unlink &&
+ st.isFile() &&
+ st.nlink <= 1 &&
+ process.platform !== 'win32'
+ }
+
+ // check if a thing is there, and if so, try to clobber it
+ [CHECKFS] (entry) {
+ this[PEND]()
+ this[MKDIR](path.dirname(entry.absolute), this.dmode, er => {
+ if (er)
+ return this[ONERROR](er, entry)
+ fs.lstat(entry.absolute, (er, st) => {
+ if (st && (this.keep || this.newer && st.mtime > entry.mtime))
+ this[SKIP](entry)
+ else if (er || this[ISREUSABLE](entry, st))
+ this[MAKEFS](null, entry)
+ else if (st.isDirectory()) {
+ if (entry.type === 'Directory') {
+ if (!entry.mode || (st.mode & 0o7777) === entry.mode)
+ this[MAKEFS](null, entry)
+ else
+ fs.chmod(entry.absolute, entry.mode, er => this[MAKEFS](er, entry))
+ } else
+ fs.rmdir(entry.absolute, er => this[MAKEFS](er, entry))
+ } else
+ unlinkFile(entry.absolute, er => this[MAKEFS](er, entry))
+ })
+ })
+ }
+
+ [MAKEFS] (er, entry) {
+ if (er)
+ return this[ONERROR](er, entry)
+
+ switch (entry.type) {
+ case 'File':
+ case 'OldFile':
+ case 'ContiguousFile':
+ return this[FILE](entry)
+
+ case 'Link':
+ return this[HARDLINK](entry)
+
+ case 'SymbolicLink':
+ return this[SYMLINK](entry)
+
+ case 'Directory':
+ case 'GNUDumpDir':
+ return this[DIRECTORY](entry)
+ }
+ }
+
+ [LINK] (entry, linkpath, link) {
+ // XXX: get the type ('file' or 'dir') for windows
+ fs[link](linkpath, entry.absolute, er => {
+ if (er)
+ return this[ONERROR](er, entry)
+ this[UNPEND]()
+ entry.resume()
+ })
+ }
+}
+
+class UnpackSync extends Unpack {
+ constructor (opt) {
+ super(opt)
+ }
+
+ [CHECKFS] (entry) {
+ const er = this[MKDIR](path.dirname(entry.absolute), this.dmode)
+ if (er)
+ return this[ONERROR](er, entry)
+ try {
+ const st = fs.lstatSync(entry.absolute)
+ if (this.keep || this.newer && st.mtime > entry.mtime)
+ return this[SKIP](entry)
+ else if (this[ISREUSABLE](entry, st))
+ return this[MAKEFS](null, entry)
+ else {
+ try {
+ if (st.isDirectory()) {
+ if (entry.type === 'Directory') {
+ if (entry.mode && (st.mode & 0o7777) !== entry.mode)
+ fs.chmodSync(entry.absolute, entry.mode)
+ } else
+ fs.rmdirSync(entry.absolute)
+ } else
+ unlinkFileSync(entry.absolute)
+ return this[MAKEFS](null, entry)
+ } catch (er) {
+ return this[ONERROR](er, entry)
+ }
+ }
+ } catch (er) {
+ return this[MAKEFS](null, entry)
+ }
+ }
+
+ [FILE] (entry) {
+ const mode = entry.mode & 0o7777 || this.fmode
+
+ const oner = er => {
+ try { fs.closeSync(fd) } catch (_) {}
+ if (er)
+ this[ONERROR](er, entry)
+ }
+
+ let stream
+ let fd
+ try {
+ fd = fs.openSync(entry.absolute, 'w', mode)
+ } catch (er) {
+ return oner(er)
+ }
+ const tx = this.transform ? this.transform(entry) || entry : entry
+ if (tx !== entry) {
+ tx.on('error', er => this[ONERROR](er, entry))
+ entry.pipe(tx)
+ }
+
+ tx.on('data', chunk => {
+ try {
+ fs.writeSync(fd, chunk, 0, chunk.length)
+ } catch (er) {
+ oner(er)
+ }
+ })
+
+ tx.on('end', _ => {
+ let er = null
+      // try futimes first, falling back to utimes;
+      // if both fail, report the original futimes error
+ if (entry.mtime && !this.noMtime) {
+ const atime = entry.atime || new Date()
+ const mtime = entry.mtime
+ try {
+ fs.futimesSync(fd, atime, mtime)
+ } catch (futimeser) {
+ try {
+ fs.utimesSync(entry.absolute, atime, mtime)
+ } catch (utimeser) {
+ er = futimeser
+ }
+ }
+ }
+
+ if (this[DOCHOWN](entry)) {
+ const uid = this[UID](entry)
+ const gid = this[GID](entry)
+
+ try {
+ fs.fchownSync(fd, uid, gid)
+ } catch (fchowner) {
+ try {
+ fs.chownSync(entry.absolute, uid, gid)
+ } catch (chowner) {
+ er = er || fchowner
+ }
+ }
+ }
+
+ oner(er)
+ })
+ }
+
+ [DIRECTORY] (entry) {
+ const mode = entry.mode & 0o7777 || this.dmode
+ const er = this[MKDIR](entry.absolute, mode)
+ if (er)
+ return this[ONERROR](er, entry)
+ if (entry.mtime && !this.noMtime) {
+ try {
+ fs.utimesSync(entry.absolute, entry.atime || new Date(), entry.mtime)
+ } catch (er) {}
+ }
+ if (this[DOCHOWN](entry)) {
+ try {
+ fs.chownSync(entry.absolute, this[UID](entry), this[GID](entry))
+ } catch (er) {}
+ }
+ entry.resume()
+ }
+
+ [MKDIR] (dir, mode) {
+ try {
+ return mkdir.sync(dir, {
+ uid: this.uid,
+ gid: this.gid,
+ processUid: this.processUid,
+ processGid: this.processGid,
+ umask: this.processUmask,
+ preserve: this.preservePaths,
+ unlink: this.unlink,
+ cache: this.dirCache,
+ cwd: this.cwd,
+ mode: mode
+ })
+ } catch (er) {
+ return er
+ }
+ }
+
+ [LINK] (entry, linkpath, link) {
+ try {
+ fs[link + 'Sync'](linkpath, entry.absolute)
+ entry.resume()
+ } catch (er) {
+ return this[ONERROR](er, entry)
+ }
+ }
+}
+
+Unpack.Sync = UnpackSync
+module.exports = Unpack
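A sketch of extracting with this class directly, assuming the index exposes it as tar.Unpack; the paths are hypothetical. Note that cwd itself must already exist: a missing cwd surfaces as a CwdError, which [ONERROR] treats as fatal:

const tar = require('tar')
const fs = require('fs')

const unpack = new tar.Unpack({
  cwd: '/tmp/extract-here',
  strip: 1, // drop the leading path component from every entry
  filter: (path, entry) => !path.includes('node_modules')
})
fs.createReadStream('archive.tgz').pipe(unpack)
unpack.on('close', () => console.log('done'))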
diff --git a/node_modules/node-gyp/node_modules/tar/lib/update.js b/node_modules/node-gyp/node_modules/tar/lib/update.js
new file mode 100644
index 000000000..16c3e93ed
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/update.js
@@ -0,0 +1,36 @@
+'use strict'
+
+// tar -u
+
+const hlo = require('./high-level-opt.js')
+const r = require('./replace.js')
+// just call tar.r with the filter and mtimeCache
+
+const u = module.exports = (opt_, files, cb) => {
+ const opt = hlo(opt_)
+
+ if (!opt.file)
+ throw new TypeError('file is required')
+
+ if (opt.gzip)
+ throw new TypeError('cannot append to compressed archives')
+
+ if (!files || !Array.isArray(files) || !files.length)
+ throw new TypeError('no files or directories specified')
+
+ files = Array.from(files)
+
+ mtimeFilter(opt)
+ return r(opt, files, cb)
+}
+
+const mtimeFilter = opt => {
+ const filter = opt.filter
+
+ if (!opt.mtimeCache)
+ opt.mtimeCache = new Map()
+
+ opt.filter = filter ? (path, stat) =>
+ filter(path, stat) && !(opt.mtimeCache.get(path) > stat.mtime)
+ : (path, stat) => !(opt.mtimeCache.get(path) > stat.mtime)
+}
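So tar.u is just tar.r plus an mtime filter: a file is skipped when the copy already in the archive is newer than the one on disk. A sketch (file names hypothetical):

const tar = require('tar')
tar.u({ file: 'backup.tar' }, ['notes.txt'])
  .then(() => console.log('appended only if the archived copy was stale'))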
diff --git a/node_modules/node-gyp/node_modules/tar/lib/warn-mixin.js b/node_modules/node-gyp/node_modules/tar/lib/warn-mixin.js
new file mode 100644
index 000000000..94a4b9b99
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/warn-mixin.js
@@ -0,0 +1,14 @@
+'use strict'
+module.exports = Base => class extends Base {
+ warn (msg, data) {
+ if (!this.strict)
+ this.emit('warn', msg, data)
+ else if (data instanceof Error)
+ this.emit('error', data)
+ else {
+ const er = new Error(msg)
+ er.data = data
+ this.emit('error', er)
+ }
+ }
+}
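The practical effect: in strict mode every warning becomes a hard 'error' event; otherwise consumers can watch 'warn' and keep going. A sketch:

const tar = require('tar')
const strict = new tar.Parse({ strict: true })
strict.on('error', er => console.error('fatal:', er.message))

const lax = new tar.Parse()
lax.on('warn', (msg, data) => console.warn('recoverable:', msg))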
diff --git a/node_modules/node-gyp/node_modules/tar/lib/winchars.js b/node_modules/node-gyp/node_modules/tar/lib/winchars.js
new file mode 100644
index 000000000..cf6ea0606
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/winchars.js
@@ -0,0 +1,23 @@
+'use strict'
+
+// When writing files on Windows, translate the characters to their
+// 0xf000 higher-encoded versions.
+
+const raw = [
+ '|',
+ '<',
+ '>',
+ '?',
+ ':'
+]
+
+const win = raw.map(char =>
+ String.fromCharCode(0xf000 + char.charCodeAt(0)))
+
+const toWin = new Map(raw.map((char, i) => [char, win[i]]))
+const toRaw = new Map(win.map((char, i) => [char, raw[i]]))
+
+module.exports = {
+ encode: s => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s),
+ decode: s => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s)
+}
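A sketch of the round trip (the deep require again assumes the published lib/ directory):

const wc = require('tar/lib/winchars.js')
const enc = wc.encode('a<b>c:d')  // reserved chars move to 0xf000 + code
wc.decode(enc) === 'a<b>c:d'      // => true; the maps are exact inverses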
diff --git a/node_modules/node-gyp/node_modules/tar/lib/write-entry.js b/node_modules/node-gyp/node_modules/tar/lib/write-entry.js
new file mode 100644
index 000000000..0c019006f
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/lib/write-entry.js
@@ -0,0 +1,422 @@
+'use strict'
+const Buffer = require('./buffer.js')
+const MiniPass = require('minipass')
+const Pax = require('./pax.js')
+const Header = require('./header.js')
+const ReadEntry = require('./read-entry.js')
+const fs = require('fs')
+const path = require('path')
+
+const types = require('./types.js')
+const maxReadSize = 16 * 1024 * 1024
+const PROCESS = Symbol('process')
+const FILE = Symbol('file')
+const DIRECTORY = Symbol('directory')
+const SYMLINK = Symbol('symlink')
+const HARDLINK = Symbol('hardlink')
+const HEADER = Symbol('header')
+const READ = Symbol('read')
+const LSTAT = Symbol('lstat')
+const ONLSTAT = Symbol('onlstat')
+const ONREAD = Symbol('onread')
+const ONREADLINK = Symbol('onreadlink')
+const OPENFILE = Symbol('openfile')
+const ONOPENFILE = Symbol('onopenfile')
+const CLOSE = Symbol('close')
+const MODE = Symbol('mode')
+const warner = require('./warn-mixin.js')
+const winchars = require('./winchars.js')
+
+const modeFix = require('./mode-fix.js')
+
+const WriteEntry = warner(class WriteEntry extends MiniPass {
+ constructor (p, opt) {
+ opt = opt || {}
+ super(opt)
+ if (typeof p !== 'string')
+ throw new TypeError('path is required')
+ this.path = p
+ // suppress atime, ctime, uid, gid, uname, gname
+ this.portable = !!opt.portable
+ // until node has builtin pwnam functions, this'll have to do
+ this.myuid = process.getuid && process.getuid()
+ this.myuser = process.env.USER || ''
+ this.maxReadSize = opt.maxReadSize || maxReadSize
+ this.linkCache = opt.linkCache || new Map()
+ this.statCache = opt.statCache || new Map()
+ this.preservePaths = !!opt.preservePaths
+ this.cwd = opt.cwd || process.cwd()
+ this.strict = !!opt.strict
+ this.noPax = !!opt.noPax
+ this.noMtime = !!opt.noMtime
+ this.mtime = opt.mtime || null
+
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+
+ if (!this.preservePaths && path.win32.isAbsolute(p)) {
+ // absolutes on posix are also absolutes on win32
+ // so we only need to test this one to get both
+ const parsed = path.win32.parse(p)
+ this.warn('stripping ' + parsed.root + ' from absolute path', p)
+ this.path = p.substr(parsed.root.length)
+ }
+
+ this.win32 = !!opt.win32 || process.platform === 'win32'
+ if (this.win32) {
+ this.path = winchars.decode(this.path.replace(/\\/g, '/'))
+ p = p.replace(/\\/g, '/')
+ }
+
+ this.absolute = opt.absolute || path.resolve(this.cwd, p)
+
+ if (this.path === '')
+ this.path = './'
+
+ if (this.statCache.has(this.absolute))
+ this[ONLSTAT](this.statCache.get(this.absolute))
+ else
+ this[LSTAT]()
+ }
+
+ [LSTAT] () {
+ fs.lstat(this.absolute, (er, stat) => {
+ if (er)
+ return this.emit('error', er)
+ this[ONLSTAT](stat)
+ })
+ }
+
+ [ONLSTAT] (stat) {
+ this.statCache.set(this.absolute, stat)
+ this.stat = stat
+ if (!stat.isFile())
+ stat.size = 0
+ this.type = getType(stat)
+ this.emit('stat', stat)
+ this[PROCESS]()
+ }
+
+ [PROCESS] () {
+ switch (this.type) {
+ case 'File': return this[FILE]()
+ case 'Directory': return this[DIRECTORY]()
+ case 'SymbolicLink': return this[SYMLINK]()
+ // unsupported types are ignored.
+ default: return this.end()
+ }
+ }
+
+ [MODE] (mode) {
+ return modeFix(mode, this.type === 'Directory')
+ }
+
+ [HEADER] () {
+ if (this.type === 'Directory' && this.portable)
+ this.noMtime = true
+
+ this.header = new Header({
+ path: this.path,
+ linkpath: this.linkpath,
+ // only the permissions and setuid/setgid/sticky bitflags
+ // not the higher-order bits that specify file type
+ mode: this[MODE](this.stat.mode),
+ uid: this.portable ? null : this.stat.uid,
+ gid: this.portable ? null : this.stat.gid,
+ size: this.stat.size,
+ mtime: this.noMtime ? null : this.mtime || this.stat.mtime,
+ type: this.type,
+ uname: this.portable ? null :
+ this.stat.uid === this.myuid ? this.myuser : '',
+ atime: this.portable ? null : this.stat.atime,
+ ctime: this.portable ? null : this.stat.ctime
+ })
+
+ if (this.header.encode() && !this.noPax)
+ this.write(new Pax({
+ atime: this.portable ? null : this.header.atime,
+ ctime: this.portable ? null : this.header.ctime,
+ gid: this.portable ? null : this.header.gid,
+ mtime: this.noMtime ? null : this.mtime || this.header.mtime,
+ path: this.path,
+ linkpath: this.linkpath,
+ size: this.header.size,
+ uid: this.portable ? null : this.header.uid,
+ uname: this.portable ? null : this.header.uname,
+ dev: this.portable ? null : this.stat.dev,
+ ino: this.portable ? null : this.stat.ino,
+ nlink: this.portable ? null : this.stat.nlink
+ }).encode())
+ this.write(this.header.block)
+ }
+
+ [DIRECTORY] () {
+ if (this.path.substr(-1) !== '/')
+ this.path += '/'
+ this.stat.size = 0
+ this[HEADER]()
+ this.end()
+ }
+
+ [SYMLINK] () {
+ fs.readlink(this.absolute, (er, linkpath) => {
+ if (er)
+ return this.emit('error', er)
+ this[ONREADLINK](linkpath)
+ })
+ }
+
+ [ONREADLINK] (linkpath) {
+ this.linkpath = linkpath
+ this[HEADER]()
+ this.end()
+ }
+
+ [HARDLINK] (linkpath) {
+ this.type = 'Link'
+ this.linkpath = path.relative(this.cwd, linkpath)
+ this.stat.size = 0
+ this[HEADER]()
+ this.end()
+ }
+
+ [FILE] () {
+ if (this.stat.nlink > 1) {
+ const linkKey = this.stat.dev + ':' + this.stat.ino
+ if (this.linkCache.has(linkKey)) {
+ const linkpath = this.linkCache.get(linkKey)
+ if (linkpath.indexOf(this.cwd) === 0)
+ return this[HARDLINK](linkpath)
+ }
+ this.linkCache.set(linkKey, this.absolute)
+ }
+
+ this[HEADER]()
+ if (this.stat.size === 0)
+ return this.end()
+
+ this[OPENFILE]()
+ }
+
+ [OPENFILE] () {
+ fs.open(this.absolute, 'r', (er, fd) => {
+ if (er)
+ return this.emit('error', er)
+ this[ONOPENFILE](fd)
+ })
+ }
+
+ [ONOPENFILE] (fd) {
+ const blockLen = 512 * Math.ceil(this.stat.size / 512)
+ const bufLen = Math.min(blockLen, this.maxReadSize)
+ const buf = Buffer.allocUnsafe(bufLen)
+ this[READ](fd, buf, 0, buf.length, 0, this.stat.size, blockLen)
+ }
+
+ [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
+ fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
+ if (er)
+ return this[CLOSE](fd, _ => this.emit('error', er))
+ this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
+ })
+ }
+
+ [CLOSE] (fd, cb) {
+ fs.close(fd, cb)
+ }
+
+ [ONREAD] (fd, buf, offset, length, pos, remain, blockRemain, bytesRead) {
+ if (bytesRead <= 0 && remain > 0) {
+ const er = new Error('encountered unexpected EOF')
+ er.path = this.absolute
+ er.syscall = 'read'
+ er.code = 'EOF'
+ this[CLOSE](fd, _ => _)
+ return this.emit('error', er)
+ }
+
+ if (bytesRead > remain) {
+ const er = new Error('did not encounter expected EOF')
+ er.path = this.absolute
+ er.syscall = 'read'
+ er.code = 'EOF'
+ this[CLOSE](fd, _ => _)
+ return this.emit('error', er)
+ }
+
+    // zero-fill the rest of the buffer with as much block padding as fits
+ if (bytesRead === remain) {
+ for (let i = bytesRead; i < length && bytesRead < blockRemain; i++) {
+ buf[i + offset] = 0
+        bytesRead++
+        remain++
+ }
+ }
+
+ const writeBuf = offset === 0 && bytesRead === buf.length ?
+ buf : buf.slice(offset, offset + bytesRead)
+ remain -= bytesRead
+ blockRemain -= bytesRead
+ pos += bytesRead
+ offset += bytesRead
+
+ this.write(writeBuf)
+
+ if (!remain) {
+ if (blockRemain)
+ this.write(Buffer.alloc(blockRemain))
+ this.end()
+ this[CLOSE](fd, _ => _)
+ return
+ }
+
+ if (offset >= length) {
+ buf = Buffer.allocUnsafe(length)
+ offset = 0
+ }
+ length = buf.length - offset
+ this[READ](fd, buf, offset, length, pos, remain, blockRemain)
+ }
+})
+
+class WriteEntrySync extends WriteEntry {
+ constructor (path, opt) {
+ super(path, opt)
+ }
+
+ [LSTAT] () {
+ this[ONLSTAT](fs.lstatSync(this.absolute))
+ }
+
+ [SYMLINK] () {
+ this[ONREADLINK](fs.readlinkSync(this.absolute))
+ }
+
+ [OPENFILE] () {
+ this[ONOPENFILE](fs.openSync(this.absolute, 'r'))
+ }
+
+ [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
+ let threw = true
+ try {
+ const bytesRead = fs.readSync(fd, buf, offset, length, pos)
+ this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
+ threw = false
+ } finally {
+ if (threw)
+ try { this[CLOSE](fd) } catch (er) {}
+ }
+ }
+
+ [CLOSE] (fd) {
+ fs.closeSync(fd)
+ }
+}
+
+const WriteEntryTar = warner(class WriteEntryTar extends MiniPass {
+ constructor (readEntry, opt) {
+ opt = opt || {}
+ super(opt)
+ this.preservePaths = !!opt.preservePaths
+ this.portable = !!opt.portable
+ this.strict = !!opt.strict
+ this.noPax = !!opt.noPax
+ this.noMtime = !!opt.noMtime
+
+ this.readEntry = readEntry
+ this.type = readEntry.type
+ if (this.type === 'Directory' && this.portable)
+ this.noMtime = true
+
+ this.path = readEntry.path
+ this.mode = this[MODE](readEntry.mode)
+ this.uid = this.portable ? null : readEntry.uid
+ this.gid = this.portable ? null : readEntry.gid
+ this.uname = this.portable ? null : readEntry.uname
+ this.gname = this.portable ? null : readEntry.gname
+ this.size = readEntry.size
+ this.mtime = this.noMtime ? null : opt.mtime || readEntry.mtime
+ this.atime = this.portable ? null : readEntry.atime
+ this.ctime = this.portable ? null : readEntry.ctime
+ this.linkpath = readEntry.linkpath
+
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+
+ if (path.isAbsolute(this.path) && !this.preservePaths) {
+ const parsed = path.parse(this.path)
+ this.warn(
+ 'stripping ' + parsed.root + ' from absolute path',
+ this.path
+ )
+ this.path = this.path.substr(parsed.root.length)
+ }
+
+ this.remain = readEntry.size
+ this.blockRemain = readEntry.startBlockSize
+
+ this.header = new Header({
+ path: this.path,
+ linkpath: this.linkpath,
+ // only the permissions and setuid/setgid/sticky bitflags
+ // not the higher-order bits that specify file type
+ mode: this.mode,
+ uid: this.portable ? null : this.uid,
+ gid: this.portable ? null : this.gid,
+ size: this.size,
+ mtime: this.noMtime ? null : this.mtime,
+ type: this.type,
+ uname: this.portable ? null : this.uname,
+ atime: this.portable ? null : this.atime,
+ ctime: this.portable ? null : this.ctime
+ })
+
+ if (this.header.encode() && !this.noPax)
+ super.write(new Pax({
+ atime: this.portable ? null : this.atime,
+ ctime: this.portable ? null : this.ctime,
+ gid: this.portable ? null : this.gid,
+ mtime: this.noMtime ? null : this.mtime,
+ path: this.path,
+ linkpath: this.linkpath,
+ size: this.size,
+ uid: this.portable ? null : this.uid,
+ uname: this.portable ? null : this.uname,
+ dev: this.portable ? null : this.readEntry.dev,
+ ino: this.portable ? null : this.readEntry.ino,
+ nlink: this.portable ? null : this.readEntry.nlink
+ }).encode())
+
+ super.write(this.header.block)
+ readEntry.pipe(this)
+ }
+
+ [MODE] (mode) {
+ return modeFix(mode, this.type === 'Directory')
+ }
+
+ write (data) {
+ const writeLen = data.length
+ if (writeLen > this.blockRemain)
+ throw new Error('writing more to entry than is appropriate')
+ this.blockRemain -= writeLen
+ return super.write(data)
+ }
+
+ end () {
+ if (this.blockRemain)
+ this.write(Buffer.alloc(this.blockRemain))
+ return super.end()
+ }
+})
+
+WriteEntry.Sync = WriteEntrySync
+WriteEntry.Tar = WriteEntryTar
+
+const getType = stat =>
+ stat.isFile() ? 'File'
+ : stat.isDirectory() ? 'Directory'
+ : stat.isSymbolicLink() ? 'SymbolicLink'
+ : 'Unsupported'
+
+module.exports = WriteEntry
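A sketch of a WriteEntry on its own: it emits one 512-byte header block (plus an optional pax extension), then the file body, zero-padded out to a 512-byte boundary. The file name is hypothetical:

const WriteEntry = require('tar').WriteEntry
const we = new WriteEntry('README.md', { cwd: process.cwd() })
let bytes = 0
we.on('data', chunk => bytes += chunk.length)
we.on('end', () => console.log(bytes % 512 === 0)) // => true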
diff --git a/node_modules/node-gyp/node_modules/tar/package.json b/node_modules/node-gyp/node_modules/tar/package.json
new file mode 100644
index 000000000..4063f6912
--- /dev/null
+++ b/node_modules/node-gyp/node_modules/tar/package.json
@@ -0,0 +1,82 @@
+{
+ "_from": "tar@^4.4.12",
+ "_id": "tar@4.4.13",
+ "_inBundle": false,
+ "_integrity": "sha512-w2VwSrBoHa5BsSyH+KxEqeQBAllHhccyMFVHtGtdMpF4W7IRWfZjFiQceJPChOeTsSDVUpER2T8FA93pr0L+QA==",
+ "_location": "/node-gyp/tar",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "tar@^4.4.12",
+ "name": "tar",
+ "escapedName": "tar",
+ "rawSpec": "^4.4.12",
+ "saveSpec": null,
+ "fetchSpec": "^4.4.12"
+ },
+ "_requiredBy": [
+ "/node-gyp"
+ ],
+ "_resolved": "https://registry.npmjs.org/tar/-/tar-4.4.13.tgz",
+ "_shasum": "43b364bc52888d555298637b10d60790254ab525",
+ "_spec": "tar@^4.4.12",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/node-gyp",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/npm/node-tar/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "chownr": "^1.1.1",
+ "fs-minipass": "^1.2.5",
+ "minipass": "^2.8.6",
+ "minizlib": "^1.2.1",
+ "mkdirp": "^0.5.0",
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.3"
+ },
+ "deprecated": false,
+ "description": "tar for node",
+ "devDependencies": {
+ "chmodr": "^1.2.0",
+ "end-of-stream": "^1.4.1",
+ "events-to-array": "^1.1.2",
+ "mutate-fs": "^2.1.1",
+ "rimraf": "^2.6.3",
+ "tap": "^14.6.5",
+ "tar-fs": "^1.16.3",
+ "tar-stream": "^1.6.2"
+ },
+ "engines": {
+ "node": ">=4.5"
+ },
+ "files": [
+ "index.js",
+ "lib/"
+ ],
+ "homepage": "https://github.com/npm/node-tar#readme",
+ "license": "ISC",
+ "name": "tar",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/npm/node-tar.git"
+ },
+ "scripts": {
+ "bench": "for i in benchmarks/*/*.js; do echo $i; for j in {1..5}; do node $i || break; done; done",
+ "genparse": "node scripts/generate-parse-fixtures.js",
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap"
+ },
+ "tap": {
+ "coverage-map": "map.js",
+ "check-coverage": true
+ },
+ "version": "4.4.13"
+}
diff --git a/node_modules/pacote/node_modules/fs-minipass/LICENSE b/node_modules/pacote/node_modules/fs-minipass/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/pacote/node_modules/fs-minipass/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/pacote/node_modules/fs-minipass/README.md b/node_modules/pacote/node_modules/fs-minipass/README.md
new file mode 100644
index 000000000..1e61241cf
--- /dev/null
+++ b/node_modules/pacote/node_modules/fs-minipass/README.md
@@ -0,0 +1,70 @@
+# fs-minipass
+
+Filesystem streams based on [minipass](http://npm.im/minipass).
+
+4 classes are exported:
+
+- ReadStream
+- ReadStreamSync
+- WriteStream
+- WriteStreamSync
+
+When using `ReadStreamSync`, all of the data is made available
+immediately upon consuming the stream. Nothing is buffered in memory
+when the stream is constructed. If the stream is piped to a writer,
+then it will synchronously `read()` and emit data into the writer as
+fast as the writer can consume it. (That is, it will respect
+backpressure.) If you call `stream.read()` then it will read the
+entire file and return the contents.
+
+When using `WriteStreamSync`, every write is flushed to the file
+synchronously. If your writes all come in a single tick, then it'll
+write it all out in a single tick. It's as synchronous as you are.
+
+The async versions work much like their node builtin counterparts,
+but introduce significantly less Stream machinery overhead.
+
+## USAGE
+
+It's just streams: you pipe them, or read() them, or write() to them.
+
+```js
+const fsm = require('fs-minipass')
+const readStream = new fsm.ReadStream('file.txt')
+const writeStream = new fsm.WriteStream('output.txt')
+writeStream.write('some file header or whatever\n')
+readStream.pipe(writeStream)
+```
+
+## ReadStream(path, options)
+
+Path string is required, but somewhat irrelevant if an open file
+descriptor is passed in as an option.
+
+Options:
+
+- `fd` Pass in a numeric file descriptor, if the file is already open.
+- `readSize` The size of reads to do, defaults to 16MB
+- `size` The size of the file, if known. Prevents zero-byte read()
+ call at the end.
+- `autoClose` Set to `false` to prevent the file descriptor from being
+ closed when the file is done being read.
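+
+For instance, a minimal sketch (assuming `file.txt` exists) of reading
+from an already-open file descriptor without letting the stream close it:
+
+```js
+const fs = require('fs')
+const fsm = require('fs-minipass')
+
+// we own the descriptor, so autoClose is disabled
+const fd = fs.openSync('file.txt', 'r')
+const stream = new fsm.ReadStream('file.txt', { fd, autoClose: false })
+stream.on('end', () => fs.closeSync(fd))
+stream.resume() // or pipe it somewhere
+```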
+
+## WriteStream(path, options)
+
+Path string is required, but somewhat irrelevant if an open file
+descriptor is passed in as an option.
+
+Options:
+
+- `fd` Pass in a numeric file descriptor, if the file is already open.
+- `mode` The mode to create the file with. Defaults to `0o666`.
+- `start` The position in the file to start writing.  If not
+  specified, then writing starts at position zero, and the file is
+  truncated by default.
+- `autoClose` Set to `false` to prevent the file descriptor from being
+ closed when the stream is ended.
+- `flags` Flags to use when opening the file.  Irrelevant if `fd` is
+  passed in, since the file won't be opened in that case.  Defaults to
+  `'r+'` if a `start` position is specified, or `'w'` otherwise.
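+
+A small sketch (hypothetical file name) of writing into the middle of an
+existing file; with `start` set, the default flag is `'r+'`, so the file
+is not truncated:
+
+```js
+const fsm = require('fs-minipass')
+
+const ws = new fsm.WriteStreamSync('data.bin', { start: 16 })
+ws.write(Buffer.from([0xde, 0xad, 0xbe, 0xef])) // patch 4 bytes at offset 16
+ws.end()
+```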
diff --git a/node_modules/pacote/node_modules/fs-minipass/index.js b/node_modules/pacote/node_modules/fs-minipass/index.js
new file mode 100644
index 000000000..cd585a83c
--- /dev/null
+++ b/node_modules/pacote/node_modules/fs-minipass/index.js
@@ -0,0 +1,387 @@
+'use strict'
+const MiniPass = require('minipass')
+const EE = require('events').EventEmitter
+const fs = require('fs')
+
+// for writev
+const binding = process.binding('fs')
+const writeBuffers = binding.writeBuffers
+/* istanbul ignore next */
+const FSReqWrap = binding.FSReqWrap || binding.FSReqCallback
+
+const _autoClose = Symbol('_autoClose')
+const _close = Symbol('_close')
+const _ended = Symbol('_ended')
+const _fd = Symbol('_fd')
+const _finished = Symbol('_finished')
+const _flags = Symbol('_flags')
+const _flush = Symbol('_flush')
+const _handleChunk = Symbol('_handleChunk')
+const _makeBuf = Symbol('_makeBuf')
+const _mode = Symbol('_mode')
+const _needDrain = Symbol('_needDrain')
+const _onerror = Symbol('_onerror')
+const _onopen = Symbol('_onopen')
+const _onread = Symbol('_onread')
+const _onwrite = Symbol('_onwrite')
+const _open = Symbol('_open')
+const _path = Symbol('_path')
+const _pos = Symbol('_pos')
+const _queue = Symbol('_queue')
+const _read = Symbol('_read')
+const _readSize = Symbol('_readSize')
+const _reading = Symbol('_reading')
+const _remain = Symbol('_remain')
+const _size = Symbol('_size')
+const _write = Symbol('_write')
+const _writing = Symbol('_writing')
+const _defaultFlag = Symbol('_defaultFlag')
+
+class ReadStream extends MiniPass {
+ constructor (path, opt) {
+ opt = opt || {}
+ super(opt)
+
+ this.writable = false
+
+ if (typeof path !== 'string')
+ throw new TypeError('path must be a string')
+
+ this[_fd] = typeof opt.fd === 'number' ? opt.fd : null
+ this[_path] = path
+ this[_readSize] = opt.readSize || 16*1024*1024
+ this[_reading] = false
+ this[_size] = typeof opt.size === 'number' ? opt.size : Infinity
+ this[_remain] = this[_size]
+ this[_autoClose] = typeof opt.autoClose === 'boolean' ?
+ opt.autoClose : true
+
+ if (typeof this[_fd] === 'number')
+ this[_read]()
+ else
+ this[_open]()
+ }
+
+ get fd () { return this[_fd] }
+ get path () { return this[_path] }
+
+ write () {
+ throw new TypeError('this is a readable stream')
+ }
+
+ end () {
+ throw new TypeError('this is a readable stream')
+ }
+
+ [_open] () {
+ fs.open(this[_path], 'r', (er, fd) => this[_onopen](er, fd))
+ }
+
+ [_onopen] (er, fd) {
+ if (er)
+ this[_onerror](er)
+ else {
+ this[_fd] = fd
+ this.emit('open', fd)
+ this[_read]()
+ }
+ }
+
+ [_makeBuf] () {
+ return Buffer.allocUnsafe(Math.min(this[_readSize], this[_remain]))
+ }
+
+ [_read] () {
+ if (!this[_reading]) {
+ this[_reading] = true
+ const buf = this[_makeBuf]()
+ /* istanbul ignore if */
+ if (buf.length === 0) return process.nextTick(() => this[_onread](null, 0, buf))
+ fs.read(this[_fd], buf, 0, buf.length, null, (er, br, buf) =>
+ this[_onread](er, br, buf))
+ }
+ }
+
+ [_onread] (er, br, buf) {
+ this[_reading] = false
+ if (er)
+ this[_onerror](er)
+ else if (this[_handleChunk](br, buf))
+ this[_read]()
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ fs.close(this[_fd], _ => this.emit('close'))
+ this[_fd] = null
+ }
+ }
+
+ [_onerror] (er) {
+ this[_reading] = true
+ this[_close]()
+ this.emit('error', er)
+ }
+
+ [_handleChunk] (br, buf) {
+ let ret = false
+ // no effect if infinite
+ this[_remain] -= br
+ if (br > 0)
+ ret = super.write(br < buf.length ? buf.slice(0, br) : buf)
+
+ if (br === 0 || this[_remain] <= 0) {
+ ret = false
+ this[_close]()
+ super.end()
+ }
+
+ return ret
+ }
+
+ emit (ev, data) {
+ switch (ev) {
+ case 'prefinish':
+ case 'finish':
+ break
+
+ case 'drain':
+ if (typeof this[_fd] === 'number')
+ this[_read]()
+ break
+
+ default:
+ return super.emit(ev, data)
+ }
+ }
+}
+
+class ReadStreamSync extends ReadStream {
+ [_open] () {
+ let threw = true
+ try {
+ this[_onopen](null, fs.openSync(this[_path], 'r'))
+ threw = false
+ } finally {
+ if (threw)
+ this[_close]()
+ }
+ }
+
+ [_read] () {
+ let threw = true
+ try {
+ if (!this[_reading]) {
+ this[_reading] = true
+ do {
+ const buf = this[_makeBuf]()
+ /* istanbul ignore next */
+ const br = buf.length === 0 ? 0 : fs.readSync(this[_fd], buf, 0, buf.length, null)
+ if (!this[_handleChunk](br, buf))
+ break
+ } while (true)
+ this[_reading] = false
+ }
+ threw = false
+ } finally {
+ if (threw)
+ this[_close]()
+ }
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ try {
+ fs.closeSync(this[_fd])
+ } catch (er) {}
+ this[_fd] = null
+ this.emit('close')
+ }
+ }
+}
+
+class WriteStream extends EE {
+ constructor (path, opt) {
+ opt = opt || {}
+ super(opt)
+ this.readable = false
+ this[_writing] = false
+ this[_ended] = false
+ this[_needDrain] = false
+ this[_queue] = []
+ this[_path] = path
+ this[_fd] = typeof opt.fd === 'number' ? opt.fd : null
+ this[_mode] = opt.mode === undefined ? 0o666 : opt.mode
+ this[_pos] = typeof opt.start === 'number' ? opt.start : null
+ this[_autoClose] = typeof opt.autoClose === 'boolean' ?
+ opt.autoClose : true
+
+ // truncating makes no sense when writing into the middle
+ const defaultFlag = this[_pos] !== null ? 'r+' : 'w'
+ this[_defaultFlag] = opt.flags === undefined
+ this[_flags] = this[_defaultFlag] ? defaultFlag : opt.flags
+
+ if (this[_fd] === null)
+ this[_open]()
+ }
+
+ get fd () { return this[_fd] }
+ get path () { return this[_path] }
+
+ [_onerror] (er) {
+ this[_close]()
+ this[_writing] = true
+ this.emit('error', er)
+ }
+
+ [_open] () {
+ fs.open(this[_path], this[_flags], this[_mode],
+ (er, fd) => this[_onopen](er, fd))
+ }
+
+ [_onopen] (er, fd) {
+ if (this[_defaultFlag] &&
+ this[_flags] === 'r+' &&
+ er && er.code === 'ENOENT') {
+ this[_flags] = 'w'
+ this[_open]()
+ } else if (er)
+ this[_onerror](er)
+ else {
+ this[_fd] = fd
+ this.emit('open', fd)
+ this[_flush]()
+ }
+ }
+
+ end (buf, enc) {
+ if (buf)
+ this.write(buf, enc)
+
+ this[_ended] = true
+
+ // synthetic after-write logic, where drain/finish live
+ if (!this[_writing] && !this[_queue].length &&
+ typeof this[_fd] === 'number')
+ this[_onwrite](null, 0)
+ }
+
+ write (buf, enc) {
+ if (typeof buf === 'string')
+      // Buffer.from() rather than the deprecated/unsafe new Buffer()
+      buf = Buffer.from(buf, enc)
+
+ if (this[_ended]) {
+ this.emit('error', new Error('write() after end()'))
+ return false
+ }
+
+ if (this[_fd] === null || this[_writing] || this[_queue].length) {
+ this[_queue].push(buf)
+ this[_needDrain] = true
+ return false
+ }
+
+ this[_writing] = true
+ this[_write](buf)
+ return true
+ }
+
+ [_write] (buf) {
+ fs.write(this[_fd], buf, 0, buf.length, this[_pos], (er, bw) =>
+ this[_onwrite](er, bw))
+ }
+
+ [_onwrite] (er, bw) {
+ if (er)
+ this[_onerror](er)
+ else {
+ if (this[_pos] !== null)
+ this[_pos] += bw
+ if (this[_queue].length)
+ this[_flush]()
+ else {
+ this[_writing] = false
+
+ if (this[_ended] && !this[_finished]) {
+ this[_finished] = true
+ this[_close]()
+ this.emit('finish')
+ } else if (this[_needDrain]) {
+ this[_needDrain] = false
+ this.emit('drain')
+ }
+ }
+ }
+ }
+
+ [_flush] () {
+ if (this[_queue].length === 0) {
+ if (this[_ended])
+ this[_onwrite](null, 0)
+ } else if (this[_queue].length === 1)
+ this[_write](this[_queue].pop())
+ else {
+ const iovec = this[_queue]
+ this[_queue] = []
+ writev(this[_fd], iovec, this[_pos],
+ (er, bw) => this[_onwrite](er, bw))
+ }
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ fs.close(this[_fd], _ => this.emit('close'))
+ this[_fd] = null
+ }
+ }
+}
+
+class WriteStreamSync extends WriteStream {
+ [_open] () {
+ let fd
+ try {
+ fd = fs.openSync(this[_path], this[_flags], this[_mode])
+ } catch (er) {
+ if (this[_defaultFlag] &&
+ this[_flags] === 'r+' &&
+ er && er.code === 'ENOENT') {
+ this[_flags] = 'w'
+ return this[_open]()
+ } else
+ throw er
+ }
+ this[_onopen](null, fd)
+ }
+
+ [_close] () {
+ if (this[_autoClose] && typeof this[_fd] === 'number') {
+ try {
+ fs.closeSync(this[_fd])
+ } catch (er) {}
+ this[_fd] = null
+ this.emit('close')
+ }
+ }
+
+ [_write] (buf) {
+ try {
+ this[_onwrite](null,
+ fs.writeSync(this[_fd], buf, 0, buf.length, this[_pos]))
+ } catch (er) {
+ this[_onwrite](er, 0)
+ }
+ }
+}
+
+const writev = (fd, iovec, pos, cb) => {
+ const done = (er, bw) => cb(er, bw, iovec)
+ const req = new FSReqWrap()
+ req.oncomplete = done
+ binding.writeBuffers(fd, iovec, pos, req)
+}
+
+exports.ReadStream = ReadStream
+exports.ReadStreamSync = ReadStreamSync
+
+exports.WriteStream = WriteStream
+exports.WriteStreamSync = WriteStreamSync
diff --git a/node_modules/pacote/node_modules/fs-minipass/package.json b/node_modules/pacote/node_modules/fs-minipass/package.json
new file mode 100644
index 000000000..8fbc81c81
--- /dev/null
+++ b/node_modules/pacote/node_modules/fs-minipass/package.json
@@ -0,0 +1,65 @@
+{
+ "_from": "fs-minipass@^1.2.5",
+ "_id": "fs-minipass@1.2.7",
+ "_inBundle": false,
+ "_integrity": "sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA==",
+ "_location": "/pacote/fs-minipass",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "fs-minipass@^1.2.5",
+ "name": "fs-minipass",
+ "escapedName": "fs-minipass",
+ "rawSpec": "^1.2.5",
+ "saveSpec": null,
+ "fetchSpec": "^1.2.5"
+ },
+ "_requiredBy": [
+ "/pacote/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-1.2.7.tgz",
+ "_shasum": "ccff8570841e7fe4265693da88936c55aed7f7c7",
+ "_spec": "fs-minipass@^1.2.5",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/pacote/node_modules/tar",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/npm/fs-minipass/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "minipass": "^2.6.0"
+ },
+ "deprecated": false,
+ "description": "fs read and write streams based on minipass",
+ "devDependencies": {
+ "mutate-fs": "^2.0.1",
+ "tap": "^14.6.4"
+ },
+ "files": [
+ "index.js"
+ ],
+ "homepage": "https://github.com/npm/fs-minipass#readme",
+ "keywords": [],
+ "license": "ISC",
+ "main": "index.js",
+ "name": "fs-minipass",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/npm/fs-minipass.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap"
+ },
+ "tap": {
+ "check-coverage": true
+ },
+ "version": "1.2.7"
+}
diff --git a/node_modules/pacote/node_modules/minizlib/LICENSE b/node_modules/pacote/node_modules/minizlib/LICENSE
new file mode 100644
index 000000000..ffce7383f
--- /dev/null
+++ b/node_modules/pacote/node_modules/minizlib/LICENSE
@@ -0,0 +1,26 @@
+Minizlib was created by Isaac Z. Schlueter.
+It is a derivative work of the Node.js project.
+
+"""
+Copyright Isaac Z. Schlueter and Contributors
+Copyright Node.js contributors. All rights reserved.
+Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""
diff --git a/node_modules/pacote/node_modules/minizlib/README.md b/node_modules/pacote/node_modules/minizlib/README.md
new file mode 100644
index 000000000..4097b8522
--- /dev/null
+++ b/node_modules/pacote/node_modules/minizlib/README.md
@@ -0,0 +1,53 @@
+# minizlib
+
+A fast zlib stream built on [minipass](http://npm.im/minipass) and
+Node.js's zlib binding.
+
+This module was created to serve the needs of
+[node-tar](http://npm.im/tar) and
+[minipass-fetch](http://npm.im/minipass-fetch).
+
+Brotli is supported in versions of node with a Brotli binding.
+
+## How does this differ from the streams in `require('zlib')`?
+
+First, there are no convenience methods to compress or decompress a
+buffer. If you want those, use the built-in `zlib` module. This is
+only streams.  That being said, Minipass streams make it fairly easy to
+use as one-liners: `new zlib.Deflate().end(data).read()` will return the
+deflate compressed result.
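+
+For example, a complete synchronous round trip:
+
+```js
+const zlib = require('minizlib')
+
+const compressed = new zlib.Deflate().end('hello, world').read()
+const restored = new zlib.Inflate().end(compressed).read().toString()
+// restored === 'hello, world'
+```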
+
+This module compresses and decompresses the data as fast as you feed
+it in. It is synchronous, and runs on the main process thread. Zlib
+and Brotli operations can be high CPU, but they're very fast, and doing it
+this way means much less bookkeeping and artificial deferral.
+
+Node's built in zlib streams are built on top of `stream.Transform`.
+They do the maximally safe thing with respect to consistent
+asynchrony, buffering, and backpressure.
+
+See [Minipass](http://npm.im/minipass) for more on the differences between
+Node.js core streams and Minipass streams, and the convenience methods
+provided by that class.
+
+## Classes
+
+- Deflate
+- Inflate
+- Gzip
+- Gunzip
+- DeflateRaw
+- InflateRaw
+- Unzip
+- BrotliCompress (Node v10 and higher)
+- BrotliDecompress (Node v10 and higher)
+
+## USAGE
+
+```js
+const zlib = require('minizlib')
+const input = sourceOfCompressedData()
+const decode = new zlib.BrotliDecompress()
+const output = whereToWriteTheDecodedData()
+input.pipe(decode).pipe(output)
+```
diff --git a/node_modules/pacote/node_modules/minizlib/constants.js b/node_modules/pacote/node_modules/minizlib/constants.js
new file mode 100644
index 000000000..641ebc731
--- /dev/null
+++ b/node_modules/pacote/node_modules/minizlib/constants.js
@@ -0,0 +1,115 @@
+// Update with any zlib constants that are added or changed in the future.
+// Node v6 didn't export this, so we just hard code the version and rely
+// on all the other hard-coded values from zlib v4736. When node v6
+// support drops, we can just export the realZlibConstants object.
+const realZlibConstants = require('zlib').constants ||
+ /* istanbul ignore next */ { ZLIB_VERNUM: 4736 }
+
+module.exports = Object.freeze(Object.assign(Object.create(null), {
+ Z_NO_FLUSH: 0,
+ Z_PARTIAL_FLUSH: 1,
+ Z_SYNC_FLUSH: 2,
+ Z_FULL_FLUSH: 3,
+ Z_FINISH: 4,
+ Z_BLOCK: 5,
+ Z_OK: 0,
+ Z_STREAM_END: 1,
+ Z_NEED_DICT: 2,
+ Z_ERRNO: -1,
+ Z_STREAM_ERROR: -2,
+ Z_DATA_ERROR: -3,
+ Z_MEM_ERROR: -4,
+ Z_BUF_ERROR: -5,
+ Z_VERSION_ERROR: -6,
+ Z_NO_COMPRESSION: 0,
+ Z_BEST_SPEED: 1,
+ Z_BEST_COMPRESSION: 9,
+ Z_DEFAULT_COMPRESSION: -1,
+ Z_FILTERED: 1,
+ Z_HUFFMAN_ONLY: 2,
+ Z_RLE: 3,
+ Z_FIXED: 4,
+ Z_DEFAULT_STRATEGY: 0,
+ DEFLATE: 1,
+ INFLATE: 2,
+ GZIP: 3,
+ GUNZIP: 4,
+ DEFLATERAW: 5,
+ INFLATERAW: 6,
+ UNZIP: 7,
+ BROTLI_DECODE: 8,
+ BROTLI_ENCODE: 9,
+ Z_MIN_WINDOWBITS: 8,
+ Z_MAX_WINDOWBITS: 15,
+ Z_DEFAULT_WINDOWBITS: 15,
+ Z_MIN_CHUNK: 64,
+ Z_MAX_CHUNK: Infinity,
+ Z_DEFAULT_CHUNK: 16384,
+ Z_MIN_MEMLEVEL: 1,
+ Z_MAX_MEMLEVEL: 9,
+ Z_DEFAULT_MEMLEVEL: 8,
+ Z_MIN_LEVEL: -1,
+ Z_MAX_LEVEL: 9,
+ Z_DEFAULT_LEVEL: -1,
+ BROTLI_OPERATION_PROCESS: 0,
+ BROTLI_OPERATION_FLUSH: 1,
+ BROTLI_OPERATION_FINISH: 2,
+ BROTLI_OPERATION_EMIT_METADATA: 3,
+ BROTLI_MODE_GENERIC: 0,
+ BROTLI_MODE_TEXT: 1,
+ BROTLI_MODE_FONT: 2,
+ BROTLI_DEFAULT_MODE: 0,
+ BROTLI_MIN_QUALITY: 0,
+ BROTLI_MAX_QUALITY: 11,
+ BROTLI_DEFAULT_QUALITY: 11,
+ BROTLI_MIN_WINDOW_BITS: 10,
+ BROTLI_MAX_WINDOW_BITS: 24,
+ BROTLI_LARGE_MAX_WINDOW_BITS: 30,
+ BROTLI_DEFAULT_WINDOW: 22,
+ BROTLI_MIN_INPUT_BLOCK_BITS: 16,
+ BROTLI_MAX_INPUT_BLOCK_BITS: 24,
+ BROTLI_PARAM_MODE: 0,
+ BROTLI_PARAM_QUALITY: 1,
+ BROTLI_PARAM_LGWIN: 2,
+ BROTLI_PARAM_LGBLOCK: 3,
+ BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: 4,
+ BROTLI_PARAM_SIZE_HINT: 5,
+ BROTLI_PARAM_LARGE_WINDOW: 6,
+ BROTLI_PARAM_NPOSTFIX: 7,
+ BROTLI_PARAM_NDIRECT: 8,
+ BROTLI_DECODER_RESULT_ERROR: 0,
+ BROTLI_DECODER_RESULT_SUCCESS: 1,
+ BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: 2,
+ BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: 3,
+ BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: 0,
+ BROTLI_DECODER_PARAM_LARGE_WINDOW: 1,
+ BROTLI_DECODER_NO_ERROR: 0,
+ BROTLI_DECODER_SUCCESS: 1,
+ BROTLI_DECODER_NEEDS_MORE_INPUT: 2,
+ BROTLI_DECODER_NEEDS_MORE_OUTPUT: 3,
+ BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: -1,
+ BROTLI_DECODER_ERROR_FORMAT_RESERVED: -2,
+ BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: -3,
+ BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: -4,
+ BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: -5,
+ BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: -6,
+ BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: -7,
+ BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: -8,
+ BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: -9,
+ BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: -10,
+ BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: -11,
+ BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: -12,
+ BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: -13,
+ BROTLI_DECODER_ERROR_FORMAT_PADDING_1: -14,
+ BROTLI_DECODER_ERROR_FORMAT_PADDING_2: -15,
+ BROTLI_DECODER_ERROR_FORMAT_DISTANCE: -16,
+ BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: -19,
+ BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: -20,
+ BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: -21,
+ BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: -22,
+ BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: -25,
+ BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: -26,
+ BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: -27,
+ BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: -30,
+ BROTLI_DECODER_ERROR_UNREACHABLE: -31,
+}, realZlibConstants))
diff --git a/node_modules/pacote/node_modules/minizlib/index.js b/node_modules/pacote/node_modules/minizlib/index.js
new file mode 100644
index 000000000..295047b9c
--- /dev/null
+++ b/node_modules/pacote/node_modules/minizlib/index.js
@@ -0,0 +1,320 @@
+'use strict'
+
+const assert = require('assert')
+const Buffer = require('buffer').Buffer
+const realZlib = require('zlib')
+
+const constants = exports.constants = require('./constants.js')
+const Minipass = require('minipass')
+
+const OriginalBufferConcat = Buffer.concat
+
+class ZlibError extends Error {
+ constructor (err) {
+ super('zlib: ' + err.message)
+ this.code = err.code
+ this.errno = err.errno
+ /* istanbul ignore if */
+ if (!this.code)
+ this.code = 'ZLIB_ERROR'
+
+ this.message = 'zlib: ' + err.message
+ Error.captureStackTrace(this, this.constructor)
+ }
+
+ get name () {
+ return 'ZlibError'
+ }
+}
+
+// the Zlib class they all inherit from
+// This thing manages the queue of requests, and returns
+// true or false if there is anything in the queue when
+// you call the .write() method.
+const _opts = Symbol('opts')
+const _flushFlag = Symbol('flushFlag')
+const _finishFlushFlag = Symbol('finishFlushFlag')
+const _fullFlushFlag = Symbol('fullFlushFlag')
+const _handle = Symbol('handle')
+const _onError = Symbol('onError')
+const _sawError = Symbol('sawError')
+const _level = Symbol('level')
+const _strategy = Symbol('strategy')
+const _ended = Symbol('ended')
+const _defaultFullFlush = Symbol('_defaultFullFlush')
+
+class ZlibBase extends Minipass {
+ constructor (opts, mode) {
+ if (!opts || typeof opts !== 'object')
+ throw new TypeError('invalid options for ZlibBase constructor')
+
+ super(opts)
+ this[_ended] = false
+ this[_opts] = opts
+
+ this[_flushFlag] = opts.flush
+ this[_finishFlushFlag] = opts.finishFlush
+ // this will throw if any options are invalid for the class selected
+ try {
+ this[_handle] = new realZlib[mode](opts)
+ } catch (er) {
+ // make sure that all errors get decorated properly
+ throw new ZlibError(er)
+ }
+
+ this[_onError] = (err) => {
+ this[_sawError] = true
+ // there is no way to cleanly recover.
+ // continuing only obscures problems.
+ this.close()
+ this.emit('error', err)
+ }
+
+ this[_handle].on('error', er => this[_onError](new ZlibError(er)))
+    this.once('end', () => this.close())
+ }
+
+ close () {
+ if (this[_handle]) {
+ this[_handle].close()
+ this[_handle] = null
+ this.emit('close')
+ }
+ }
+
+ reset () {
+ if (!this[_sawError]) {
+ assert(this[_handle], 'zlib binding closed')
+ return this[_handle].reset()
+ }
+ }
+
+ flush (flushFlag) {
+ if (this.ended)
+ return
+
+ if (typeof flushFlag !== 'number')
+ flushFlag = this[_fullFlushFlag]
+ this.write(Object.assign(Buffer.alloc(0), { [_flushFlag]: flushFlag }))
+ }
+
+ end (chunk, encoding, cb) {
+ if (chunk)
+ this.write(chunk, encoding)
+ this.flush(this[_finishFlushFlag])
+ this[_ended] = true
+ return super.end(null, null, cb)
+ }
+
+ get ended () {
+ return this[_ended]
+ }
+
+ write (chunk, encoding, cb) {
+ // process the chunk using the sync process
+ // then super.write() all the outputted chunks
+ if (typeof encoding === 'function')
+ cb = encoding, encoding = 'utf8'
+
+ if (typeof chunk === 'string')
+ chunk = Buffer.from(chunk, encoding)
+
+ if (this[_sawError])
+ return
+ assert(this[_handle], 'zlib binding closed')
+
+ // _processChunk tries to .close() the native handle after it's done, so we
+ // intercept that by temporarily making it a no-op.
+ const nativeHandle = this[_handle]._handle
+ const originalNativeClose = nativeHandle.close
+ nativeHandle.close = () => {}
+ const originalClose = this[_handle].close
+ this[_handle].close = () => {}
+ // It also calls `Buffer.concat()` at the end, which may be convenient
+ // for some, but which we are not interested in as it slows us down.
+ Buffer.concat = (args) => args
+ let result
+ try {
+ const flushFlag = typeof chunk[_flushFlag] === 'number'
+ ? chunk[_flushFlag] : this[_flushFlag]
+ result = this[_handle]._processChunk(chunk, flushFlag)
+ // if we don't throw, reset it back how it was
+ Buffer.concat = OriginalBufferConcat
+ } catch (err) {
+ // or if we do, put Buffer.concat() back before we emit error
+ // Error events call into user code, which may call Buffer.concat()
+ Buffer.concat = OriginalBufferConcat
+ this[_onError](new ZlibError(err))
+ } finally {
+ if (this[_handle]) {
+ // Core zlib resets `_handle` to null after attempting to close the
+ // native handle. Our no-op handler prevented actual closure, but we
+ // need to restore the `._handle` property.
+ this[_handle]._handle = nativeHandle
+ nativeHandle.close = originalNativeClose
+ this[_handle].close = originalClose
+ // `_processChunk()` adds an 'error' listener. If we don't remove it
+ // after each call, these handlers start piling up.
+ this[_handle].removeAllListeners('error')
+ }
+ }
+
+ let writeReturn
+ if (result) {
+ if (Array.isArray(result) && result.length > 0) {
+ // The first buffer is always `handle._outBuffer`, which would be
+ // re-used for later invocations; so, we always have to copy that one.
+ writeReturn = super.write(Buffer.from(result[0]))
+ for (let i = 1; i < result.length; i++) {
+ writeReturn = super.write(result[i])
+ }
+ } else {
+ writeReturn = super.write(Buffer.from(result))
+ }
+ }
+
+ if (cb)
+ cb()
+ return writeReturn
+ }
+}
+
+class Zlib extends ZlibBase {
+ constructor (opts, mode) {
+ opts = opts || {}
+
+ opts.flush = opts.flush || constants.Z_NO_FLUSH
+ opts.finishFlush = opts.finishFlush || constants.Z_FINISH
+ super(opts, mode)
+
+ this[_fullFlushFlag] = constants.Z_FULL_FLUSH
+ this[_level] = opts.level
+ this[_strategy] = opts.strategy
+ }
+
+ params (level, strategy) {
+ if (this[_sawError])
+ return
+
+ if (!this[_handle])
+ throw new Error('cannot switch params when binding is closed')
+
+ // no way to test this without also not supporting params at all
+ /* istanbul ignore if */
+ if (!this[_handle].params)
+ throw new Error('not supported in this implementation')
+
+ if (this[_level] !== level || this[_strategy] !== strategy) {
+ this.flush(constants.Z_SYNC_FLUSH)
+ assert(this[_handle], 'zlib binding closed')
+ // .params() calls .flush(), but the latter is always async in the
+ // core zlib. We override .flush() temporarily to intercept that and
+ // flush synchronously.
+ const origFlush = this[_handle].flush
+ this[_handle].flush = (flushFlag, cb) => {
+ this.flush(flushFlag)
+ cb()
+ }
+ try {
+ this[_handle].params(level, strategy)
+ } finally {
+ this[_handle].flush = origFlush
+ }
+ /* istanbul ignore else */
+ if (this[_handle]) {
+ this[_level] = level
+ this[_strategy] = strategy
+ }
+ }
+ }
+}
+
+// minimal 2-byte header
+class Deflate extends Zlib {
+ constructor (opts) {
+ super(opts, 'Deflate')
+ }
+}
+
+class Inflate extends Zlib {
+ constructor (opts) {
+ super(opts, 'Inflate')
+ }
+}
+
+// gzip - bigger header, same deflate compression
+class Gzip extends Zlib {
+ constructor (opts) {
+ super(opts, 'Gzip')
+ }
+}
+
+class Gunzip extends Zlib {
+ constructor (opts) {
+ super(opts, 'Gunzip')
+ }
+}
+
+// raw - no header
+class DeflateRaw extends Zlib {
+ constructor (opts) {
+ super(opts, 'DeflateRaw')
+ }
+}
+
+class InflateRaw extends Zlib {
+ constructor (opts) {
+ super(opts, 'InflateRaw')
+ }
+}
+
+// auto-detect header.
+class Unzip extends Zlib {
+ constructor (opts) {
+ super(opts, 'Unzip')
+ }
+}
+
+class Brotli extends ZlibBase {
+ constructor (opts, mode) {
+ opts = opts || {}
+
+ opts.flush = opts.flush || constants.BROTLI_OPERATION_PROCESS
+ opts.finishFlush = opts.finishFlush || constants.BROTLI_OPERATION_FINISH
+
+ super(opts, mode)
+
+ this[_fullFlushFlag] = constants.BROTLI_OPERATION_FLUSH
+ }
+}
+
+class BrotliCompress extends Brotli {
+ constructor (opts) {
+ super(opts, 'BrotliCompress')
+ }
+}
+
+class BrotliDecompress extends Brotli {
+ constructor (opts) {
+ super(opts, 'BrotliDecompress')
+ }
+}
+
+exports.Deflate = Deflate
+exports.Inflate = Inflate
+exports.Gzip = Gzip
+exports.Gunzip = Gunzip
+exports.DeflateRaw = DeflateRaw
+exports.InflateRaw = InflateRaw
+exports.Unzip = Unzip
+/* istanbul ignore else */
+if (typeof realZlib.BrotliCompress === 'function') {
+ exports.BrotliCompress = BrotliCompress
+ exports.BrotliDecompress = BrotliDecompress
+} else {
+ exports.BrotliCompress = exports.BrotliDecompress = class {
+ constructor () {
+ throw new Error('Brotli is not supported in this version of Node.js')
+ }
+ }
+}
diff --git a/node_modules/pacote/node_modules/minizlib/package.json b/node_modules/pacote/node_modules/minizlib/package.json
new file mode 100644
index 000000000..354447bd1
--- /dev/null
+++ b/node_modules/pacote/node_modules/minizlib/package.json
@@ -0,0 +1,71 @@
+{
+ "_from": "minizlib@^1.2.1",
+ "_id": "minizlib@1.3.3",
+ "_inBundle": false,
+ "_integrity": "sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==",
+ "_location": "/pacote/minizlib",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "minizlib@^1.2.1",
+ "name": "minizlib",
+ "escapedName": "minizlib",
+ "rawSpec": "^1.2.1",
+ "saveSpec": null,
+ "fetchSpec": "^1.2.1"
+ },
+ "_requiredBy": [
+ "/pacote/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.3.3.tgz",
+ "_shasum": "2290de96818a34c29551c8a8d301216bd65a861d",
+ "_spec": "minizlib@^1.2.1",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/pacote/node_modules/tar",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/minizlib/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "minipass": "^2.9.0"
+ },
+ "deprecated": false,
+ "description": "A small fast zlib stream built on [minipass](http://npm.im/minipass) and Node.js's zlib binding.",
+ "devDependencies": {
+ "tap": "^12.0.1"
+ },
+ "files": [
+ "index.js",
+ "constants.js"
+ ],
+ "homepage": "https://github.com/isaacs/minizlib#readme",
+ "keywords": [
+ "zlib",
+ "gzip",
+ "gunzip",
+ "deflate",
+ "inflate",
+ "compression",
+ "zip",
+ "unzip"
+ ],
+ "license": "MIT",
+ "main": "index.js",
+ "name": "minizlib",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/minizlib.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --all; git push origin --tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap test/*.js --100 -J"
+ },
+ "version": "1.3.3"
+}
diff --git a/node_modules/pacote/node_modules/tar/LICENSE b/node_modules/pacote/node_modules/tar/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/pacote/node_modules/tar/README.md b/node_modules/pacote/node_modules/tar/README.md
new file mode 100644
index 000000000..034e4865c
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/README.md
@@ -0,0 +1,954 @@
+# node-tar
+
+[![Build Status](https://travis-ci.org/npm/node-tar.svg?branch=master)](https://travis-ci.org/npm/node-tar)
+
+[Fast](./benchmarks) and full-featured Tar for Node.js
+
+The API is designed to mimic the behavior of `tar(1)` on unix systems.
+If you are familiar with how tar works, most of this will hopefully be
+straightforward for you. If not, then hopefully this module can teach
+you useful unix skills that may come in handy someday :)
+
+## Background
+
+A "tar file" or "tarball" is an archive of file system entries
+(directories, files, links, etc.) The name comes from "tape archive".
+If you run `man tar` on almost any Unix command line, you'll learn
+quite a bit about what it can do, and its history.
+
+Tar has 5 main top-level commands:
+
+* `c` Create an archive
+* `r` Replace entries within an archive
+* `u` Update entries within an archive (ie, replace if they're newer)
+* `t` List out the contents of an archive
+* `x` Extract an archive to disk
+
+The other flags and options modify how this top level function works.
+
+## High-Level API
+
+These 5 functions are the high-level API. All of them have a
+single-character name (for unix nerds familiar with `tar(1)`) as well
+as a long name (for everyone else).
+
+All the high-level functions take the following arguments, all three
+of which are optional and may be omitted.
+
+1. `options` - An optional object specifying various options
+2. `paths` - An array of paths to add or extract
+3. `callback` - Called when the command is completed, if async. (If
+ sync or no file specified, providing a callback throws a
+ `TypeError`.)
+
+If the command is sync (ie, if `options.sync=true`), then the
+callback is not allowed, since the action will be completed immediately.
+
+If a `file` option is specified and the command is async, then a
+`Promise` is returned.  In this case, a callback may also be
+provided, and it will be called when the command is completed.
+
+If a `file` option is not specified, then a stream is returned. For
+`create`, this is a readable stream of the generated archive. For
+`list` and `extract` this is a writable stream that an archive should
+be written into. If a file is not specified, then a callback is not
+allowed, because you're already getting a stream to work with.
+
+`replace` and `update` only work on existing archives, and so require
+a `file` argument.
+
+Sync commands without a file argument return a stream that acts on its
+input immediately in the same tick. For readable streams, this means
+that all of the data is immediately available by calling
+`stream.read()`. For writable streams, it will be acted upon as soon
+as it is provided, but this can be at any time.
+
+### Warnings
+
+Some things cause tar to emit a warning, but should usually not cause
+the entire operation to fail. There are three ways to handle
+warnings:
+
+1. **Ignore them** (default) Invalid entries won't be put in the
+ archive, and invalid entries won't be unpacked. This is usually
+ fine, but can hide failures that you might care about.
+2. **Notice them** Add an `onwarn` function to the options, or listen
+ to the `'warn'` event on any tar stream. The function will get
+ called as `onwarn(message, data)`. Handle as appropriate.
+3. **Explode them.** Set `strict: true` in the options object, and
+ `warn` messages will be emitted as `'error'` events instead. If
+ there's no `error` handler, this causes the program to crash. If
+ used with a promise-returning/callback-taking method, then it'll
+ send the error to the promise/callback.
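+
+For instance, a minimal sketch of options 2 and 3 (assuming
+`archive.tgz` exists):
+
+```js
+// option 2: observe warnings without failing
+tar.x({
+  file: 'archive.tgz',
+  onwarn: (message, data) => console.error('tar warning:', message)
+})
+
+// option 3: make warnings fatal
+tar.x({ file: 'archive.tgz', strict: true })
+  .catch(er => console.error('extraction failed:', er))
+```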
+
+### Examples
+
+The API mimics the `tar(1)` command line functionality, with aliases
+for more human-readable option and function names. The goal is that
+if you know how to use `tar(1)` in Unix, then you know how to use
+`require('tar')` in JavaScript.
+
+To replicate `tar czf my-tarball.tgz files and folders`, you'd do:
+
+```js
+tar.c(
+ {
+ gzip: <true|gzip options>,
+ file: 'my-tarball.tgz'
+ },
+ ['some', 'files', 'and', 'folders']
+).then(_ => { .. tarball has been created .. })
+```
+
+To replicate `tar cz files and folders > my-tarball.tgz`, you'd do:
+
+```js
+tar.c( // or tar.create
+ {
+ gzip: <true|gzip options>
+ },
+ ['some', 'files', 'and', 'folders']
+).pipe(fs.createWriteStream('my-tarball.tgz'))
+```
+
+To replicate `tar xf my-tarball.tgz` you'd do:
+
+```js
+tar.x( // or tar.extract(
+ {
+ file: 'my-tarball.tgz'
+ }
+).then(_=> { .. tarball has been dumped in cwd .. })
+```
+
+To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`:
+
+```js
+fs.createReadStream('my-tarball.tgz').pipe(
+ tar.x({
+ strip: 1,
+ C: 'some-dir' // alias for cwd:'some-dir', also ok
+ })
+)
+```
+
+To replicate `tar tf my-tarball.tgz`, do this:
+
+```js
+tar.t({
+ file: 'my-tarball.tgz',
+ onentry: entry => { .. do whatever with it .. }
+})
+```
+
+To replicate `cat my-tarball.tgz | tar t` do:
+
+```js
+fs.createReadStream('my-tarball.tgz')
+ .pipe(tar.t())
+ .on('entry', entry => { .. do whatever with it .. })
+```
+
+To do anything synchronous, add `sync: true` to the options. Note
+that sync functions don't take a callback and don't return a promise.
+When the function returns, it's already done. Sync methods without a
+file argument return a sync stream, which flushes immediately. But,
+of course, it still won't be done until you `.end()` it.
+
+To filter entries, add `filter: <function>` to the options.
+Tar-creating methods call the filter with `filter(path, stat)`.
+Tar-reading methods (including extraction) call the filter with
+`filter(path, entry)`. The filter is called in the `this`-context of
+the `Pack` or `Unpack` stream object.
+
+The arguments list to `tar t` and `tar x` specify a list of filenames
+to extract or list, so they're equivalent to a filter that tests if
+the file is in the list.
+
+For those who _aren't_ fans of tar's single-character command names:
+
+```
+tar.c === tar.create
+tar.r === tar.replace (appends to archive, file is required)
+tar.u === tar.update (appends if newer, file is required)
+tar.x === tar.extract
+tar.t === tar.list
+```
+
+Keep reading for all the command descriptions and options, as well as
+the low-level API that they are built on.
+
+### tar.c(options, fileList, callback) [alias: tar.create]
+
+Create a tarball archive.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Write the tarball archive to the specified filename. If this
+ is specified, then the callback will be fired when the file has been
+ written, and a promise will be returned that resolves when the file
+ is written. If a filename is not specified, then a Readable Stream
+ will be returned which will emit the file data. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+ will be fully written after the call to `tar.c`. If this is set,
+ and a file is not provided, then the resulting stream will already
+ have the data ready to `read` or `emit('data')` as soon as you
+ request it.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths. [Alias: `P`]
+- `mode` The mode to set on the created file archive
+- `noDirRecurse` Do not recursively archive the contents of
+ directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+ [Alias: `m`, `no-mtime`]
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
+
+The following options are mostly internal, but can be modified in some
+advanced use cases, such as re-using caches between runs.
+
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `readdirCache` A Map object that caches calls to `readdir`.
+- `jobs` A number specifying how many concurrent jobs to run.
+ Defaults to 4.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
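+
+A hypothetical sketch of cache re-use, cutting repeated `lstat` and
+`readdir` work when archiving the same tree twice:
+
+```js
+const statCache = new Map()
+const readdirCache = new Map()
+tar.c({ statCache, readdirCache, gzip: true, file: 'first.tgz' }, ['shared-dir'])
+  .then(() => tar.c({ statCache, readdirCache, gzip: true, file: 'second.tgz' }, ['shared-dir']))
+```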
+
+### tar.x(options, fileList, callback) [alias: tar.extract]
+
+Extract a tarball archive.
+
+The `fileList` is an array of paths to extract from the tarball. If
+no paths are provided, then all the entries are extracted.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Note that all directories that are created will be forced to be
+writable, readable, and listable by their owner, to avoid cases where
+a directory prevents extraction of child entries by virtue of its
+mode.
+
+Most extraction errors will cause a `warn` event to be emitted. If
+the `cwd` is missing, or not a directory, then the extraction will
+fail completely.
+
+The following options are supported:
+
+- `cwd` Extract files relative to the specified directory. Defaults
+ to `process.cwd()`. If provided, this must exist and must be a
+ directory. [Alias: `C`]
+- `file` The archive file to extract. If not specified, then a
+ Writable stream is returned where the archive data should be
+ written. [Alias: `f`]
+- `sync` Create files and directories synchronously.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being unpacked. Return `true` to unpack the entry from the
+ archive, or `false` to skip it.
+- `newer` Set to true to keep the existing file on disk if it's newer
+ than the file in the archive. [Alias: `keep-newer`,
+ `keep-newer-files`]
+- `keep` Do not overwrite existing files. In particular, if a file
+ appears more than once in an archive, later copies will not
+ overwrite earlier copies. [Alias: `k`, `keep-existing`]
+- `preservePaths` Allow absolute paths, paths containing `..`, and
+ extracting through symbolic links. By default, `/` is stripped from
+ absolute paths, `..` paths are not extracted, and any file whose
+ location would be modified by a symbolic link is not extracted.
+ [Alias: `P`]
+- `unlink` Unlink files before creating them. Without this option,
+ tar overwrites existing files, which preserves existing hardlinks.
+ With this option, existing hardlinks will be broken, as will any
+ symlink that would affect the location of an extracted file. [Alias:
+ `U`]
+- `strip` Remove the specified number of leading path elements.
+ Pathnames with fewer elements will be silently skipped. Note that
+ the pathname is edited after applying the filter, but before
+ security checks. [Alias: `strip-components`, `stripComponents`]
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `preserveOwner` If true, tar will set the `uid` and `gid` of
+ extracted entries to the `uid` and `gid` fields in the archive.
+ This defaults to true when run as root, and false otherwise. If
+ false, then files and directories will be set with the owner and
+ group of the user running the process. This is similar to `-p` in
+ `tar(1)`, but ACLs and other system-specific data is never unpacked
+ in this implementation, and modes are set by default already.
+ [Alias: `p`]
+- `uid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified user id, regardless of the `uid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `gid` option.
+- `gid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified group id, regardless of the `gid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `uid` option.
+- `noMtime` Set to true to omit writing `mtime` value for extracted
+ entries. [Alias: `m`, `no-mtime`]
+- `transform` Provide a function that takes an `entry` object, and
+ returns a stream, or any falsey value. If a stream is provided,
+ then that stream's data will be written instead of the contents of
+ the archive entry. If a falsey value is provided, then the entry is
+ written to disk as normal. (To exclude items from extraction, use
+ the `filter` option described above.)
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter.
+
+The following options are mostly internal, but can be modified in some
+advanced use cases, such as re-using caches between runs.
+
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `umask` Filter the modes of entries like `process.umask()`.
+- `dmode` Default mode for directories
+- `fmode` Default mode for files
+- `dirCache` A Map object of which directories exist.
+- `maxMetaEntrySize` The maximum size of meta entries that is
+ supported. Defaults to 1 MB.
+
+Note that using an asynchronous stream type with the `transform`
+option will cause undefined behavior in sync extractions.
+[MiniPass](http://npm.im/minipass)-based streams are designed for this
+use case.
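+
+A hypothetical `transform` sketch, using a MiniPass subclass so that
+sync extractions stay well-defined, per the note above (assumes
+`archive.tgz` exists):
+
+```js
+const MiniPass = require('minipass')
+
+class Upper extends MiniPass {
+  write (chunk, encoding, cb) {
+    // upper-case file contents on the way to disk
+    return super.write(chunk.toString().toUpperCase(), 'utf8', cb)
+  }
+}
+
+tar.x({
+  file: 'archive.tgz',
+  transform: entry => entry.type === 'File' ? new Upper() : null
+})
+```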
+
+### tar.t(options, fileList, callback) [alias: tar.list]
+
+List the contents of a tarball archive.
+
+The `fileList` is an array of paths to list from the tarball. If
+no paths are provided, then all the entries are listed.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Returns an event emitter that emits `entry` events with
+`tar.ReadEntry` objects. However, they don't emit `'data'` or `'end'`
+events. (If you want to get actual readable entries, use the
+`tar.Parse` class instead.)
+
+The following options are supported:
+
+- `cwd` Extract files relative to the specified directory. Defaults
+ to `process.cwd()`. [Alias: `C`]
+- `file` The archive file to list. If not specified, then a
+ Writable stream is returned where the archive data should be
+ written. [Alias: `f`]
+- `sync` Read the specified file synchronously. (This has no effect
+ when a file option isn't specified, because entries are emitted as
+ fast as they are parsed from the stream anyway.)
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being listed. Return `true` to emit the entry from the
+ archive, or `false` to skip it.
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter. This is important for when both `file` and
+ `sync` are set, because it will be called synchronously.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noResume` By default, `entry` streams are resumed immediately after
+ the call to `onentry`. Set `noResume: true` to suppress this
+ behavior. Note that by opting into this, the stream will never
+ complete until the entry data is consumed.
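+
+For example, tallying the unpacked size of an archive (hypothetical
+file name):
+
+```js
+let total = 0
+tar.t({
+  file: 'archive.tgz',
+  onentry: entry => { total += entry.size }
+}).then(() => console.log(total + ' bytes'))
+```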
+
+### tar.u(options, fileList, callback) [alias: tar.update]
+
+Add files to an archive if they are newer than the entry already in
+the tarball archive.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Required. Write the tarball archive to the specified
+ filename. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+ will be fully written after the call to `tar.c`.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for adding entries to the
+ archive. Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths. [Alias: `P`]
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+ directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+ [Alias: `m`, `no-mtime`]
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
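+
+For example, refreshing an archive with anything that changed since it
+was created (hypothetical file names):
+
+```js
+tar.u({ file: 'backup.tar' }, ['src/'])
+  .then(() => console.log('backup.tar refreshed'))
+```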
+
+### tar.r(options, fileList, callback) [alias: tar.replace]
+
+Add files to an existing archive. Because later entries override
+earlier entries, this effectively replaces any existing entries.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Required. Write the tarball archive to the specified
+ filename. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+ will be fully written after the call to `tar.c`.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for adding entries to the
+ archive. Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths. [Alias: `P`]
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+ directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+ [Alias: `m`, `no-mtime`]
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
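+For example, a minimal sketch (the archive and file names here are
+placeholders):
+
+```js
+const tar = require('tar')
+
+// replace (or add) an entry in an existing uncompressed archive
+tar.r({ file: 'archive.tar', sync: true }, ['updated.txt'])
+```
+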
+
+## Low-Level API
+
+### class tar.Pack
+
+A readable tar stream.
+
+Has all the standard readable stream interface stuff. `'data'` and
+`'end'` events, `read()` method, `pause()` and `resume()`, etc.
+
+#### constructor(options)
+
+The following options are supported:
+
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`.
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+ object with settings for `zlib.Gzip()`
+- `filter` A function that gets called with `(path, stat)` for each
+ entry being added. Return `true` to add the entry to the archive,
+ or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `readdirCache` A Map object that caches calls to `readdir`.
+- `jobs` A number specifying how many concurrent jobs to run.
+ Defaults to 4.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+ directories.
+- `follow` Set to true to pack the targets of symbolic links. Without
+ this option, symbolic links are archived as such.
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+- `mtime` Set to a `Date` object to force a specific `mtime` for
+ everything added to the archive. Overridden by `noMtime`.
+
+#### add(path)
+
+Adds an entry to the archive. Returns the Pack stream.
+
+#### write(path)
+
+Adds an entry to the archive. Returns true if flushed.
+
+#### end()
+
+Finishes the archive.
+
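+A minimal sketch (paths are hypothetical), using the chaining style
+that `add()` and `end()` allow:
+
+```js
+const tar = require('tar')
+const fs = require('fs')
+
+new tar.Pack({ cwd: '/some/dir' })
+  .add('some-file.txt')
+  .add('some-subdir')
+  .end()
+  .pipe(fs.createWriteStream('out.tar'))
+```
+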
+### class tar.Pack.Sync
+
+Synchronous version of `tar.Pack`.
+
+### class tar.Unpack
+
+A writable stream that unpacks a tar archive onto the file system.
+
+All the normal writable stream stuff is supported. `write()` and
+`end()` methods, `'drain'` events, etc.
+
+Note that all directories that are created will be forced to be
+writable, readable, and listable by their owner, to avoid cases where
+a directory prevents extraction of child entries by virtue of its
+mode.
+
+`'close'` is emitted when it's done writing stuff to the file system.
+
+Most unpack errors will cause a `warn` event to be emitted. If the
+`cwd` is missing, or not a directory, then an error will be emitted.
+
+#### constructor(options)
+
+- `cwd` Extract files relative to the specified directory. Defaults
+ to `process.cwd()`. If provided, this must exist and must be a
+ directory.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being unpacked. Return `true` to unpack the entry from the
+ archive, or `false` to skip it.
+- `newer` Set to true to keep the existing file on disk if it's newer
+ than the file in the archive.
+- `keep` Do not overwrite existing files. In particular, if a file
+ appears more than once in an archive, later copies will not
+ overwrite earlier copies.
+- `preservePaths` Allow absolute paths, paths containing `..`, and
+ extracting through symbolic links. By default, `/` is stripped from
+ absolute paths, `..` paths are not extracted, and any file whose
+ location would be modified by a symbolic link is not extracted.
+- `unlink` Unlink files before creating them. Without this option,
+ tar overwrites existing files, which preserves existing hardlinks.
+ With this option, existing hardlinks will be broken, as will any
+ symlink that would affect the location of an extracted file.
+- `strip` Remove the specified number of leading path elements.
+ Pathnames with fewer elements will be silently skipped. Note that
+ the pathname is edited after applying the filter, but before
+ security checks.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `umask` Filter the modes of entries like `process.umask()`.
+- `dmode` Default mode for directories.
+- `fmode` Default mode for files.
+- `dirCache` A Map object of which directories exist.
+- `maxMetaEntrySize` The maximum size of meta entries that is
+ supported. Defaults to 1 MB.
+- `preserveOwner` If true, tar will set the `uid` and `gid` of
+ extracted entries to the `uid` and `gid` fields in the archive.
+ This defaults to true when run as root, and false otherwise. If
+ false, then files and directories will be set with the owner and
+ group of the user running the process. This is similar to `-p` in
+ `tar(1)`, but ACLs and other system-specific data is never unpacked
+ in this implementation, and modes are set by default already.
+- `win32` True if on a windows platform. Causes behavior where
+ filenames containing `<|>?` chars are converted to
+ windows-compatible values while being unpacked.
+- `uid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified user id, regardless of the `uid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `gid` option.
+- `gid` Set to a number to force ownership of all extracted files and
+ folders, and all implicitly created directories, to be owned by the
+ specified group id, regardless of the `gid` field in the archive.
+ Cannot be used along with `preserveOwner`. Requires also setting a
+ `uid` option.
+- `noMtime` Set to true to omit writing `mtime` value for extracted
+ entries.
+- `transform` Provide a function that takes an `entry` object, and
+ returns a stream, or any falsey value. If a stream is provided,
+ then that stream's data will be written instead of the contents of
+ the archive entry. If a falsey value is provided, then the entry is
+ written to disk as normal. (To exclude items from extraction, use
+ the `filter` option described above.)
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter.
+
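+For example, a minimal sketch (archive path and destination are
+hypothetical):
+
+```js
+const tar = require('tar')
+const fs = require('fs')
+
+fs.createReadStream('archive.tgz')
+  .pipe(new tar.Unpack({ cwd: '/some/extract/dir' }))
+  .on('close', () => console.log('extraction complete'))
+```
+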
+### class tar.Unpack.Sync
+
+Synchronous version of `tar.Unpack`.
+
+Note that using an asynchronous stream type with the `transform`
+option will cause undefined behavior in sync unpack streams.
+[MiniPass](http://npm.im/minipass)-based streams are designed for this
+use case.
+
+### class tar.Parse
+
+A writable stream that parses a tar archive stream. All the standard
+writable stream stuff is supported.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Emits `'entry'` events with `tar.ReadEntry` objects, which are
+themselves readable streams that you can pipe wherever.
+
+Each `'entry'` event will not be emitted until the entry before it is
+flushed through, so make sure to either consume the data (with
+`on('data', ...)` or `.pipe(...)`) or throw it away with `.resume()`
+to keep the stream flowing.
+
+#### constructor(options)
+
+Returns an event emitter that emits `entry` events with
+`tar.ReadEntry` objects.
+
+The following options are supported:
+
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+ entry being listed. Return `true` to emit the entry from the
+ archive, or `false` to skip it.
+- `onentry` A function that gets called with `(entry)` for each entry
+ that passes the filter.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+
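+For example, a sketch that lists `.js` entries from a hypothetical
+archive:
+
+```js
+const tar = require('tar')
+const fs = require('fs')
+
+fs.createReadStream('archive.tgz').pipe(new tar.Parse({
+  filter: path => path.endsWith('.js'),
+  onentry: entry => {
+    console.log(entry.path, entry.size)
+    entry.resume() // discard the body to keep the stream flowing
+  }
+}))
+```
+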
+#### abort(message, error)
+
+Stop all parsing activities. This is called when there are zlib
+errors. It also emits a warning with the message and error provided.
+
+### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass)
+
+A representation of an entry that is being read out of a tar archive.
+
+It has the following fields:
+
+- `extended` The extended metadata object provided to the constructor.
+- `globalExtended` The global extended metadata object provided to the
+ constructor.
+- `remain` The number of bytes remaining to be written into the
+ stream.
+- `blockRemain` The number of 512-byte blocks remaining to be written
+ into the stream.
+- `ignore` Whether this entry should be ignored.
+- `meta` True if this represents metadata about the next entry, false
+ if it represents a filesystem object.
+- All the fields from the header, extended header, and global extended
+ header are added to the ReadEntry object. So it has `path`, `type`,
+ `size`, `mode`, and so on.
+
+#### constructor(header, extended, globalExtended)
+
+Create a new ReadEntry object with the specified header, extended
+header, and global extended header values.
+
+### class tar.WriteEntry extends [MiniPass](http://npm.im/minipass)
+
+A representation of an entry that is being written from the file
+system into a tar archive.
+
+Emits data for the Header, and for the Pax Extended Header if one is
+required, as well as any body data.
+
+Creating a WriteEntry for a directory does not also create
+WriteEntry objects for all of the directory contents.
+
+It has the following fields:
+
+- `path` The path field that will be written to the archive. By
+ default, this is also the path from the cwd to the file system
+ object.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `myuid` If supported, the uid of the user running the current
+ process.
+- `myuser` The `env.USER` string if set, or `''`. Set as the entry
+ `uname` field if the file's `uid` matches `this.myuid`.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 1 MB.
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`.
+- `absolute` The absolute path to the entry on the filesystem. By
+ default, this is `path.resolve(this.cwd, this.path)`, but it can be
+ overridden explicitly.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `win32` True if on a windows platform. Causes behavior where paths
+ replace `\` with `/` and filenames containing the windows-compatible
+ forms of `<|>?:` characters are converted to actual `<|>?:` characters
+ in the archive.
+- `noPax` Suppress pax extended headers. Note that this means that
+ long paths and linkpaths will be truncated, and large or negative
+ numeric values may be interpreted incorrectly.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+
+
+#### constructor(path, options)
+
+`path` is the path of the entry as it is written in the archive.
+
+The following options are supported:
+
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+ Defaults to 1 MB.
+- `linkCache` A Map object containing the device and inode value for
+ any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `cwd` The current working directory for creating the archive.
+ Defaults to `process.cwd()`.
+- `absolute` The absolute path to the entry on the filesystem. By
+ default, this is `path.resolve(this.cwd, this.path)`, but it can be
+ overridden explicitly.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `win32` True if on a windows platform. Causes behavior where paths
+ replace `\` with `/`.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+- `umask` Set to restrict the modes on the entries in the archive,
+ somewhat like how umask works on file creation. Defaults to
+ `process.umask()` on unix systems, or `0o22` on Windows.
+
+#### warn(message, data)
+
+If strict, emit an error with the provided message.
+
+Otherwise, emit a `'warn'` event with the provided message and data.
+
+### class tar.WriteEntry.Sync
+
+Synchronous version of `tar.WriteEntry`.
+
+### class tar.WriteEntry.Tar
+
+A version of tar.WriteEntry that gets its data from a tar.ReadEntry
+instead of from the filesystem.
+
+#### constructor(readEntry, options)
+
+`readEntry` is the entry being read out of another archive.
+
+The following options are supported:
+
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+ `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+ that `mtime` is still included, because this is necessary for other
+ time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+ from absolute paths.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `onwarn` A function that will get called with `(message, data)` for
+ any warnings encountered.
+- `noMtime` Set to true to omit writing `mtime` values for entries.
+ Note that this prevents using other mtime-based features like
+ `tar.update` or the `keepNewer` option with the resulting tar archive.
+
+### class tar.Header
+
+A class for reading and writing header blocks.
+
+It has the following fields:
+
+- `nullBlock` True if decoding a block which is entirely composed of
+ `0x00` null bytes. (Useful because tar files are terminated by
+ at least 2 null blocks.)
+- `cksumValid` True if the checksum in the header is valid, false
+ otherwise.
+- `needPax` True if the values, as encoded, will require a Pax
+ extended header.
+- `path` The path of the entry.
+- `mode` The 4 lowest-order octal digits of the file mode. That is,
+ read/write/execute permissions for world, group, and owner, and the
+ setuid, setgid, and sticky bits.
+- `uid` Numeric user id of the file owner.
+- `gid` Numeric group id of the file owner.
+- `size` Size of the file in bytes.
+- `mtime` Modified time of the file.
+- `cksum` The checksum of the header. This is generated by adding all
+ the bytes of the header block, treating the checksum field itself as
+ all ascii space characters (that is, `0x20`).
+- `type` The human-readable name of the type of entry this represents,
+ or the alphanumeric key if unknown.
+- `typeKey` The alphanumeric key for the type of entry this header
+ represents.
+- `linkpath` The target of Link and SymbolicLink entries.
+- `uname` Human-readable user name of the file owner.
+- `gname` Human-readable group name of the file owner.
+- `devmaj` The major portion of the device number. Always `0` for
+ files, directories, and links.
+- `devmin` The minor portion of the device number. Always `0` for
+ files, directories, and links.
+- `atime` File access time.
+- `ctime` File change time.
+
+#### constructor(data, [offset=0])
+
+`data` is optional. It is either a Buffer that should be interpreted
+as a tar Header starting at the specified offset and continuing for
+512 bytes, or a data object of keys and values to set on the header
+object, and eventually encode as a tar Header.
+
+#### decode(block, offset)
+
+Decode the provided buffer starting at the specified offset.
+
+The buffer must contain at least 512 bytes beyond the specified offset.
+
+#### set(data)
+
+Set the fields in the data object.
+
+#### encode(buffer, offset)
+
+Encode the header fields into the buffer at the specified offset.
+
+Returns `this.needPax` to indicate whether a Pax Extended Header is
+required to properly encode the specified data.
+
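+For example, a round-trip sketch:
+
+```js
+const tar = require('tar')
+
+const h = new tar.Header({
+  path: 'some-file.txt',
+  mode: 0o644,
+  type: 'File',
+  size: 123,
+  mtime: new Date()
+})
+const block = Buffer.alloc(512)
+h.encode(block, 0)
+
+// decoding the same block recovers the fields
+const h2 = new tar.Header(block)
+console.log(h2.path, h2.type, h2.size) // some-file.txt File 123
+```
+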
+### class tar.Pax
+
+An object representing a set of key-value pairs in a Pax extended
+header entry.
+
+It has the following fields. Where the same name is used, they have
+the same semantics as the tar.Header field of the same name.
+
+- `global` True if this represents a global extended header, or false
+ if it is for a single entry.
+- `atime`
+- `charset`
+- `comment`
+- `ctime`
+- `gid`
+- `gname`
+- `linkpath`
+- `mtime`
+- `path`
+- `size`
+- `uid`
+- `uname`
+- `dev`
+- `ino`
+- `nlink`
+
+#### constructor(object, global)
+
+Set the fields from the provided object. `global` is a boolean that
+defaults to false.
+
+#### encode()
+
+Return a Buffer containing the header and body for the Pax extended
+header entry, or `null` if there is nothing to encode.
+
+#### encodeBody()
+
+Return a string representing the body of the pax extended header
+entry.
+
+#### encodeField(fieldName)
+
+Return a string representing the key/value encoding for the specified
+fieldName, or `''` if the field is unset.
+
+### tar.Pax.parse(string, extended, global)
+
+Return a new Pax object created by parsing the contents of the string
+provided.
+
+If the `extended` object is set, then also add the fields from that
+object. (This is necessary because multiple metadata entries can
+occur in sequence.)
+
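+For example (the path is a placeholder):
+
+```js
+const tar = require('tar')
+
+const pax = new tar.Pax({ path: 'a-very-long-path-name.txt' })
+pax.encodeField('path') // e.g. '34 path=a-very-long-path-name.txt\n'
+pax.encode()            // Buffer with the pax header block and body
+```
+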
+### tar.types
+
+A translation table for the `type` field in tar headers.
+
+#### tar.types.name.get(code)
+
+Get the human-readable name for a given alphanumeric code.
+
+#### tar.types.code.get(name)
+
+Get the alphanumeric code for a given human-readable name.
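+
+For example:
+
+```js
+const tar = require('tar')
+
+tar.types.name.get('0')         // 'File'
+tar.types.code.get('Directory') // '5'
+```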
diff --git a/node_modules/pacote/node_modules/tar/index.js b/node_modules/pacote/node_modules/tar/index.js
new file mode 100644
index 000000000..c9ae06e79
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/index.js
@@ -0,0 +1,18 @@
+'use strict'
+
+// high-level commands
+exports.c = exports.create = require('./lib/create.js')
+exports.r = exports.replace = require('./lib/replace.js')
+exports.t = exports.list = require('./lib/list.js')
+exports.u = exports.update = require('./lib/update.js')
+exports.x = exports.extract = require('./lib/extract.js')
+
+// classes
+exports.Pack = require('./lib/pack.js')
+exports.Unpack = require('./lib/unpack.js')
+exports.Parse = require('./lib/parse.js')
+exports.ReadEntry = require('./lib/read-entry.js')
+exports.WriteEntry = require('./lib/write-entry.js')
+exports.Header = require('./lib/header.js')
+exports.Pax = require('./lib/pax.js')
+exports.types = require('./lib/types.js')
diff --git a/node_modules/pacote/node_modules/tar/lib/buffer.js b/node_modules/pacote/node_modules/tar/lib/buffer.js
new file mode 100644
index 000000000..7876d5b3e
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/buffer.js
@@ -0,0 +1,11 @@
+'use strict'
+
+// Buffer in node 4.x < 4.5.0 doesn't have working Buffer.from
+// or Buffer.alloc, and Buffer in node 10 deprecated the ctor.
+// .M, this is fine .\^/M..
+let B = Buffer
+/* istanbul ignore next */
+if (!B.alloc) {
+ B = require('safe-buffer').Buffer
+}
+module.exports = B
diff --git a/node_modules/pacote/node_modules/tar/lib/create.js b/node_modules/pacote/node_modules/tar/lib/create.js
new file mode 100644
index 000000000..a37aa52e6
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/create.js
@@ -0,0 +1,105 @@
+'use strict'
+
+// tar -c
+const hlo = require('./high-level-opt.js')
+
+const Pack = require('./pack.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const t = require('./list.js')
+const path = require('path')
+
+const c = module.exports = (opt_, files, cb) => {
+ if (typeof files === 'function')
+ cb = files
+
+ if (Array.isArray(opt_))
+ files = opt_, opt_ = {}
+
+ if (!files || !Array.isArray(files) || !files.length)
+ throw new TypeError('no files or directories specified')
+
+ files = Array.from(files)
+
+ const opt = hlo(opt_)
+
+ if (opt.sync && typeof cb === 'function')
+ throw new TypeError('callback not supported for sync tar functions')
+
+ if (!opt.file && typeof cb === 'function')
+ throw new TypeError('callback only supported with file option')
+
+ return opt.file && opt.sync ? createFileSync(opt, files)
+ : opt.file ? createFile(opt, files, cb)
+ : opt.sync ? createSync(opt, files)
+ : create(opt, files)
+}
+
+const createFileSync = (opt, files) => {
+ const p = new Pack.Sync(opt)
+ const stream = new fsm.WriteStreamSync(opt.file, {
+ mode: opt.mode || 0o666
+ })
+ p.pipe(stream)
+ addFilesSync(p, files)
+}
+
+const createFile = (opt, files, cb) => {
+ const p = new Pack(opt)
+ const stream = new fsm.WriteStream(opt.file, {
+ mode: opt.mode || 0o666
+ })
+ p.pipe(stream)
+
+ const promise = new Promise((res, rej) => {
+ stream.on('error', rej)
+ stream.on('close', res)
+ p.on('error', rej)
+ })
+
+ addFilesAsync(p, files)
+
+ return cb ? promise.then(cb, cb) : promise
+}
+
+const addFilesSync = (p, files) => {
+ files.forEach(file => {
+ if (file.charAt(0) === '@')
+ t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ sync: true,
+ noResume: true,
+ onentry: entry => p.add(entry)
+ })
+ else
+ p.add(file)
+ })
+ p.end()
+}
+
+const addFilesAsync = (p, files) => {
+ while (files.length) {
+ const file = files.shift()
+ if (file.charAt(0) === '@')
+ return t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ noResume: true,
+ onentry: entry => p.add(entry)
+ }).then(_ => addFilesAsync(p, files))
+ else
+ p.add(file)
+ }
+ p.end()
+}
+
+const createSync = (opt, files) => {
+ const p = new Pack.Sync(opt)
+ addFilesSync(p, files)
+ return p
+}
+
+const create = (opt, files) => {
+ const p = new Pack(opt)
+ addFilesAsync(p, files)
+ return p
+}
diff --git a/node_modules/pacote/node_modules/tar/lib/extract.js b/node_modules/pacote/node_modules/tar/lib/extract.js
new file mode 100644
index 000000000..cbb458a0a
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/extract.js
@@ -0,0 +1,112 @@
+'use strict'
+
+// tar -x
+const hlo = require('./high-level-opt.js')
+const Unpack = require('./unpack.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const path = require('path')
+
+const x = module.exports = (opt_, files, cb) => {
+ if (typeof opt_ === 'function')
+ cb = opt_, files = null, opt_ = {}
+ else if (Array.isArray(opt_))
+ files = opt_, opt_ = {}
+
+ if (typeof files === 'function')
+ cb = files, files = null
+
+ if (!files)
+ files = []
+ else
+ files = Array.from(files)
+
+ const opt = hlo(opt_)
+
+ if (opt.sync && typeof cb === 'function')
+ throw new TypeError('callback not supported for sync tar functions')
+
+ if (!opt.file && typeof cb === 'function')
+ throw new TypeError('callback only supported with file option')
+
+ if (files.length)
+ filesFilter(opt, files)
+
+ return opt.file && opt.sync ? extractFileSync(opt)
+ : opt.file ? extractFile(opt, cb)
+ : opt.sync ? extractSync(opt)
+ : extract(opt)
+}
+
+// construct a filter that limits the file entries listed
+// include child entries if a dir is included
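+// e.g. files = ['a'] matches 'a', 'a/b', and 'a/b/c', but not 'ab'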
+const filesFilter = (opt, files) => {
+ const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
+ const filter = opt.filter
+
+ const mapHas = (file, r) => {
+ const root = r || path.parse(file).root || '.'
+ const ret = file === root ? false
+ : map.has(file) ? map.get(file)
+ : mapHas(path.dirname(file), root)
+
+ map.set(file, ret)
+ return ret
+ }
+
+ opt.filter = filter
+ ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
+ : file => mapHas(file.replace(/\/+$/, ''))
+}
+
+const extractFileSync = opt => {
+ const u = new Unpack.Sync(opt)
+
+ const file = opt.file
+ const stat = fs.statSync(file)
+ // This trades a zero-byte read() syscall for a stat
+ // However, it will usually result in less memory allocation
+ const readSize = opt.maxReadSize || 16*1024*1024
+ const stream = new fsm.ReadStreamSync(file, {
+ readSize: readSize,
+ size: stat.size
+ })
+ stream.pipe(u)
+}
+
+const extractFile = (opt, cb) => {
+ const u = new Unpack(opt)
+ const readSize = opt.maxReadSize || 16*1024*1024
+
+ const file = opt.file
+ const p = new Promise((resolve, reject) => {
+ u.on('error', reject)
+ u.on('close', resolve)
+
+ // This trades a zero-byte read() syscall for a stat
+ // However, it will usually result in less memory allocation
+ fs.stat(file, (er, stat) => {
+ if (er)
+ reject(er)
+ else {
+ const stream = new fsm.ReadStream(file, {
+ readSize: readSize,
+ size: stat.size
+ })
+ stream.on('error', reject)
+ stream.pipe(u)
+ }
+ })
+ })
+ return cb ? p.then(cb, cb) : p
+}
+
+const extractSync = opt => {
+ return new Unpack.Sync(opt)
+}
+
+const extract = opt => {
+ return new Unpack(opt)
+}
diff --git a/node_modules/pacote/node_modules/tar/lib/header.js b/node_modules/pacote/node_modules/tar/lib/header.js
new file mode 100644
index 000000000..d29c3b990
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/header.js
@@ -0,0 +1,289 @@
+'use strict'
+// parse a 512-byte header block to a data object, or vice-versa
+// encode returns `true` if a pax extended header is needed, because
+// the data could not be faithfully encoded in a simple header.
+// (Also, check header.needPax to see if it needs a pax header.)
+
+const Buffer = require('./buffer.js')
+const types = require('./types.js')
+const pathModule = require('path').posix
+const large = require('./large-numbers.js')
+
+const SLURP = Symbol('slurp')
+const TYPE = Symbol('type')
+
+class Header {
+ constructor (data, off, ex, gex) {
+ this.cksumValid = false
+ this.needPax = false
+ this.nullBlock = false
+
+ this.block = null
+ this.path = null
+ this.mode = null
+ this.uid = null
+ this.gid = null
+ this.size = null
+ this.mtime = null
+ this.cksum = null
+ this[TYPE] = '0'
+ this.linkpath = null
+ this.uname = null
+ this.gname = null
+ this.devmaj = 0
+ this.devmin = 0
+ this.atime = null
+ this.ctime = null
+
+ if (Buffer.isBuffer(data))
+ this.decode(data, off || 0, ex, gex)
+ else if (data)
+ this.set(data)
+ }
+
+ decode (buf, off, ex, gex) {
+ if (!off)
+ off = 0
+
+ if (!buf || !(buf.length >= off + 512))
+ throw new Error('need 512 bytes for header')
+
+ this.path = decString(buf, off, 100)
+ this.mode = decNumber(buf, off + 100, 8)
+ this.uid = decNumber(buf, off + 108, 8)
+ this.gid = decNumber(buf, off + 116, 8)
+ this.size = decNumber(buf, off + 124, 12)
+ this.mtime = decDate(buf, off + 136, 12)
+ this.cksum = decNumber(buf, off + 148, 12)
+
+ // if we have extended or global extended headers, apply them now
+ // See https://github.com/npm/node-tar/pull/187
+ this[SLURP](ex)
+ this[SLURP](gex, true)
+
+ // old tar versions marked dirs as a file with a trailing /
+ this[TYPE] = decString(buf, off + 156, 1)
+ if (this[TYPE] === '')
+ this[TYPE] = '0'
+ if (this[TYPE] === '0' && this.path.substr(-1) === '/')
+ this[TYPE] = '5'
+
+ // tar implementations sometimes incorrectly put the stat(dir).size
+ // as the size in the tarball, even though Directory entries are
+ // not able to have any body at all. In the very rare chance that
+ // it actually DOES have a body, we weren't going to do anything with
+ // it anyway, and it'll just be a warning about an invalid header.
+ if (this[TYPE] === '5')
+ this.size = 0
+
+ this.linkpath = decString(buf, off + 157, 100)
+ if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') {
+ this.uname = decString(buf, off + 265, 32)
+ this.gname = decString(buf, off + 297, 32)
+ this.devmaj = decNumber(buf, off + 329, 8)
+ this.devmin = decNumber(buf, off + 337, 8)
+ if (buf[off + 475] !== 0) {
+ // definitely a prefix, definitely >130 chars.
+ const prefix = decString(buf, off + 345, 155)
+ this.path = prefix + '/' + this.path
+ } else {
+ const prefix = decString(buf, off + 345, 130)
+ if (prefix)
+ this.path = prefix + '/' + this.path
+ this.atime = decDate(buf, off + 476, 12)
+ this.ctime = decDate(buf, off + 488, 12)
+ }
+ }
+
+ let sum = 8 * 0x20
+ for (let i = off; i < off + 148; i++) {
+ sum += buf[i]
+ }
+ for (let i = off + 156; i < off + 512; i++) {
+ sum += buf[i]
+ }
+ this.cksumValid = sum === this.cksum
+ if (this.cksum === null && sum === 8 * 0x20)
+ this.nullBlock = true
+ }
+
+ [SLURP] (ex, global) {
+ for (let k in ex) {
+ // we slurp in everything except for the path attribute in
+ // a global extended header, because that's weird.
+ if (ex[k] !== null && ex[k] !== undefined &&
+ !(global && k === 'path'))
+ this[k] = ex[k]
+ }
+ }
+
+ encode (buf, off) {
+ if (!buf) {
+ buf = this.block = Buffer.alloc(512)
+ off = 0
+ }
+
+ if (!off)
+ off = 0
+
+ if (!(buf.length >= off + 512))
+ throw new Error('need 512 bytes for header')
+
+ const prefixSize = this.ctime || this.atime ? 130 : 155
+ const split = splitPrefix(this.path || '', prefixSize)
+ const path = split[0]
+ const prefix = split[1]
+ this.needPax = split[2]
+
+ this.needPax = encString(buf, off, 100, path) || this.needPax
+ this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax
+ this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax
+ this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax
+ this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax
+ this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax
+ buf[off + 156] = this[TYPE].charCodeAt(0)
+ this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax
+ buf.write('ustar\u000000', off + 257, 8)
+ this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax
+ this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax
+ this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax
+ this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax
+ this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax
+ if (buf[off + 475] !== 0)
+ this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax
+ else {
+ this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax
+ this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax
+ this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax
+ }
+
+ let sum = 8 * 0x20
+ for (let i = off; i < off + 148; i++) {
+ sum += buf[i]
+ }
+ for (let i = off + 156; i < off + 512; i++) {
+ sum += buf[i]
+ }
+ this.cksum = sum
+ encNumber(buf, off + 148, 8, this.cksum)
+ this.cksumValid = true
+
+ return this.needPax
+ }
+
+ set (data) {
+ for (let i in data) {
+ if (data[i] !== null && data[i] !== undefined)
+ this[i] = data[i]
+ }
+ }
+
+ get type () {
+ return types.name.get(this[TYPE]) || this[TYPE]
+ }
+
+ get typeKey () {
+ return this[TYPE]
+ }
+
+ set type (type) {
+ if (types.code.has(type))
+ this[TYPE] = types.code.get(type)
+ else
+ this[TYPE] = type
+ }
+}
+
+const splitPrefix = (p, prefixSize) => {
+ const pathSize = 100
+ let pp = p
+ let prefix = ''
+ let ret
+ const root = pathModule.parse(p).root || '.'
+
+ if (Buffer.byteLength(pp) < pathSize)
+ ret = [pp, prefix, false]
+ else {
+ // first set prefix to the dir, and path to the base
+ prefix = pathModule.dirname(pp)
+ pp = pathModule.basename(pp)
+
+ do {
+ // both fit!
+ if (Buffer.byteLength(pp) <= pathSize &&
+ Buffer.byteLength(prefix) <= prefixSize)
+ ret = [pp, prefix, false]
+
+ // prefix fits in prefix, but path doesn't fit in path
+ else if (Buffer.byteLength(pp) > pathSize &&
+ Buffer.byteLength(prefix) <= prefixSize)
+ ret = [pp.substr(0, pathSize - 1), prefix, true]
+
+ else {
+ // make path take a bit from prefix
+ pp = pathModule.join(pathModule.basename(prefix), pp)
+ prefix = pathModule.dirname(prefix)
+ }
+ } while (prefix !== root && !ret)
+
+ // at this point, found no resolution, just truncate
+ if (!ret)
+ ret = [p.substr(0, pathSize - 1), '', true]
+ }
+ return ret
+}
+
+const decString = (buf, off, size) =>
+ buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '')
+
+const decDate = (buf, off, size) =>
+ numToDate(decNumber(buf, off, size))
+
+const numToDate = num => num === null ? null : new Date(num * 1000)
+
+const decNumber = (buf, off, size) =>
+ buf[off] & 0x80 ? large.parse(buf.slice(off, off + size))
+ : decSmallNumber(buf, off, size)
+
+const nanNull = value => isNaN(value) ? null : value
+
+const decSmallNumber = (buf, off, size) =>
+ nanNull(parseInt(
+ buf.slice(off, off + size)
+ .toString('utf8').replace(/\0.*$/, '').trim(), 8))
+
+// the maximum encodable as a null-terminated octal, by field size
+const MAXNUM = {
+ 12: 0o77777777777,
+ 8 : 0o7777777
+}
+
+const encNumber = (buf, off, size, number) =>
+ number === null ? false :
+ number > MAXNUM[size] || number < 0
+ ? (large.encode(number, buf.slice(off, off + size)), true)
+ : (encSmallNumber(buf, off, size, number), false)
+
+const encSmallNumber = (buf, off, size, number) =>
+ buf.write(octalString(number, size), off, size, 'ascii')
+
+const octalString = (number, size) =>
+ padOctal(Math.floor(number).toString(8), size)
+
+const padOctal = (string, size) =>
+ (string.length === size - 1 ? string
+ : new Array(size - string.length - 1).join('0') + string + ' ') + '\0'
+
+const encDate = (buf, off, size, date) =>
+ date === null ? false :
+ encNumber(buf, off, size, date.getTime() / 1000)
+
+// enough to fill the longest string we've got
+const NULLS = new Array(156).join('\0')
+// pad with nulls, return true if it's longer or non-ascii
+const encString = (buf, off, size, string) =>
+ string === null ? false :
+ (buf.write(string + NULLS, off, size, 'utf8'),
+ string.length !== Buffer.byteLength(string) || string.length > size)
+
+module.exports = Header
diff --git a/node_modules/pacote/node_modules/tar/lib/high-level-opt.js b/node_modules/pacote/node_modules/tar/lib/high-level-opt.js
new file mode 100644
index 000000000..7333db915
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/high-level-opt.js
@@ -0,0 +1,29 @@
+'use strict'
+
+// turn tar(1) style args like `C` into the more verbose things like `cwd`
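+// e.g. { C: '/tmp', z: true } becomes { cwd: '/tmp', gzip: true }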
+
+const argmap = new Map([
+ ['C', 'cwd'],
+ ['f', 'file'],
+ ['z', 'gzip'],
+ ['P', 'preservePaths'],
+ ['U', 'unlink'],
+ ['strip-components', 'strip'],
+ ['stripComponents', 'strip'],
+ ['keep-newer', 'newer'],
+ ['keepNewer', 'newer'],
+ ['keep-newer-files', 'newer'],
+ ['keepNewerFiles', 'newer'],
+ ['k', 'keep'],
+ ['keep-existing', 'keep'],
+ ['keepExisting', 'keep'],
+ ['m', 'noMtime'],
+ ['no-mtime', 'noMtime'],
+ ['p', 'preserveOwner'],
+ ['L', 'follow'],
+ ['h', 'follow']
+])
+
+const parse = module.exports = opt => opt ? Object.keys(opt).map(k => [
+ argmap.has(k) ? argmap.get(k) : k, opt[k]
+]).reduce((set, kv) => (set[kv[0]] = kv[1], set), Object.create(null)) : {}
diff --git a/node_modules/pacote/node_modules/tar/lib/large-numbers.js b/node_modules/pacote/node_modules/tar/lib/large-numbers.js
new file mode 100644
index 000000000..3e5c99255
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/large-numbers.js
@@ -0,0 +1,97 @@
+'use strict'
+// Tar can encode large and negative numbers using a leading byte of
+// 0xff for negative, and 0x80 for positive.
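+// e.g. 0x80 00 ... 01 00 parses to 256, and a field of all 0xff bytes
+// parses to -1.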
+
+const encode = exports.encode = (num, buf) => {
+ if (!Number.isSafeInteger(num))
+ // The number is so large that javascript cannot represent it with integer
+ // precision.
+ throw TypeError('cannot encode number outside of javascript safe integer range')
+ else if (num < 0)
+ encodeNegative(num, buf)
+ else
+ encodePositive(num, buf)
+ return buf
+}
+
+const encodePositive = (num, buf) => {
+ buf[0] = 0x80
+
+ for (var i = buf.length; i > 1; i--) {
+ buf[i-1] = num & 0xff
+ num = Math.floor(num / 0x100)
+ }
+}
+
+const encodeNegative = (num, buf) => {
+ buf[0] = 0xff
+ var flipped = false
+ num = num * -1
+ for (var i = buf.length; i > 1; i--) {
+ var byte = num & 0xff
+ num = Math.floor(num / 0x100)
+ if (flipped)
+ buf[i-1] = onesComp(byte)
+ else if (byte === 0)
+ buf[i-1] = 0
+ else {
+ flipped = true
+ buf[i-1] = twosComp(byte)
+ }
+ }
+}
+
+const parse = exports.parse = (buf) => {
+ var pre = buf[0]
+ var value
+ if (pre === 0x80)
+ value = pos(buf.slice(1, buf.length))
+ else if (pre === 0xff)
+ value = twos(buf)
+ else
+ throw TypeError('invalid base256 encoding')
+
+ if (!Number.isSafeInteger(value))
+ // The number is so large that javascript cannot represent it with integer
+ // precision.
+ throw TypeError('parsed number outside of javascript safe integer range')
+
+ return value
+}
+
+const twos = (buf) => {
+ var len = buf.length
+ var sum = 0
+ var flipped = false
+ for (var i = len - 1; i > -1; i--) {
+ var byte = buf[i]
+ var f
+ if (flipped)
+ f = onesComp(byte)
+ else if (byte === 0)
+ f = byte
+ else {
+ flipped = true
+ f = twosComp(byte)
+ }
+ if (f !== 0)
+ sum -= f * Math.pow(256, len - i - 1)
+ }
+ return sum
+}
+
+const pos = (buf) => {
+ var len = buf.length
+ var sum = 0
+ for (var i = len - 1; i > -1; i--) {
+ var byte = buf[i]
+ if (byte !== 0)
+ sum += byte * Math.pow(256, len - i - 1)
+ }
+ return sum
+}
+
+const onesComp = byte => (0xff ^ byte) & 0xff
+
+const twosComp = byte => ((0xff ^ byte) + 1) & 0xff
diff --git a/node_modules/pacote/node_modules/tar/lib/list.js b/node_modules/pacote/node_modules/tar/lib/list.js
new file mode 100644
index 000000000..250ebe001
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/list.js
@@ -0,0 +1,130 @@
+'use strict'
+
+const Buffer = require('./buffer.js')
+
+// XXX: This shares a lot in common with extract.js
+// maybe some DRY opportunity here?
+
+// tar -t
+const hlo = require('./high-level-opt.js')
+const Parser = require('./parse.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const path = require('path')
+
+const t = module.exports = (opt_, files, cb) => {
+ if (typeof opt_ === 'function')
+ cb = opt_, files = null, opt_ = {}
+ else if (Array.isArray(opt_))
+ files = opt_, opt_ = {}
+
+ if (typeof files === 'function')
+ cb = files, files = null
+
+ if (!files)
+ files = []
+ else
+ files = Array.from(files)
+
+ const opt = hlo(opt_)
+
+ if (opt.sync && typeof cb === 'function')
+ throw new TypeError('callback not supported for sync tar functions')
+
+ if (!opt.file && typeof cb === 'function')
+ throw new TypeError('callback only supported with file option')
+
+ if (files.length)
+ filesFilter(opt, files)
+
+ if (!opt.noResume)
+ onentryFunction(opt)
+
+ return opt.file && opt.sync ? listFileSync(opt)
+ : opt.file ? listFile(opt, cb)
+ : list(opt)
+}
+
+const onentryFunction = opt => {
+ const onentry = opt.onentry
+ opt.onentry = onentry ? e => {
+ onentry(e)
+ e.resume()
+ } : e => e.resume()
+}
+
+// construct a filter that limits the file entries listed
+// include child entries if a dir is included
+const filesFilter = (opt, files) => {
+ const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
+ const filter = opt.filter
+
+ const mapHas = (file, r) => {
+ const root = r || path.parse(file).root || '.'
+ const ret = file === root ? false
+ : map.has(file) ? map.get(file)
+ : mapHas(path.dirname(file), root)
+
+ map.set(file, ret)
+ return ret
+ }
+
+ opt.filter = filter
+ ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, ''))
+ : file => mapHas(file.replace(/\/+$/, ''))
+}
+
+const listFileSync = opt => {
+ const p = list(opt)
+ const file = opt.file
+ let threw = true
+ let fd
+ try {
+ const stat = fs.statSync(file)
+ const readSize = opt.maxReadSize || 16*1024*1024
+ if (stat.size < readSize) {
+ p.end(fs.readFileSync(file))
+ } else {
+ let pos = 0
+ const buf = Buffer.allocUnsafe(readSize)
+ fd = fs.openSync(file, 'r')
+ while (pos < stat.size) {
+ let bytesRead = fs.readSync(fd, buf, 0, readSize, pos)
+ pos += bytesRead
+ p.write(buf.slice(0, bytesRead))
+ }
+ p.end()
+ }
+ threw = false
+ } finally {
+ if (threw && fd)
+ try { fs.closeSync(fd) } catch (er) {}
+ }
+}
+
+const listFile = (opt, cb) => {
+ const parse = new Parser(opt)
+ const readSize = opt.maxReadSize || 16*1024*1024
+
+ const file = opt.file
+ const p = new Promise((resolve, reject) => {
+ parse.on('error', reject)
+ parse.on('end', resolve)
+
+ fs.stat(file, (er, stat) => {
+ if (er)
+ reject(er)
+ else {
+ const stream = new fsm.ReadStream(file, {
+ readSize: readSize,
+ size: stat.size
+ })
+ stream.on('error', reject)
+ stream.pipe(parse)
+ }
+ })
+ })
+ return cb ? p.then(cb, cb) : p
+}
+
+const list = opt => new Parser(opt)
diff --git a/node_modules/pacote/node_modules/tar/lib/mkdir.js b/node_modules/pacote/node_modules/tar/lib/mkdir.js
new file mode 100644
index 000000000..c6a154c24
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/mkdir.js
@@ -0,0 +1,206 @@
+'use strict'
+// wrapper around mkdirp for tar's needs.
+
+// TODO: This should probably be a class, not functionally
+// passing around state in a gazillion args.
+
+const mkdirp = require('mkdirp')
+const fs = require('fs')
+const path = require('path')
+const chownr = require('chownr')
+
+class SymlinkError extends Error {
+ constructor (symlink, path) {
+ super('Cannot extract through symbolic link')
+ this.path = path
+ this.symlink = symlink
+ }
+
+ get name () {
+ return 'SymlinkError'
+ }
+}
+
+class CwdError extends Error {
+ constructor (path, code) {
+ super(code + ': Cannot cd into \'' + path + '\'')
+ this.path = path
+ this.code = code
+ }
+
+ get name () {
+ return 'CwdError'
+ }
+}
+
+const mkdir = module.exports = (dir, opt, cb) => {
+ // if there's any overlap between mask and mode,
+ // then we'll need an explicit chmod
+ const umask = opt.umask
+ const mode = opt.mode | 0o0700
+ const needChmod = (mode & umask) !== 0
+
+ const uid = opt.uid
+ const gid = opt.gid
+ const doChown = typeof uid === 'number' &&
+ typeof gid === 'number' &&
+ ( uid !== opt.processUid || gid !== opt.processGid )
+
+ const preserve = opt.preserve
+ const unlink = opt.unlink
+ const cache = opt.cache
+ const cwd = opt.cwd
+
+ const done = (er, created) => {
+ if (er)
+ cb(er)
+ else {
+ cache.set(dir, true)
+ if (created && doChown)
+ chownr(created, uid, gid, er => done(er))
+ else if (needChmod)
+ fs.chmod(dir, mode, cb)
+ else
+ cb()
+ }
+ }
+
+ if (cache && cache.get(dir) === true)
+ return done()
+
+ if (dir === cwd)
+ return fs.stat(dir, (er, st) => {
+ if (er || !st.isDirectory())
+ er = new CwdError(dir, er && er.code || 'ENOTDIR')
+ done(er)
+ })
+
+ if (preserve)
+ return mkdirp(dir, mode, done)
+
+ const sub = path.relative(cwd, dir)
+ const parts = sub.split(/\/|\\/)
+ mkdir_(cwd, parts, mode, cache, unlink, cwd, null, done)
+}
+
+const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => {
+ if (!parts.length)
+ return cb(null, created)
+ const p = parts.shift()
+ const part = base + '/' + p
+ if (cache.get(part))
+ return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
+ fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
+}
+
+const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => er => {
+ if (er) {
+ if (er.path && path.dirname(er.path) === cwd &&
+ (er.code === 'ENOTDIR' || er.code === 'ENOENT'))
+ return cb(new CwdError(cwd, er.code))
+
+ fs.lstat(part, (statEr, st) => {
+ if (statEr)
+ cb(statEr)
+ else if (st.isDirectory())
+ mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
+ else if (unlink)
+ fs.unlink(part, er => {
+ if (er)
+ return cb(er)
+ fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
+ })
+ else if (st.isSymbolicLink())
+ return cb(new SymlinkError(part, part + '/' + parts.join('/')))
+ else
+ cb(er)
+ })
+ } else {
+ created = created || part
+ mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
+ }
+}
+
+const mkdirSync = module.exports.sync = (dir, opt) => {
+ // if there's any overlap between mask and mode,
+ // then we'll need an explicit chmod
+ const umask = opt.umask
+ const mode = opt.mode | 0o0700
+ const needChmod = (mode & umask) !== 0
+
+ const uid = opt.uid
+ const gid = opt.gid
+ const doChown = typeof uid === 'number' &&
+ typeof gid === 'number' &&
+ ( uid !== opt.processUid || gid !== opt.processGid )
+
+ const preserve = opt.preserve
+ const unlink = opt.unlink
+ const cache = opt.cache
+ const cwd = opt.cwd
+
+ const done = (created) => {
+ cache.set(dir, true)
+ if (created && doChown)
+ chownr.sync(created, uid, gid)
+ if (needChmod)
+ fs.chmodSync(dir, mode)
+ }
+
+ if (cache && cache.get(dir) === true)
+ return done()
+
+ if (dir === cwd) {
+ let ok = false
+ let code = 'ENOTDIR'
+ try {
+ ok = fs.statSync(dir).isDirectory()
+ } catch (er) {
+ code = er.code
+ } finally {
+ if (!ok)
+ throw new CwdError(dir, code)
+ }
+ done()
+ return
+ }
+
+ if (preserve)
+ return done(mkdirp.sync(dir, mode))
+
+ const sub = path.relative(cwd, dir)
+ const parts = sub.split(/\/|\\/)
+ let created = null
+ for (let p = parts.shift(), part = cwd;
+ p && (part += '/' + p);
+ p = parts.shift()) {
+
+ if (cache.get(part))
+ continue
+
+ try {
+ fs.mkdirSync(part, mode)
+ created = created || part
+ cache.set(part, true)
+ } catch (er) {
+ if (er.path && path.dirname(er.path) === cwd &&
+ (er.code === 'ENOTDIR' || er.code === 'ENOENT'))
+ return new CwdError(cwd, er.code)
+
+ const st = fs.lstatSync(part)
+ if (st.isDirectory()) {
+ cache.set(part, true)
+ continue
+ } else if (unlink) {
+ fs.unlinkSync(part)
+ fs.mkdirSync(part, mode)
+ created = created || part
+ cache.set(part, true)
+ continue
+ } else if (st.isSymbolicLink())
+ return new SymlinkError(part, part + '/' + parts.join('/'))
+ }
+ }
+
+ return done(created)
+}
diff --git a/node_modules/pacote/node_modules/tar/lib/mode-fix.js b/node_modules/pacote/node_modules/tar/lib/mode-fix.js
new file mode 100644
index 000000000..3363a3b15
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/mode-fix.js
@@ -0,0 +1,14 @@
+'use strict'
+module.exports = (mode, isDir) => {
+ mode &= 0o7777
+ // if dirs are readable, then they should be listable
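+ // e.g. a dir mode of 0o644 becomes 0o755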
+ if (isDir) {
+ if (mode & 0o400)
+ mode |= 0o100
+ if (mode & 0o40)
+ mode |= 0o10
+ if (mode & 0o4)
+ mode |= 0o1
+ }
+ return mode
+}
diff --git a/node_modules/pacote/node_modules/tar/lib/pack.js b/node_modules/pacote/node_modules/tar/lib/pack.js
new file mode 100644
index 000000000..857cea910
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/pack.js
@@ -0,0 +1,404 @@
+'use strict'
+
+const Buffer = require('./buffer.js')
+
+// A readable tar stream creator
+// Technically, this is a transform stream that you write paths into,
+// and tar format comes out of.
+// The `add()` method is like `write()` but returns this,
+// and end() returns `this` as well, so you can
+// do `new Pack(opt).add('files').add('dir').end().pipe(output)`
+// You could also do something like:
+// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
+
+class PackJob {
+ constructor (path, absolute) {
+ this.path = path || './'
+ this.absolute = absolute
+ this.entry = null
+ this.stat = null
+ this.readdir = null
+ this.pending = false
+ this.ignore = false
+ this.piped = false
+ }
+}
+
+const MiniPass = require('minipass')
+const zlib = require('minizlib')
+const ReadEntry = require('./read-entry.js')
+const WriteEntry = require('./write-entry.js')
+const WriteEntrySync = WriteEntry.Sync
+const WriteEntryTar = WriteEntry.Tar
+const Yallist = require('yallist')
+const EOF = Buffer.alloc(1024)
+const ONSTAT = Symbol('onStat')
+const ENDED = Symbol('ended')
+const QUEUE = Symbol('queue')
+const CURRENT = Symbol('current')
+const PROCESS = Symbol('process')
+const PROCESSING = Symbol('processing')
+const PROCESSJOB = Symbol('processJob')
+const JOBS = Symbol('jobs')
+const JOBDONE = Symbol('jobDone')
+const ADDFSENTRY = Symbol('addFSEntry')
+const ADDTARENTRY = Symbol('addTarEntry')
+const STAT = Symbol('stat')
+const READDIR = Symbol('readdir')
+const ONREADDIR = Symbol('onreaddir')
+const PIPE = Symbol('pipe')
+const ENTRY = Symbol('entry')
+const ENTRYOPT = Symbol('entryOpt')
+const WRITEENTRYCLASS = Symbol('writeEntryClass')
+const WRITE = Symbol('write')
+const ONDRAIN = Symbol('ondrain')
+
+const fs = require('fs')
+const path = require('path')
+const warner = require('./warn-mixin.js')
+
+const Pack = warner(class Pack extends MiniPass {
+ constructor (opt) {
+ super(opt)
+ opt = opt || Object.create(null)
+ this.opt = opt
+ this.cwd = opt.cwd || process.cwd()
+ this.maxReadSize = opt.maxReadSize
+ this.preservePaths = !!opt.preservePaths
+ this.strict = !!opt.strict
+ this.noPax = !!opt.noPax
+ this.prefix = (opt.prefix || '').replace(/(\\|\/)+$/, '')
+ this.linkCache = opt.linkCache || new Map()
+ this.statCache = opt.statCache || new Map()
+ this.readdirCache = opt.readdirCache || new Map()
+
+ this[WRITEENTRYCLASS] = WriteEntry
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+
+ this.zip = null
+ if (opt.gzip) {
+ if (typeof opt.gzip !== 'object')
+ opt.gzip = {}
+ this.zip = new zlib.Gzip(opt.gzip)
+ this.zip.on('data', chunk => super.write(chunk))
+ this.zip.on('end', _ => super.end())
+ this.zip.on('drain', _ => this[ONDRAIN]())
+ this.on('resume', _ => this.zip.resume())
+ } else
+ this.on('drain', this[ONDRAIN])
+
+ this.portable = !!opt.portable
+ this.noDirRecurse = !!opt.noDirRecurse
+ this.follow = !!opt.follow
+ this.noMtime = !!opt.noMtime
+ this.mtime = opt.mtime || null
+
+ this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true
+
+ this[QUEUE] = new Yallist
+ this[JOBS] = 0
+ this.jobs = +opt.jobs || 4
+ this[PROCESSING] = false
+ this[ENDED] = false
+ }
+
+ [WRITE] (chunk) {
+ return super.write(chunk)
+ }
+
+ add (path) {
+ this.write(path)
+ return this
+ }
+
+ end (path) {
+ if (path)
+ this.write(path)
+ this[ENDED] = true
+ this[PROCESS]()
+ return this
+ }
+
+ write (path) {
+ if (this[ENDED])
+ throw new Error('write after end')
+
+ if (path instanceof ReadEntry)
+ this[ADDTARENTRY](path)
+ else
+ this[ADDFSENTRY](path)
+ return this.flowing
+ }
+
+ [ADDTARENTRY] (p) {
+ const absolute = path.resolve(this.cwd, p.path)
+ if (this.prefix)
+ p.path = this.prefix + '/' + p.path.replace(/^\.(\/+|$)/, '')
+
+ // in this case, we don't have to wait for the stat
+ if (!this.filter(p.path, p))
+ p.resume()
+ else {
+ const job = new PackJob(p.path, absolute, false)
+ job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
+ job.entry.on('end', _ => this[JOBDONE](job))
+ this[JOBS] += 1
+ this[QUEUE].push(job)
+ }
+
+ this[PROCESS]()
+ }
+
+ [ADDFSENTRY] (p) {
+ const absolute = path.resolve(this.cwd, p)
+ if (this.prefix)
+ p = this.prefix + '/' + p.replace(/^\.(\/+|$)/, '')
+
+ this[QUEUE].push(new PackJob(p, absolute))
+ this[PROCESS]()
+ }
+
+ [STAT] (job) {
+ job.pending = true
+ this[JOBS] += 1
+ const stat = this.follow ? 'stat' : 'lstat'
+ fs[stat](job.absolute, (er, stat) => {
+ job.pending = false
+ this[JOBS] -= 1
+ if (er)
+ this.emit('error', er)
+ else
+ this[ONSTAT](job, stat)
+ })
+ }
+
+ [ONSTAT] (job, stat) {
+ this.statCache.set(job.absolute, stat)
+ job.stat = stat
+
+ // now we have the stat, we can filter it.
+ if (!this.filter(job.path, stat))
+ job.ignore = true
+
+ this[PROCESS]()
+ }
+
+ [READDIR] (job) {
+ job.pending = true
+ this[JOBS] += 1
+ fs.readdir(job.absolute, (er, entries) => {
+ job.pending = false
+ this[JOBS] -= 1
+ if (er)
+ return this.emit('error', er)
+ this[ONREADDIR](job, entries)
+ })
+ }
+
+ [ONREADDIR] (job, entries) {
+ this.readdirCache.set(job.absolute, entries)
+ job.readdir = entries
+ this[PROCESS]()
+ }
+
+ [PROCESS] () {
+ if (this[PROCESSING])
+ return
+
+ this[PROCESSING] = true
+ for (let w = this[QUEUE].head;
+ w !== null && this[JOBS] < this.jobs;
+ w = w.next) {
+ this[PROCESSJOB](w.value)
+ if (w.value.ignore) {
+ const p = w.next
+ this[QUEUE].removeNode(w)
+ w.next = p
+ }
+ }
+
+ this[PROCESSING] = false
+
+ if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
+ if (this.zip)
+ this.zip.end(EOF)
+ else {
+ super.write(EOF)
+ super.end()
+ }
+ }
+ }
+
+ get [CURRENT] () {
+ return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
+ }
+
+ [JOBDONE] (job) {
+ this[QUEUE].shift()
+ this[JOBS] -= 1
+ this[PROCESS]()
+ }
+
+ [PROCESSJOB] (job) {
+ if (job.pending)
+ return
+
+ if (job.entry) {
+ if (job === this[CURRENT] && !job.piped)
+ this[PIPE](job)
+ return
+ }
+
+ if (!job.stat) {
+ if (this.statCache.has(job.absolute))
+ this[ONSTAT](job, this.statCache.get(job.absolute))
+ else
+ this[STAT](job)
+ }
+ if (!job.stat)
+ return
+
+ // filtered out!
+ if (job.ignore)
+ return
+
+ if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
+ if (this.readdirCache.has(job.absolute))
+ this[ONREADDIR](job, this.readdirCache.get(job.absolute))
+ else
+ this[READDIR](job)
+ if (!job.readdir)
+ return
+ }
+
+ // we know it doesn't have an entry, because that got checked above
+ job.entry = this[ENTRY](job)
+ if (!job.entry) {
+ job.ignore = true
+ return
+ }
+
+ if (job === this[CURRENT] && !job.piped)
+ this[PIPE](job)
+ }
+
+ [ENTRYOPT] (job) {
+ return {
+ onwarn: (msg, data) => {
+ this.warn(msg, data)
+ },
+ noPax: this.noPax,
+ cwd: this.cwd,
+ absolute: job.absolute,
+ preservePaths: this.preservePaths,
+ maxReadSize: this.maxReadSize,
+ strict: this.strict,
+ portable: this.portable,
+ linkCache: this.linkCache,
+ statCache: this.statCache,
+ noMtime: this.noMtime,
+ mtime: this.mtime
+ }
+ }
+
+ [ENTRY] (job) {
+ this[JOBS] += 1
+ try {
+ return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
+ .on('end', () => this[JOBDONE](job))
+ .on('error', er => this.emit('error', er))
+ } catch (er) {
+ this.emit('error', er)
+ }
+ }
+
+ [ONDRAIN] () {
+ if (this[CURRENT] && this[CURRENT].entry)
+ this[CURRENT].entry.resume()
+ }
+
+ // like .pipe() but using super, because our write() is special
+ [PIPE] (job) {
+ job.piped = true
+
+ if (job.readdir)
+ job.readdir.forEach(entry => {
+ const p = this.prefix ?
+ job.path.slice(this.prefix.length + 1) || './'
+ : job.path
+
+ const base = p === './' ? '' : p.replace(/\/*$/, '/')
+ this[ADDFSENTRY](base + entry)
+ })
+
+ const source = job.entry
+ const zip = this.zip
+
+ if (zip)
+ source.on('data', chunk => {
+ if (!zip.write(chunk))
+ source.pause()
+ })
+ else
+ source.on('data', chunk => {
+ if (!super.write(chunk))
+ source.pause()
+ })
+ }
+
+ pause () {
+ if (this.zip)
+ this.zip.pause()
+ return super.pause()
+ }
+})
+
+class PackSync extends Pack {
+ constructor (opt) {
+ super(opt)
+ this[WRITEENTRYCLASS] = WriteEntrySync
+ }
+
+ // pause/resume are no-ops in sync streams.
+ pause () {}
+ resume () {}
+
+ [STAT] (job) {
+ const stat = this.follow ? 'statSync' : 'lstatSync'
+ this[ONSTAT](job, fs[stat](job.absolute))
+ }
+
+ [READDIR] (job, stat) {
+ this[ONREADDIR](job, fs.readdirSync(job.absolute))
+ }
+
+ // gotta get it all in this tick
+ [PIPE] (job) {
+ const source = job.entry
+ const zip = this.zip
+
+ if (job.readdir)
+ job.readdir.forEach(entry => {
+ const p = this.prefix ?
+ job.path.slice(this.prefix.length + 1) || './'
+ : job.path
+
+ const base = p === './' ? '' : p.replace(/\/*$/, '/')
+ this[ADDFSENTRY](base + entry)
+ })
+
+ if (zip)
+ source.on('data', chunk => {
+ zip.write(chunk)
+ })
+ else
+ source.on('data', chunk => {
+ super[WRITE](chunk)
+ })
+ }
+}
+
+Pack.Sync = PackSync
+
+module.exports = Pack
diff --git a/node_modules/pacote/node_modules/tar/lib/parse.js b/node_modules/pacote/node_modules/tar/lib/parse.js
new file mode 100644
index 000000000..43d4383dd
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/parse.js
@@ -0,0 +1,428 @@
+'use strict'
+
+// this[BUFFER] is the remainder of a chunk if we're waiting for
+// the full 512 bytes of a header to come in. We will Buffer.concat()
+// it to the next write(), which is a mem copy, but a small one.
+//
+// this[QUEUE] is a Yallist of entries that haven't been emitted
+// yet. This can only get filled up if the user keeps write()ing after
+// a write() returns false, or does a write() with more than one entry
+//
+// We don't buffer chunks, we always parse them and either create an
+// entry, or push it into the active entry. The ReadEntry class knows
+// to throw data away if .ignore=true
+//
+// Shift an entry off the queue when it emits 'end', and emit 'entry'
+// for the next one in the list.
+//
+// At any time, we're pushing body chunks into the entry at WRITEENTRY,
+// and waiting for 'end' on the entry at READENTRY
+//
+// ignored entries get .resume() called on them straight away
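+//
+// Minimal usage sketch (editor's illustration; the archive name and
+// callback body are assumptions, not part of this module):
+//
+//   const fs = require('fs')
+//   const Parser = require('./parse.js')
+//   const p = new Parser({
+//     onentry: entry => {
+//       console.log(entry.path, entry.size)
+//       entry.resume() // discard the body; pipe it somewhere to keep it
+//     }
+//   })
+//   fs.createReadStream('archive.tar').pipe(p) // gzip is auto-detected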
+
+const warner = require('./warn-mixin.js')
+const path = require('path')
+const Header = require('./header.js')
+const EE = require('events')
+const Yallist = require('yallist')
+const maxMetaEntrySize = 1024 * 1024
+const Entry = require('./read-entry.js')
+const Pax = require('./pax.js')
+const zlib = require('minizlib')
+const Buffer = require('./buffer.js')
+
+const gzipHeader = Buffer.from([0x1f, 0x8b])
+const STATE = Symbol('state')
+const WRITEENTRY = Symbol('writeEntry')
+const READENTRY = Symbol('readEntry')
+const NEXTENTRY = Symbol('nextEntry')
+const PROCESSENTRY = Symbol('processEntry')
+const EX = Symbol('extendedHeader')
+const GEX = Symbol('globalExtendedHeader')
+const META = Symbol('meta')
+const EMITMETA = Symbol('emitMeta')
+const BUFFER = Symbol('buffer')
+const QUEUE = Symbol('queue')
+const ENDED = Symbol('ended')
+const EMITTEDEND = Symbol('emittedEnd')
+const EMIT = Symbol('emit')
+const UNZIP = Symbol('unzip')
+const CONSUMECHUNK = Symbol('consumeChunk')
+const CONSUMECHUNKSUB = Symbol('consumeChunkSub')
+const CONSUMEBODY = Symbol('consumeBody')
+const CONSUMEMETA = Symbol('consumeMeta')
+const CONSUMEHEADER = Symbol('consumeHeader')
+const CONSUMING = Symbol('consuming')
+const BUFFERCONCAT = Symbol('bufferConcat')
+const MAYBEEND = Symbol('maybeEnd')
+const WRITING = Symbol('writing')
+const ABORTED = Symbol('aborted')
+const DONE = Symbol('onDone')
+
+const noop = _ => true
+
+module.exports = warner(class Parser extends EE {
+ constructor (opt) {
+ opt = opt || {}
+ super(opt)
+
+ if (opt.ondone)
+ this.on(DONE, opt.ondone)
+ else
+ this.on(DONE, _ => {
+ this.emit('prefinish')
+ this.emit('finish')
+ this.emit('end')
+ this.emit('close')
+ })
+
+ this.strict = !!opt.strict
+ this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize
+ this.filter = typeof opt.filter === 'function' ? opt.filter : noop
+
+ // have to set this so that streams are ok piping into it
+ this.writable = true
+ this.readable = false
+
+ this[QUEUE] = new Yallist()
+ this[BUFFER] = null
+ this[READENTRY] = null
+ this[WRITEENTRY] = null
+ this[STATE] = 'begin'
+ this[META] = ''
+ this[EX] = null
+ this[GEX] = null
+ this[ENDED] = false
+ this[UNZIP] = null
+ this[ABORTED] = false
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+ if (typeof opt.onentry === 'function')
+ this.on('entry', opt.onentry)
+ }
+
+ [CONSUMEHEADER] (chunk, position) {
+ let header
+ try {
+ header = new Header(chunk, position, this[EX], this[GEX])
+ } catch (er) {
+ return this.warn('invalid entry', er)
+ }
+
+ if (header.nullBlock)
+ this[EMIT]('nullBlock')
+ else if (!header.cksumValid)
+ this.warn('invalid entry', header)
+ else if (!header.path)
+ this.warn('invalid: path is required', header)
+ else {
+ const type = header.type
+ if (/^(Symbolic)?Link$/.test(type) && !header.linkpath)
+ this.warn('invalid: linkpath required', header)
+ else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath)
+ this.warn('invalid: linkpath forbidden', header)
+ else {
+ const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])
+
+ if (entry.meta) {
+ if (entry.size > this.maxMetaEntrySize) {
+ entry.ignore = true
+ this[EMIT]('ignoredEntry', entry)
+ this[STATE] = 'ignore'
+ } else if (entry.size > 0) {
+ this[META] = ''
+ entry.on('data', c => this[META] += c)
+ this[STATE] = 'meta'
+ }
+ } else {
+
+ this[EX] = null
+ entry.ignore = entry.ignore || !this.filter(entry.path, entry)
+ if (entry.ignore) {
+ this[EMIT]('ignoredEntry', entry)
+ this[STATE] = entry.remain ? 'ignore' : 'begin'
+ } else {
+ if (entry.remain)
+ this[STATE] = 'body'
+ else {
+ this[STATE] = 'begin'
+ entry.end()
+ }
+
+ if (!this[READENTRY]) {
+ this[QUEUE].push(entry)
+ this[NEXTENTRY]()
+ } else
+ this[QUEUE].push(entry)
+ }
+ }
+ }
+ }
+ }
+
+ [PROCESSENTRY] (entry) {
+ let go = true
+
+ if (!entry) {
+ this[READENTRY] = null
+ go = false
+ } else if (Array.isArray(entry))
+ this.emit.apply(this, entry)
+ else {
+ this[READENTRY] = entry
+ this.emit('entry', entry)
+ if (!entry.emittedEnd) {
+ entry.on('end', _ => this[NEXTENTRY]())
+ go = false
+ }
+ }
+
+ return go
+ }
+
+ [NEXTENTRY] () {
+ do {} while (this[PROCESSENTRY](this[QUEUE].shift()))
+
+ if (!this[QUEUE].length) {
+ // At this point, there's nothing in the queue, but we may have an
+ // entry which is being consumed (readEntry).
+ // If we don't, then we definitely can handle more data.
+ // If we do, and either it's flowing, or it has never had any data
+ // written to it, then it needs more.
+ // The only other possibility is that it has returned false from a
+ // write() call, so we wait for the next drain to continue.
+ const re = this[READENTRY]
+ const drainNow = !re || re.flowing || re.size === re.remain
+ if (drainNow) {
+ if (!this[WRITING])
+ this.emit('drain')
+ } else
+ re.once('drain', _ => this.emit('drain'))
+ }
+ }
+
+ [CONSUMEBODY] (chunk, position) {
+ // write up to but no more than writeEntry.blockRemain
+ const entry = this[WRITEENTRY]
+ const br = entry.blockRemain
+ const c = (br >= chunk.length && position === 0) ? chunk
+ : chunk.slice(position, position + br)
+
+ entry.write(c)
+
+ if (!entry.blockRemain) {
+ this[STATE] = 'begin'
+ this[WRITEENTRY] = null
+ entry.end()
+ }
+
+ return c.length
+ }
+
+ [CONSUMEMETA] (chunk, position) {
+ const entry = this[WRITEENTRY]
+ const ret = this[CONSUMEBODY](chunk, position)
+
+ // if we finished, then the entry is reset
+ if (!this[WRITEENTRY])
+ this[EMITMETA](entry)
+
+ return ret
+ }
+
+ [EMIT] (ev, data, extra) {
+ if (!this[QUEUE].length && !this[READENTRY])
+ this.emit(ev, data, extra)
+ else
+ this[QUEUE].push([ev, data, extra])
+ }
+
+ [EMITMETA] (entry) {
+ this[EMIT]('meta', this[META])
+ switch (entry.type) {
+ case 'ExtendedHeader':
+ case 'OldExtendedHeader':
+ this[EX] = Pax.parse(this[META], this[EX], false)
+ break
+
+ case 'GlobalExtendedHeader':
+ this[GEX] = Pax.parse(this[META], this[GEX], true)
+ break
+
+ case 'NextFileHasLongPath':
+ case 'OldGnuLongPath':
+ this[EX] = this[EX] || Object.create(null)
+ this[EX].path = this[META].replace(/\0.*/, '')
+ break
+
+ case 'NextFileHasLongLinkpath':
+ this[EX] = this[EX] || Object.create(null)
+ this[EX].linkpath = this[META].replace(/\0.*/, '')
+ break
+
+ /* istanbul ignore next */
+ default: throw new Error('unknown meta: ' + entry.type)
+ }
+ }
+
+ abort (msg, error) {
+ this[ABORTED] = true
+ this.warn(msg, error)
+ this.emit('abort', error)
+ this.emit('error', error)
+ }
+
+ write (chunk) {
+ if (this[ABORTED])
+ return
+
+ // first write, might be gzipped
+ if (this[UNZIP] === null && chunk) {
+ if (this[BUFFER]) {
+ chunk = Buffer.concat([this[BUFFER], chunk])
+ this[BUFFER] = null
+ }
+ if (chunk.length < gzipHeader.length) {
+ this[BUFFER] = chunk
+ return true
+ }
+ for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) {
+ if (chunk[i] !== gzipHeader[i])
+ this[UNZIP] = false
+ }
+ if (this[UNZIP] === null) {
+ const ended = this[ENDED]
+ this[ENDED] = false
+ this[UNZIP] = new zlib.Unzip()
+ this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk))
+ this[UNZIP].on('error', er =>
+ this.abort(er.message, er))
+ this[UNZIP].on('end', _ => {
+ this[ENDED] = true
+ this[CONSUMECHUNK]()
+ })
+ this[WRITING] = true
+ const ret = this[UNZIP][ended ? 'end' : 'write' ](chunk)
+ this[WRITING] = false
+ return ret
+ }
+ }
+
+ this[WRITING] = true
+ if (this[UNZIP])
+ this[UNZIP].write(chunk)
+ else
+ this[CONSUMECHUNK](chunk)
+ this[WRITING] = false
+
+ // return false if there's a queue, or if the current entry isn't flowing
+ const ret =
+ this[QUEUE].length ? false :
+ this[READENTRY] ? this[READENTRY].flowing :
+ true
+
+ // if we have no queue, then that means a clogged READENTRY
+ if (!ret && !this[QUEUE].length)
+ this[READENTRY].once('drain', _ => this.emit('drain'))
+
+ return ret
+ }
+
+ [BUFFERCONCAT] (c) {
+ if (c && !this[ABORTED])
+ this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c
+ }
+
+ [MAYBEEND] () {
+ if (this[ENDED] &&
+ !this[EMITTEDEND] &&
+ !this[ABORTED] &&
+ !this[CONSUMING]) {
+ this[EMITTEDEND] = true
+ const entry = this[WRITEENTRY]
+ if (entry && entry.blockRemain) {
+ const have = this[BUFFER] ? this[BUFFER].length : 0
+ this.warn('Truncated input (needed ' + entry.blockRemain +
+ ' more bytes, only ' + have + ' available)', entry)
+ if (this[BUFFER])
+ entry.write(this[BUFFER])
+ entry.end()
+ }
+ this[EMIT](DONE)
+ }
+ }
+
+ [CONSUMECHUNK] (chunk) {
+ if (this[CONSUMING]) {
+ this[BUFFERCONCAT](chunk)
+ } else if (!chunk && !this[BUFFER]) {
+ this[MAYBEEND]()
+ } else {
+ this[CONSUMING] = true
+ if (this[BUFFER]) {
+ this[BUFFERCONCAT](chunk)
+ const c = this[BUFFER]
+ this[BUFFER] = null
+ this[CONSUMECHUNKSUB](c)
+ } else {
+ this[CONSUMECHUNKSUB](chunk)
+ }
+
+ while (this[BUFFER] && this[BUFFER].length >= 512 && !this[ABORTED]) {
+ const c = this[BUFFER]
+ this[BUFFER] = null
+ this[CONSUMECHUNKSUB](c)
+ }
+ this[CONSUMING] = false
+ }
+
+ if (!this[BUFFER] || this[ENDED])
+ this[MAYBEEND]()
+ }
+
+ [CONSUMECHUNKSUB] (chunk) {
+ // we know that we are in CONSUMING mode, so anything written goes into
+ // the buffer. Advance the position and put any remainder in the buffer.
+ let position = 0
+ let length = chunk.length
+ while (position + 512 <= length && !this[ABORTED]) {
+ switch (this[STATE]) {
+ case 'begin':
+ this[CONSUMEHEADER](chunk, position)
+ position += 512
+ break
+
+ case 'ignore':
+ case 'body':
+ position += this[CONSUMEBODY](chunk, position)
+ break
+
+ case 'meta':
+ position += this[CONSUMEMETA](chunk, position)
+ break
+
+ /* istanbul ignore next */
+ default:
+ throw new Error('invalid state: ' + this[STATE])
+ }
+ }
+
+ if (position < length) {
+ if (this[BUFFER])
+ this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]])
+ else
+ this[BUFFER] = chunk.slice(position)
+ }
+ }
+
+ end (chunk) {
+ if (!this[ABORTED]) {
+ if (this[UNZIP])
+ this[UNZIP].end(chunk)
+ else {
+ this[ENDED] = true
+ this.write(chunk)
+ }
+ }
+ }
+})
diff --git a/node_modules/pacote/node_modules/tar/lib/pax.js b/node_modules/pacote/node_modules/tar/lib/pax.js
new file mode 100644
index 000000000..9d7e4aba5
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/pax.js
@@ -0,0 +1,146 @@
+'use strict'
+const Buffer = require('./buffer.js')
+const Header = require('./header.js')
+const path = require('path')
+
+class Pax {
+ constructor (obj, global) {
+ this.atime = obj.atime || null
+ this.charset = obj.charset || null
+ this.comment = obj.comment || null
+ this.ctime = obj.ctime || null
+ this.gid = obj.gid || null
+ this.gname = obj.gname || null
+ this.linkpath = obj.linkpath || null
+ this.mtime = obj.mtime || null
+ this.path = obj.path || null
+ this.size = obj.size || null
+ this.uid = obj.uid || null
+ this.uname = obj.uname || null
+ this.dev = obj.dev || null
+ this.ino = obj.ino || null
+ this.nlink = obj.nlink || null
+ this.global = global || false
+ }
+
+ encode () {
+ const body = this.encodeBody()
+ if (body === '')
+ return null
+
+ const bodyLen = Buffer.byteLength(body)
+ // round up to 512 bytes
+ // add 512 for header
+ const bufLen = 512 * Math.ceil(1 + bodyLen / 512)
+ const buf = Buffer.allocUnsafe(bufLen)
+
+    // 0-fill the header section; encode() below might not hit every field
+ for (let i = 0; i < 512; i++) {
+ buf[i] = 0
+ }
+
+ new Header({
+ // XXX split the path
+ // then the path should be PaxHeader + basename, but less than 99,
+ // prepend with the dirname
+ path: ('PaxHeader/' + path.basename(this.path)).slice(0, 99),
+ mode: this.mode || 0o644,
+ uid: this.uid || null,
+ gid: this.gid || null,
+ size: bodyLen,
+ mtime: this.mtime || null,
+ type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader',
+ linkpath: '',
+ uname: this.uname || '',
+ gname: this.gname || '',
+ devmaj: 0,
+ devmin: 0,
+ atime: this.atime || null,
+ ctime: this.ctime || null
+ }).encode(buf)
+
+ buf.write(body, 512, bodyLen, 'utf8')
+
+ // null pad after the body
+ for (let i = bodyLen + 512; i < buf.length; i++) {
+ buf[i] = 0
+ }
+
+ return buf
+ }
+
+ encodeBody () {
+ return (
+ this.encodeField('path') +
+ this.encodeField('ctime') +
+ this.encodeField('atime') +
+ this.encodeField('dev') +
+ this.encodeField('ino') +
+ this.encodeField('nlink') +
+ this.encodeField('charset') +
+ this.encodeField('comment') +
+ this.encodeField('gid') +
+ this.encodeField('gname') +
+ this.encodeField('linkpath') +
+ this.encodeField('mtime') +
+ this.encodeField('size') +
+ this.encodeField('uid') +
+ this.encodeField('uname')
+ )
+ }
+
+ encodeField (field) {
+ if (this[field] === null || this[field] === undefined)
+ return ''
+ const v = this[field] instanceof Date ? this[field].getTime() / 1000
+ : this[field]
+ const s = ' ' +
+ (field === 'dev' || field === 'ino' || field === 'nlink'
+ ? 'SCHILY.' : '') +
+ field + '=' + v + '\n'
+ const byteLen = Buffer.byteLength(s)
+    // The length prefix counts itself: len is the total byte length of
+    // the record 'LEN kv\n', including the digits of LEN. So a 9-byte
+    // body plus a 1-digit length makes 10 bytes, which needs 2 digits,
+    // giving a total record length of 11.
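+    // Worked example (editor's illustration): for field 'path' and
+    // value 'x', s = ' path=x\n' is 8 bytes; 8 + 1 digit = 9 < 10,
+    // so the record is '9 path=x\n' -- exactly 9 bytes including the '9'.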
+ let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1
+ if (byteLen + digits >= Math.pow(10, digits))
+ digits += 1
+ const len = digits + byteLen
+ return len + s
+ }
+}
+
+Pax.parse = (string, ex, g) => new Pax(merge(parseKV(string), ex), g)
+
+const merge = (a, b) =>
+ b ? Object.keys(a).reduce((s, k) => (s[k] = a[k], s), b) : a
+
+const parseKV = string =>
+ string
+ .replace(/\n$/, '')
+ .split('\n')
+ .reduce(parseKVLine, Object.create(null))
+
+const parseKVLine = (set, line) => {
+ const n = parseInt(line, 10)
+
+ // XXX Values with \n in them will fail this.
+ // Refactor to not be a naive line-by-line parse.
+ if (n !== Buffer.byteLength(line) + 1)
+ return set
+
+ line = line.substr((n + ' ').length)
+ const kv = line.split('=')
+ const k = kv.shift().replace(/^SCHILY\.(dev|ino|nlink)/, '$1')
+ if (!k)
+ return set
+
+ const v = kv.join('=')
+ set[k] = /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k)
+ ? new Date(v * 1000)
+ : /^[0-9]+$/.test(v) ? +v
+ : v
+ return set
+}
+
+module.exports = Pax
diff --git a/node_modules/pacote/node_modules/tar/lib/read-entry.js b/node_modules/pacote/node_modules/tar/lib/read-entry.js
new file mode 100644
index 000000000..8acee94ba
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/read-entry.js
@@ -0,0 +1,98 @@
+'use strict'
+const types = require('./types.js')
+const MiniPass = require('minipass')
+
+const SLURP = Symbol('slurp')
+module.exports = class ReadEntry extends MiniPass {
+ constructor (header, ex, gex) {
+ super()
+    // Read entries always start life paused. This avoids the
+    // situation where Minipass's auto-ending of empty streams results
+    // in an entry ending before we're ready for it.
+ this.pause()
+ this.extended = ex
+ this.globalExtended = gex
+ this.header = header
+ this.startBlockSize = 512 * Math.ceil(header.size / 512)
+ this.blockRemain = this.startBlockSize
+ this.remain = header.size
+ this.type = header.type
+ this.meta = false
+ this.ignore = false
+ switch (this.type) {
+ case 'File':
+ case 'OldFile':
+ case 'Link':
+ case 'SymbolicLink':
+ case 'CharacterDevice':
+ case 'BlockDevice':
+ case 'Directory':
+ case 'FIFO':
+ case 'ContiguousFile':
+ case 'GNUDumpDir':
+ break
+
+ case 'NextFileHasLongLinkpath':
+ case 'NextFileHasLongPath':
+ case 'OldGnuLongPath':
+ case 'GlobalExtendedHeader':
+ case 'ExtendedHeader':
+ case 'OldExtendedHeader':
+ this.meta = true
+ break
+
+ // NOTE: gnutar and bsdtar treat unrecognized types as 'File'
+ // it may be worth doing the same, but with a warning.
+ default:
+ this.ignore = true
+ }
+
+ this.path = header.path
+ this.mode = header.mode
+ if (this.mode)
+ this.mode = this.mode & 0o7777
+ this.uid = header.uid
+ this.gid = header.gid
+ this.uname = header.uname
+ this.gname = header.gname
+ this.size = header.size
+ this.mtime = header.mtime
+ this.atime = header.atime
+ this.ctime = header.ctime
+ this.linkpath = header.linkpath
+
+ if (ex) this[SLURP](ex)
+ if (gex) this[SLURP](gex, true)
+ }
+
+ write (data) {
+ const writeLen = data.length
+ if (writeLen > this.blockRemain)
+ throw new Error('writing more to entry than is appropriate')
+
+ const r = this.remain
+ const br = this.blockRemain
+ this.remain = Math.max(0, r - writeLen)
+ this.blockRemain = Math.max(0, br - writeLen)
+ if (this.ignore)
+ return true
+
+ if (r >= writeLen)
+ return super.write(data)
+
+ // r < writeLen
+ return super.write(data.slice(0, r))
+ }
+
+ [SLURP] (ex, global) {
+ for (let k in ex) {
+ // we slurp in everything except for the path attribute in
+ // a global extended header, because that's weird.
+ if (ex[k] !== null && ex[k] !== undefined &&
+ !(global && k === 'path'))
+ this[k] = ex[k]
+ }
+ }
+}
diff --git a/node_modules/pacote/node_modules/tar/lib/replace.js b/node_modules/pacote/node_modules/tar/lib/replace.js
new file mode 100644
index 000000000..571cee94a
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/replace.js
@@ -0,0 +1,220 @@
+'use strict'
+const Buffer = require('./buffer.js')
+
+// tar -r
+const hlo = require('./high-level-opt.js')
+const Pack = require('./pack.js')
+const Parse = require('./parse.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const t = require('./list.js')
+const path = require('path')
+
+// Starting at the head of the file, read a Header.
+// If the checksum is invalid, that's our position to start writing.
+// If it is valid, jump forward by the entry's size (rounded up to a
+// 512-byte block boundary) and try again.
+// Write the new Pack stream starting there.
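+//
+// Usage sketch (editor's illustration; the file names are assumptions):
+//
+//   const r = require('./replace.js')
+//   // append file.txt after the last valid entry in archive.tar:
+//   r({ file: 'archive.tar' }, ['file.txt'], er => {
+//     if (er) console.error('append failed', er)
+//   })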
+
+const Header = require('./header.js')
+
+const r = module.exports = (opt_, files, cb) => {
+ const opt = hlo(opt_)
+
+ if (!opt.file)
+ throw new TypeError('file is required')
+
+ if (opt.gzip)
+ throw new TypeError('cannot append to compressed archives')
+
+ if (!files || !Array.isArray(files) || !files.length)
+ throw new TypeError('no files or directories specified')
+
+ files = Array.from(files)
+
+ return opt.sync ? replaceSync(opt, files)
+ : replace(opt, files, cb)
+}
+
+const replaceSync = (opt, files) => {
+ const p = new Pack.Sync(opt)
+
+ let threw = true
+ let fd
+ let position
+
+ try {
+ try {
+ fd = fs.openSync(opt.file, 'r+')
+ } catch (er) {
+ if (er.code === 'ENOENT')
+ fd = fs.openSync(opt.file, 'w+')
+ else
+ throw er
+ }
+
+ const st = fs.fstatSync(fd)
+ const headBuf = Buffer.alloc(512)
+
+ POSITION: for (position = 0; position < st.size; position += 512) {
+ for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
+ bytes = fs.readSync(
+ fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
+ )
+
+ if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
+ throw new Error('cannot append to compressed archives')
+
+ if (!bytes)
+ break POSITION
+ }
+
+ let h = new Header(headBuf)
+ if (!h.cksumValid)
+ break
+ let entryBlockSize = 512 * Math.ceil(h.size / 512)
+ if (position + entryBlockSize + 512 > st.size)
+ break
+ // the 512 for the header we just parsed will be added as well
+ // also jump ahead all the blocks for the body
+ position += entryBlockSize
+ if (opt.mtimeCache)
+ opt.mtimeCache.set(h.path, h.mtime)
+ }
+ threw = false
+
+ streamSync(opt, p, position, fd, files)
+ } finally {
+ if (threw)
+ try { fs.closeSync(fd) } catch (er) {}
+ }
+}
+
+const streamSync = (opt, p, position, fd, files) => {
+ const stream = new fsm.WriteStreamSync(opt.file, {
+ fd: fd,
+ start: position
+ })
+ p.pipe(stream)
+ addFilesSync(p, files)
+}
+
+const replace = (opt, files, cb) => {
+ files = Array.from(files)
+ const p = new Pack(opt)
+
+ const getPos = (fd, size, cb_) => {
+ const cb = (er, pos) => {
+ if (er)
+ fs.close(fd, _ => cb_(er))
+ else
+ cb_(null, pos)
+ }
+
+ let position = 0
+ if (size === 0)
+ return cb(null, 0)
+
+ let bufPos = 0
+ const headBuf = Buffer.alloc(512)
+ const onread = (er, bytes) => {
+ if (er)
+ return cb(er)
+ bufPos += bytes
+ if (bufPos < 512 && bytes)
+ return fs.read(
+ fd, headBuf, bufPos, headBuf.length - bufPos,
+ position + bufPos, onread
+ )
+
+ if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
+ return cb(new Error('cannot append to compressed archives'))
+
+ // truncated header
+ if (bufPos < 512)
+ return cb(null, position)
+
+ const h = new Header(headBuf)
+ if (!h.cksumValid)
+ return cb(null, position)
+
+ const entryBlockSize = 512 * Math.ceil(h.size / 512)
+ if (position + entryBlockSize + 512 > size)
+ return cb(null, position)
+
+ position += entryBlockSize + 512
+ if (position >= size)
+ return cb(null, position)
+
+ if (opt.mtimeCache)
+ opt.mtimeCache.set(h.path, h.mtime)
+ bufPos = 0
+ fs.read(fd, headBuf, 0, 512, position, onread)
+ }
+ fs.read(fd, headBuf, 0, 512, position, onread)
+ }
+
+ const promise = new Promise((resolve, reject) => {
+ p.on('error', reject)
+ let flag = 'r+'
+ const onopen = (er, fd) => {
+ if (er && er.code === 'ENOENT' && flag === 'r+') {
+ flag = 'w+'
+ return fs.open(opt.file, flag, onopen)
+ }
+
+ if (er)
+ return reject(er)
+
+ fs.fstat(fd, (er, st) => {
+ if (er)
+ return reject(er)
+ getPos(fd, st.size, (er, position) => {
+ if (er)
+ return reject(er)
+ const stream = new fsm.WriteStream(opt.file, {
+ fd: fd,
+ start: position
+ })
+ p.pipe(stream)
+ stream.on('error', reject)
+ stream.on('close', resolve)
+ addFilesAsync(p, files)
+ })
+ })
+ }
+ fs.open(opt.file, flag, onopen)
+ })
+
+ return cb ? promise.then(cb, cb) : promise
+}
+
+const addFilesSync = (p, files) => {
+ files.forEach(file => {
+ if (file.charAt(0) === '@')
+ t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ sync: true,
+ noResume: true,
+ onentry: entry => p.add(entry)
+ })
+ else
+ p.add(file)
+ })
+ p.end()
+}
+
+const addFilesAsync = (p, files) => {
+ while (files.length) {
+ const file = files.shift()
+ if (file.charAt(0) === '@')
+ return t({
+ file: path.resolve(p.cwd, file.substr(1)),
+ noResume: true,
+ onentry: entry => p.add(entry)
+ }).then(_ => addFilesAsync(p, files))
+ else
+ p.add(file)
+ }
+ p.end()
+}
diff --git a/node_modules/pacote/node_modules/tar/lib/types.js b/node_modules/pacote/node_modules/tar/lib/types.js
new file mode 100644
index 000000000..df425652b
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/types.js
@@ -0,0 +1,44 @@
+'use strict'
+// map types from key to human-friendly name
+exports.name = new Map([
+ ['0', 'File'],
+ // same as File
+ ['', 'OldFile'],
+ ['1', 'Link'],
+ ['2', 'SymbolicLink'],
+ // Devices and FIFOs aren't fully supported
+ // they are parsed, but skipped when unpacking
+ ['3', 'CharacterDevice'],
+ ['4', 'BlockDevice'],
+ ['5', 'Directory'],
+ ['6', 'FIFO'],
+ // same as File
+ ['7', 'ContiguousFile'],
+ // pax headers
+ ['g', 'GlobalExtendedHeader'],
+ ['x', 'ExtendedHeader'],
+ // vendor-specific stuff
+ // skip
+ ['A', 'SolarisACL'],
+ // like 5, but with data, which should be skipped
+ ['D', 'GNUDumpDir'],
+ // metadata only, skip
+ ['I', 'Inode'],
+ // data = link path of next file
+ ['K', 'NextFileHasLongLinkpath'],
+ // data = path of next file
+ ['L', 'NextFileHasLongPath'],
+ // skip
+ ['M', 'ContinuationFile'],
+ // like L
+ ['N', 'OldGnuLongPath'],
+ // skip
+ ['S', 'SparseFile'],
+ // skip
+ ['V', 'TapeVolumeHeader'],
+ // like x
+ ['X', 'OldExtendedHeader']
+])
+
+// map the other direction
+exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]]))
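+
+// Example lookups (editor's illustration):
+//   exports.name.get('5')         // 'Directory'
+//   exports.code.get('Directory') // '5'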
diff --git a/node_modules/pacote/node_modules/tar/lib/unpack.js b/node_modules/pacote/node_modules/tar/lib/unpack.js
new file mode 100644
index 000000000..fc765096e
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/unpack.js
@@ -0,0 +1,621 @@
+'use strict'
+
+const assert = require('assert')
+const EE = require('events').EventEmitter
+const Parser = require('./parse.js')
+const fs = require('fs')
+const fsm = require('fs-minipass')
+const path = require('path')
+const mkdir = require('./mkdir.js')
+const mkdirSync = mkdir.sync
+const wc = require('./winchars.js')
+
+const ONENTRY = Symbol('onEntry')
+const CHECKFS = Symbol('checkFs')
+const ISREUSABLE = Symbol('isReusable')
+const MAKEFS = Symbol('makeFs')
+const FILE = Symbol('file')
+const DIRECTORY = Symbol('directory')
+const LINK = Symbol('link')
+const SYMLINK = Symbol('symlink')
+const HARDLINK = Symbol('hardlink')
+const UNSUPPORTED = Symbol('unsupported')
+const UNKNOWN = Symbol('unknown')
+const CHECKPATH = Symbol('checkPath')
+const MKDIR = Symbol('mkdir')
+const ONERROR = Symbol('onError')
+const PENDING = Symbol('pending')
+const PEND = Symbol('pend')
+const UNPEND = Symbol('unpend')
+const ENDED = Symbol('ended')
+const MAYBECLOSE = Symbol('maybeClose')
+const SKIP = Symbol('skip')
+const DOCHOWN = Symbol('doChown')
+const UID = Symbol('uid')
+const GID = Symbol('gid')
+const crypto = require('crypto')
+
+// Unlinks on Windows are not atomic.
+//
+// This means that if you have a file entry, followed by another
+// file entry with an identical name, and you cannot re-use the file
+// (because it's a hardlink, or because unlink:true is set, or it's
+// Windows, which does not have useful nlink values), then the unlink
+// will be committed to the disk AFTER the new file has been written
+// over the old one, deleting the new file.
+//
+// To work around this, on Windows systems, we rename the file and then
+// delete the renamed file. It's a sloppy kludge, but frankly, I do not
+// know of a better way to do this, given windows' non-atomic unlink
+// semantics.
+//
+// See: https://github.com/npm/node-tar/issues/183
+/* istanbul ignore next */
+const unlinkFile = (path, cb) => {
+ if (process.platform !== 'win32')
+ return fs.unlink(path, cb)
+
+ const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
+ fs.rename(path, name, er => {
+ if (er)
+ return cb(er)
+ fs.unlink(name, cb)
+ })
+}
+
+/* istanbul ignore next */
+const unlinkFileSync = path => {
+ if (process.platform !== 'win32')
+ return fs.unlinkSync(path)
+
+ const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
+ fs.renameSync(path, name)
+ fs.unlinkSync(name)
+}
+
+// pick the first value that is a valid uint32, e.g.
+// uint32(this.uid, entry.uid, this.processUid) or the gid equivalents
+const uint32 = (a, b, c) =>
+ a === a >>> 0 ? a
+ : b === b >>> 0 ? b
+ : c
+
+class Unpack extends Parser {
+ constructor (opt) {
+ if (!opt)
+ opt = {}
+
+ opt.ondone = _ => {
+ this[ENDED] = true
+ this[MAYBECLOSE]()
+ }
+
+ super(opt)
+
+ this.transform = typeof opt.transform === 'function' ? opt.transform : null
+
+ this.writable = true
+ this.readable = false
+
+ this[PENDING] = 0
+ this[ENDED] = false
+
+ this.dirCache = opt.dirCache || new Map()
+
+ if (typeof opt.uid === 'number' || typeof opt.gid === 'number') {
+ // need both or neither
+ if (typeof opt.uid !== 'number' || typeof opt.gid !== 'number')
+ throw new TypeError('cannot set owner without number uid and gid')
+ if (opt.preserveOwner)
+ throw new TypeError(
+ 'cannot preserve owner in archive and also set owner explicitly')
+ this.uid = opt.uid
+ this.gid = opt.gid
+ this.setOwner = true
+ } else {
+ this.uid = null
+ this.gid = null
+ this.setOwner = false
+ }
+
+ // default true for root
+ if (opt.preserveOwner === undefined && typeof opt.uid !== 'number')
+ this.preserveOwner = process.getuid && process.getuid() === 0
+ else
+ this.preserveOwner = !!opt.preserveOwner
+
+ this.processUid = (this.preserveOwner || this.setOwner) && process.getuid ?
+ process.getuid() : null
+ this.processGid = (this.preserveOwner || this.setOwner) && process.getgid ?
+ process.getgid() : null
+
+ // mostly just for testing, but useful in some cases.
+ // Forcibly trigger a chown on every entry, no matter what
+ this.forceChown = opt.forceChown === true
+
+ // turn ><?| in filenames into 0xf000-higher encoded forms
+ this.win32 = !!opt.win32 || process.platform === 'win32'
+
+ // do not unpack over files that are newer than what's in the archive
+ this.newer = !!opt.newer
+
+ // do not unpack over ANY files
+ this.keep = !!opt.keep
+
+ // do not set mtime/atime of extracted entries
+ this.noMtime = !!opt.noMtime
+
+ // allow .., absolute path entries, and unpacking through symlinks
+ // without this, warn and skip .., relativize absolutes, and error
+ // on symlinks in extraction path
+ this.preservePaths = !!opt.preservePaths
+
+ // unlink files and links before writing. This breaks existing hard
+ // links, and removes symlink directories rather than erroring
+ this.unlink = !!opt.unlink
+
+ this.cwd = path.resolve(opt.cwd || process.cwd())
+ this.strip = +opt.strip || 0
+ this.processUmask = process.umask()
+ this.umask = typeof opt.umask === 'number' ? opt.umask : this.processUmask
+ // default mode for dirs created as parents
+ this.dmode = opt.dmode || (0o0777 & (~this.umask))
+ this.fmode = opt.fmode || (0o0666 & (~this.umask))
+ this.on('entry', entry => this[ONENTRY](entry))
+ }
+
+ [MAYBECLOSE] () {
+ if (this[ENDED] && this[PENDING] === 0) {
+ this.emit('prefinish')
+ this.emit('finish')
+ this.emit('end')
+ this.emit('close')
+ }
+ }
+
+ [CHECKPATH] (entry) {
+ if (this.strip) {
+ const parts = entry.path.split(/\/|\\/)
+ if (parts.length < this.strip)
+ return false
+ entry.path = parts.slice(this.strip).join('/')
+
+ if (entry.type === 'Link') {
+ const linkparts = entry.linkpath.split(/\/|\\/)
+ if (linkparts.length >= this.strip)
+ entry.linkpath = linkparts.slice(this.strip).join('/')
+ }
+ }
+
+ if (!this.preservePaths) {
+ const p = entry.path
+ if (p.match(/(^|\/|\\)\.\.(\\|\/|$)/)) {
+ this.warn('path contains \'..\'', p)
+ return false
+ }
+
+ // absolutes on posix are also absolutes on win32
+ // so we only need to test this one to get both
+ if (path.win32.isAbsolute(p)) {
+ const parsed = path.win32.parse(p)
+ this.warn('stripping ' + parsed.root + ' from absolute path', p)
+ entry.path = p.substr(parsed.root.length)
+ }
+ }
+
+ // only encode : chars that aren't drive letter indicators
+ if (this.win32) {
+ const parsed = path.win32.parse(entry.path)
+ entry.path = parsed.root === '' ? wc.encode(entry.path)
+ : parsed.root + wc.encode(entry.path.substr(parsed.root.length))
+ }
+
+ if (path.isAbsolute(entry.path))
+ entry.absolute = entry.path
+ else
+ entry.absolute = path.resolve(this.cwd, entry.path)
+
+ return true
+ }
+
+ [ONENTRY] (entry) {
+ if (!this[CHECKPATH](entry))
+ return entry.resume()
+
+ assert.equal(typeof entry.absolute, 'string')
+
+ switch (entry.type) {
+ case 'Directory':
+ case 'GNUDumpDir':
+ if (entry.mode)
+ entry.mode = entry.mode | 0o700
+
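+      // fall through: directories take the same checkFs path as files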
+ case 'File':
+ case 'OldFile':
+ case 'ContiguousFile':
+ case 'Link':
+ case 'SymbolicLink':
+ return this[CHECKFS](entry)
+
+ case 'CharacterDevice':
+ case 'BlockDevice':
+ case 'FIFO':
+ return this[UNSUPPORTED](entry)
+ }
+ }
+
+ [ONERROR] (er, entry) {
+ // Cwd has to exist, or else nothing works. That's serious.
+ // Other errors are warnings, which raise the error in strict
+ // mode, but otherwise continue on.
+ if (er.name === 'CwdError')
+ this.emit('error', er)
+ else {
+ this.warn(er.message, er)
+ this[UNPEND]()
+ entry.resume()
+ }
+ }
+
+ [MKDIR] (dir, mode, cb) {
+ mkdir(dir, {
+ uid: this.uid,
+ gid: this.gid,
+ processUid: this.processUid,
+ processGid: this.processGid,
+ umask: this.processUmask,
+ preserve: this.preservePaths,
+ unlink: this.unlink,
+ cache: this.dirCache,
+ cwd: this.cwd,
+ mode: mode
+ }, cb)
+ }
+
+ [DOCHOWN] (entry) {
+ // in preserve owner mode, chown if the entry doesn't match process
+ // in set owner mode, chown if setting doesn't match process
+ return this.forceChown ||
+ this.preserveOwner &&
+ ( typeof entry.uid === 'number' && entry.uid !== this.processUid ||
+ typeof entry.gid === 'number' && entry.gid !== this.processGid )
+ ||
+ ( typeof this.uid === 'number' && this.uid !== this.processUid ||
+ typeof this.gid === 'number' && this.gid !== this.processGid )
+ }
+
+ [UID] (entry) {
+ return uint32(this.uid, entry.uid, this.processUid)
+ }
+
+ [GID] (entry) {
+ return uint32(this.gid, entry.gid, this.processGid)
+ }
+
+ [FILE] (entry) {
+ const mode = entry.mode & 0o7777 || this.fmode
+ const stream = new fsm.WriteStream(entry.absolute, {
+ mode: mode,
+ autoClose: false
+ })
+ stream.on('error', er => this[ONERROR](er, entry))
+
+ let actions = 1
+ const done = er => {
+ if (er)
+ return this[ONERROR](er, entry)
+
+ if (--actions === 0)
+ fs.close(stream.fd, _ => this[UNPEND]())
+ }
+
+ stream.on('finish', _ => {
+ // if futimes fails, try utimes
+ // if utimes fails, fail with the original error
+ // same for fchown/chown
+ const abs = entry.absolute
+ const fd = stream.fd
+
+ if (entry.mtime && !this.noMtime) {
+ actions++
+ const atime = entry.atime || new Date()
+ const mtime = entry.mtime
+ fs.futimes(fd, atime, mtime, er =>
+ er ? fs.utimes(abs, atime, mtime, er2 => done(er2 && er))
+ : done())
+ }
+
+ if (this[DOCHOWN](entry)) {
+ actions++
+ const uid = this[UID](entry)
+ const gid = this[GID](entry)
+ fs.fchown(fd, uid, gid, er =>
+ er ? fs.chown(abs, uid, gid, er2 => done(er2 && er))
+ : done())
+ }
+
+ done()
+ })
+
+ const tx = this.transform ? this.transform(entry) || entry : entry
+ if (tx !== entry) {
+ tx.on('error', er => this[ONERROR](er, entry))
+ entry.pipe(tx)
+ }
+ tx.pipe(stream)
+ }
+
+ [DIRECTORY] (entry) {
+ const mode = entry.mode & 0o7777 || this.dmode
+ this[MKDIR](entry.absolute, mode, er => {
+ if (er)
+ return this[ONERROR](er, entry)
+
+ let actions = 1
+ const done = _ => {
+ if (--actions === 0) {
+ this[UNPEND]()
+ entry.resume()
+ }
+ }
+
+ if (entry.mtime && !this.noMtime) {
+ actions++
+ fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, done)
+ }
+
+ if (this[DOCHOWN](entry)) {
+ actions++
+ fs.chown(entry.absolute, this[UID](entry), this[GID](entry), done)
+ }
+
+ done()
+ })
+ }
+
+ [UNSUPPORTED] (entry) {
+ this.warn('unsupported entry type: ' + entry.type, entry)
+ entry.resume()
+ }
+
+ [SYMLINK] (entry) {
+ this[LINK](entry, entry.linkpath, 'symlink')
+ }
+
+ [HARDLINK] (entry) {
+ this[LINK](entry, path.resolve(this.cwd, entry.linkpath), 'link')
+ }
+
+ [PEND] () {
+ this[PENDING]++
+ }
+
+ [UNPEND] () {
+ this[PENDING]--
+ this[MAYBECLOSE]()
+ }
+
+ [SKIP] (entry) {
+ this[UNPEND]()
+ entry.resume()
+ }
+
+ // Check if we can reuse an existing filesystem entry safely and
+ // overwrite it, rather than unlinking and recreating
+ // Windows doesn't report a useful nlink, so we just never reuse entries
+ [ISREUSABLE] (entry, st) {
+ return entry.type === 'File' &&
+ !this.unlink &&
+ st.isFile() &&
+ st.nlink <= 1 &&
+ process.platform !== 'win32'
+ }
+
+ // check if a thing is there, and if so, try to clobber it
+ [CHECKFS] (entry) {
+ this[PEND]()
+ this[MKDIR](path.dirname(entry.absolute), this.dmode, er => {
+ if (er)
+ return this[ONERROR](er, entry)
+ fs.lstat(entry.absolute, (er, st) => {
+ if (st && (this.keep || this.newer && st.mtime > entry.mtime))
+ this[SKIP](entry)
+ else if (er || this[ISREUSABLE](entry, st))
+ this[MAKEFS](null, entry)
+ else if (st.isDirectory()) {
+ if (entry.type === 'Directory') {
+ if (!entry.mode || (st.mode & 0o7777) === entry.mode)
+ this[MAKEFS](null, entry)
+ else
+ fs.chmod(entry.absolute, entry.mode, er => this[MAKEFS](er, entry))
+ } else
+ fs.rmdir(entry.absolute, er => this[MAKEFS](er, entry))
+ } else
+ unlinkFile(entry.absolute, er => this[MAKEFS](er, entry))
+ })
+ })
+ }
+
+ [MAKEFS] (er, entry) {
+ if (er)
+ return this[ONERROR](er, entry)
+
+ switch (entry.type) {
+ case 'File':
+ case 'OldFile':
+ case 'ContiguousFile':
+ return this[FILE](entry)
+
+ case 'Link':
+ return this[HARDLINK](entry)
+
+ case 'SymbolicLink':
+ return this[SYMLINK](entry)
+
+ case 'Directory':
+ case 'GNUDumpDir':
+ return this[DIRECTORY](entry)
+ }
+ }
+
+ [LINK] (entry, linkpath, link) {
+ // XXX: get the type ('file' or 'dir') for windows
+ fs[link](linkpath, entry.absolute, er => {
+ if (er)
+ return this[ONERROR](er, entry)
+ this[UNPEND]()
+ entry.resume()
+ })
+ }
+}
+
+class UnpackSync extends Unpack {
+ constructor (opt) {
+ super(opt)
+ }
+
+ [CHECKFS] (entry) {
+ const er = this[MKDIR](path.dirname(entry.absolute), this.dmode)
+ if (er)
+ return this[ONERROR](er, entry)
+ try {
+ const st = fs.lstatSync(entry.absolute)
+ if (this.keep || this.newer && st.mtime > entry.mtime)
+ return this[SKIP](entry)
+ else if (this[ISREUSABLE](entry, st))
+ return this[MAKEFS](null, entry)
+ else {
+ try {
+ if (st.isDirectory()) {
+ if (entry.type === 'Directory') {
+ if (entry.mode && (st.mode & 0o7777) !== entry.mode)
+ fs.chmodSync(entry.absolute, entry.mode)
+ } else
+ fs.rmdirSync(entry.absolute)
+ } else
+ unlinkFileSync(entry.absolute)
+ return this[MAKEFS](null, entry)
+ } catch (er) {
+ return this[ONERROR](er, entry)
+ }
+ }
+ } catch (er) {
+ return this[MAKEFS](null, entry)
+ }
+ }
+
+ [FILE] (entry) {
+ const mode = entry.mode & 0o7777 || this.fmode
+
+ const oner = er => {
+ try { fs.closeSync(fd) } catch (_) {}
+ if (er)
+ this[ONERROR](er, entry)
+ }
+
+ let stream
+ let fd
+ try {
+ fd = fs.openSync(entry.absolute, 'w', mode)
+ } catch (er) {
+ return oner(er)
+ }
+ const tx = this.transform ? this.transform(entry) || entry : entry
+ if (tx !== entry) {
+ tx.on('error', er => this[ONERROR](er, entry))
+ entry.pipe(tx)
+ }
+
+ tx.on('data', chunk => {
+ try {
+ fs.writeSync(fd, chunk, 0, chunk.length)
+ } catch (er) {
+ oner(er)
+ }
+ })
+
+ tx.on('end', _ => {
+ let er = null
+ // try both, falling futimes back to utimes
+ // if either fails, handle the first error
+ if (entry.mtime && !this.noMtime) {
+ const atime = entry.atime || new Date()
+ const mtime = entry.mtime
+ try {
+ fs.futimesSync(fd, atime, mtime)
+ } catch (futimeser) {
+ try {
+ fs.utimesSync(entry.absolute, atime, mtime)
+ } catch (utimeser) {
+ er = futimeser
+ }
+ }
+ }
+
+ if (this[DOCHOWN](entry)) {
+ const uid = this[UID](entry)
+ const gid = this[GID](entry)
+
+ try {
+ fs.fchownSync(fd, uid, gid)
+ } catch (fchowner) {
+ try {
+ fs.chownSync(entry.absolute, uid, gid)
+ } catch (chowner) {
+ er = er || fchowner
+ }
+ }
+ }
+
+ oner(er)
+ })
+ }
+
+ [DIRECTORY] (entry) {
+ const mode = entry.mode & 0o7777 || this.dmode
+ const er = this[MKDIR](entry.absolute, mode)
+ if (er)
+ return this[ONERROR](er, entry)
+ if (entry.mtime && !this.noMtime) {
+ try {
+ fs.utimesSync(entry.absolute, entry.atime || new Date(), entry.mtime)
+ } catch (er) {}
+ }
+ if (this[DOCHOWN](entry)) {
+ try {
+ fs.chownSync(entry.absolute, this[UID](entry), this[GID](entry))
+ } catch (er) {}
+ }
+ entry.resume()
+ }
+
+ [MKDIR] (dir, mode) {
+ try {
+ return mkdir.sync(dir, {
+ uid: this.uid,
+ gid: this.gid,
+ processUid: this.processUid,
+ processGid: this.processGid,
+ umask: this.processUmask,
+ preserve: this.preservePaths,
+ unlink: this.unlink,
+ cache: this.dirCache,
+ cwd: this.cwd,
+ mode: mode
+ })
+ } catch (er) {
+ return er
+ }
+ }
+
+ [LINK] (entry, linkpath, link) {
+ try {
+ fs[link + 'Sync'](linkpath, entry.absolute)
+ entry.resume()
+ } catch (er) {
+ return this[ONERROR](er, entry)
+ }
+ }
+}
+
+Unpack.Sync = UnpackSync
+module.exports = Unpack
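+
+// Usage sketch (editor's illustration; destination path is an assumption):
+//   const Unpack = require('./unpack.js')
+//   const u = new Unpack({ cwd: '/tmp/dest', strip: 0 })
+//   require('fs').createReadStream('archive.tar').pipe(u)
+//   u.on('close', () => console.log('extracted'))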
diff --git a/node_modules/pacote/node_modules/tar/lib/update.js b/node_modules/pacote/node_modules/tar/lib/update.js
new file mode 100644
index 000000000..16c3e93ed
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/update.js
@@ -0,0 +1,36 @@
+'use strict'
+
+// tar -u
+
+const hlo = require('./high-level-opt.js')
+const r = require('./replace.js')
+// just call tar.r with the filter and mtimeCache
+
+const u = module.exports = (opt_, files, cb) => {
+ const opt = hlo(opt_)
+
+ if (!opt.file)
+ throw new TypeError('file is required')
+
+ if (opt.gzip)
+ throw new TypeError('cannot append to compressed archives')
+
+ if (!files || !Array.isArray(files) || !files.length)
+ throw new TypeError('no files or directories specified')
+
+ files = Array.from(files)
+
+ mtimeFilter(opt)
+ return r(opt, files, cb)
+}
+
+const mtimeFilter = opt => {
+ const filter = opt.filter
+
+ if (!opt.mtimeCache)
+ opt.mtimeCache = new Map()
+
+ opt.filter = filter ? (path, stat) =>
+ filter(path, stat) && !(opt.mtimeCache.get(path) > stat.mtime)
+ : (path, stat) => !(opt.mtimeCache.get(path) > stat.mtime)
+}
diff --git a/node_modules/pacote/node_modules/tar/lib/warn-mixin.js b/node_modules/pacote/node_modules/tar/lib/warn-mixin.js
new file mode 100644
index 000000000..94a4b9b99
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/warn-mixin.js
@@ -0,0 +1,14 @@
+'use strict'
+module.exports = Base => class extends Base {
+ warn (msg, data) {
+ if (!this.strict)
+ this.emit('warn', msg, data)
+ else if (data instanceof Error)
+ this.emit('error', data)
+ else {
+ const er = new Error(msg)
+ er.data = data
+ this.emit('error', er)
+ }
+ }
+}
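+
+// Example (editor's illustration):
+//   const warner = require('./warn-mixin.js')
+//   const W = warner(require('events'))
+//   const w = new W()
+//   w.on('warn', (msg, data) => console.error('warn:', msg))
+//   w.warn('recoverable', {}) // emits 'warn' while w.strict is falsey
+//   w.strict = true
+//   w.warn('fatal', new Error('boom')) // now emits 'error' instead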
diff --git a/node_modules/pacote/node_modules/tar/lib/winchars.js b/node_modules/pacote/node_modules/tar/lib/winchars.js
new file mode 100644
index 000000000..cf6ea0606
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/winchars.js
@@ -0,0 +1,23 @@
+'use strict'
+
+// When writing files on Windows, translate the characters to their
+// 0xf000 higher-encoded versions.
+
+const raw = [
+ '|',
+ '<',
+ '>',
+ '?',
+ ':'
+]
+
+const win = raw.map(char =>
+ String.fromCharCode(0xf000 + char.charCodeAt(0)))
+
+const toWin = new Map(raw.map((char, i) => [char, win[i]]))
+const toRaw = new Map(win.map((char, i) => [char, raw[i]]))
+
+module.exports = {
+ encode: s => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s),
+ decode: s => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s)
+}
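+
+// Example (editor's illustration): '<' is 0x3c, so encode('a<b')
+// yields 'a\uf03cb', and decode(encode('a<b')) === 'a<b'.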
diff --git a/node_modules/pacote/node_modules/tar/lib/write-entry.js b/node_modules/pacote/node_modules/tar/lib/write-entry.js
new file mode 100644
index 000000000..0c019006f
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/lib/write-entry.js
@@ -0,0 +1,422 @@
+'use strict'
+const Buffer = require('./buffer.js')
+const MiniPass = require('minipass')
+const Pax = require('./pax.js')
+const Header = require('./header.js')
+const ReadEntry = require('./read-entry.js')
+const fs = require('fs')
+const path = require('path')
+
+const types = require('./types.js')
+const maxReadSize = 16 * 1024 * 1024
+const PROCESS = Symbol('process')
+const FILE = Symbol('file')
+const DIRECTORY = Symbol('directory')
+const SYMLINK = Symbol('symlink')
+const HARDLINK = Symbol('hardlink')
+const HEADER = Symbol('header')
+const READ = Symbol('read')
+const LSTAT = Symbol('lstat')
+const ONLSTAT = Symbol('onlstat')
+const ONREAD = Symbol('onread')
+const ONREADLINK = Symbol('onreadlink')
+const OPENFILE = Symbol('openfile')
+const ONOPENFILE = Symbol('onopenfile')
+const CLOSE = Symbol('close')
+const MODE = Symbol('mode')
+const warner = require('./warn-mixin.js')
+const winchars = require('./winchars.js')
+
+const modeFix = require('./mode-fix.js')
+
+const WriteEntry = warner(class WriteEntry extends MiniPass {
+ constructor (p, opt) {
+ opt = opt || {}
+ super(opt)
+ if (typeof p !== 'string')
+ throw new TypeError('path is required')
+ this.path = p
+ // suppress atime, ctime, uid, gid, uname, gname
+ this.portable = !!opt.portable
+ // until node has builtin pwnam functions, this'll have to do
+ this.myuid = process.getuid && process.getuid()
+ this.myuser = process.env.USER || ''
+ this.maxReadSize = opt.maxReadSize || maxReadSize
+ this.linkCache = opt.linkCache || new Map()
+ this.statCache = opt.statCache || new Map()
+ this.preservePaths = !!opt.preservePaths
+ this.cwd = opt.cwd || process.cwd()
+ this.strict = !!opt.strict
+ this.noPax = !!opt.noPax
+ this.noMtime = !!opt.noMtime
+ this.mtime = opt.mtime || null
+
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+
+ if (!this.preservePaths && path.win32.isAbsolute(p)) {
+ // absolutes on posix are also absolutes on win32
+ // so we only need to test this one to get both
+ const parsed = path.win32.parse(p)
+ this.warn('stripping ' + parsed.root + ' from absolute path', p)
+ this.path = p.substr(parsed.root.length)
+ }
+
+ this.win32 = !!opt.win32 || process.platform === 'win32'
+ if (this.win32) {
+ this.path = winchars.decode(this.path.replace(/\\/g, '/'))
+ p = p.replace(/\\/g, '/')
+ }
+
+ this.absolute = opt.absolute || path.resolve(this.cwd, p)
+
+ if (this.path === '')
+ this.path = './'
+
+ if (this.statCache.has(this.absolute))
+ this[ONLSTAT](this.statCache.get(this.absolute))
+ else
+ this[LSTAT]()
+ }
+
+ [LSTAT] () {
+ fs.lstat(this.absolute, (er, stat) => {
+ if (er)
+ return this.emit('error', er)
+ this[ONLSTAT](stat)
+ })
+ }
+
+ [ONLSTAT] (stat) {
+ this.statCache.set(this.absolute, stat)
+ this.stat = stat
+ if (!stat.isFile())
+ stat.size = 0
+ this.type = getType(stat)
+ this.emit('stat', stat)
+ this[PROCESS]()
+ }
+
+ [PROCESS] () {
+ switch (this.type) {
+ case 'File': return this[FILE]()
+ case 'Directory': return this[DIRECTORY]()
+ case 'SymbolicLink': return this[SYMLINK]()
+ // unsupported types are ignored.
+ default: return this.end()
+ }
+ }
+
+ [MODE] (mode) {
+ return modeFix(mode, this.type === 'Directory')
+ }
+
+ [HEADER] () {
+ if (this.type === 'Directory' && this.portable)
+ this.noMtime = true
+
+ this.header = new Header({
+ path: this.path,
+ linkpath: this.linkpath,
+ // only the permissions and setuid/setgid/sticky bitflags
+ // not the higher-order bits that specify file type
+ mode: this[MODE](this.stat.mode),
+ uid: this.portable ? null : this.stat.uid,
+ gid: this.portable ? null : this.stat.gid,
+ size: this.stat.size,
+ mtime: this.noMtime ? null : this.mtime || this.stat.mtime,
+ type: this.type,
+ uname: this.portable ? null :
+ this.stat.uid === this.myuid ? this.myuser : '',
+ atime: this.portable ? null : this.stat.atime,
+ ctime: this.portable ? null : this.stat.ctime
+ })
+
+ if (this.header.encode() && !this.noPax)
+ this.write(new Pax({
+ atime: this.portable ? null : this.header.atime,
+ ctime: this.portable ? null : this.header.ctime,
+ gid: this.portable ? null : this.header.gid,
+ mtime: this.noMtime ? null : this.mtime || this.header.mtime,
+ path: this.path,
+ linkpath: this.linkpath,
+ size: this.header.size,
+ uid: this.portable ? null : this.header.uid,
+ uname: this.portable ? null : this.header.uname,
+ dev: this.portable ? null : this.stat.dev,
+ ino: this.portable ? null : this.stat.ino,
+ nlink: this.portable ? null : this.stat.nlink
+ }).encode())
+ this.write(this.header.block)
+ }
+
+ [DIRECTORY] () {
+ if (this.path.substr(-1) !== '/')
+ this.path += '/'
+ this.stat.size = 0
+ this[HEADER]()
+ this.end()
+ }
+
+ [SYMLINK] () {
+ fs.readlink(this.absolute, (er, linkpath) => {
+ if (er)
+ return this.emit('error', er)
+ this[ONREADLINK](linkpath)
+ })
+ }
+
+ [ONREADLINK] (linkpath) {
+ this.linkpath = linkpath
+ this[HEADER]()
+ this.end()
+ }
+
+ [HARDLINK] (linkpath) {
+ this.type = 'Link'
+ this.linkpath = path.relative(this.cwd, linkpath)
+ this.stat.size = 0
+ this[HEADER]()
+ this.end()
+ }
+
+ [FILE] () {
+ if (this.stat.nlink > 1) {
+ const linkKey = this.stat.dev + ':' + this.stat.ino
+ if (this.linkCache.has(linkKey)) {
+ const linkpath = this.linkCache.get(linkKey)
+ if (linkpath.indexOf(this.cwd) === 0)
+ return this[HARDLINK](linkpath)
+ }
+ this.linkCache.set(linkKey, this.absolute)
+ }
+
+ this[HEADER]()
+ if (this.stat.size === 0)
+ return this.end()
+
+ this[OPENFILE]()
+ }
+
+ [OPENFILE] () {
+ fs.open(this.absolute, 'r', (er, fd) => {
+ if (er)
+ return this.emit('error', er)
+ this[ONOPENFILE](fd)
+ })
+ }
+
+ [ONOPENFILE] (fd) {
+ const blockLen = 512 * Math.ceil(this.stat.size / 512)
+ const bufLen = Math.min(blockLen, this.maxReadSize)
+ const buf = Buffer.allocUnsafe(bufLen)
+ this[READ](fd, buf, 0, buf.length, 0, this.stat.size, blockLen)
+ }
+
+ [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
+ fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
+ if (er)
+ return this[CLOSE](fd, _ => this.emit('error', er))
+ this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
+ })
+ }
+
+ [CLOSE] (fd, cb) {
+ fs.close(fd, cb)
+ }
+
+ [ONREAD] (fd, buf, offset, length, pos, remain, blockRemain, bytesRead) {
+ if (bytesRead <= 0 && remain > 0) {
+ const er = new Error('encountered unexpected EOF')
+ er.path = this.absolute
+ er.syscall = 'read'
+ er.code = 'EOF'
+ this[CLOSE](fd, _ => _)
+ return this.emit('error', er)
+ }
+
+ if (bytesRead > remain) {
+ const er = new Error('did not encounter expected EOF')
+ er.path = this.absolute
+ er.syscall = 'read'
+ er.code = 'EOF'
+ this[CLOSE](fd, _ => _)
+ return this.emit('error', er)
+ }
+
+ // null out the rest of the buffer, if we could fit the block padding
+ if (bytesRead === remain) {
+ for (let i = bytesRead; i < length && bytesRead < blockRemain; i++) {
+ buf[i + offset] = 0
+      bytesRead++
+      remain++
+ }
+ }
+
+ const writeBuf = offset === 0 && bytesRead === buf.length ?
+ buf : buf.slice(offset, offset + bytesRead)
+ remain -= bytesRead
+ blockRemain -= bytesRead
+ pos += bytesRead
+ offset += bytesRead
+
+ this.write(writeBuf)
+
+ if (!remain) {
+ if (blockRemain)
+ this.write(Buffer.alloc(blockRemain))
+ this.end()
+ this[CLOSE](fd, _ => _)
+ return
+ }
+
+ if (offset >= length) {
+ buf = Buffer.allocUnsafe(length)
+ offset = 0
+ }
+ length = buf.length - offset
+ this[READ](fd, buf, offset, length, pos, remain, blockRemain)
+ }
+})
+
+class WriteEntrySync extends WriteEntry {
+ constructor (path, opt) {
+ super(path, opt)
+ }
+
+ [LSTAT] () {
+ this[ONLSTAT](fs.lstatSync(this.absolute))
+ }
+
+ [SYMLINK] () {
+ this[ONREADLINK](fs.readlinkSync(this.absolute))
+ }
+
+ [OPENFILE] () {
+ this[ONOPENFILE](fs.openSync(this.absolute, 'r'))
+ }
+
+ [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
+ let threw = true
+ try {
+ const bytesRead = fs.readSync(fd, buf, offset, length, pos)
+ this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
+ threw = false
+ } finally {
+ if (threw)
+ try { this[CLOSE](fd) } catch (er) {}
+ }
+ }
+
+ [CLOSE] (fd) {
+ fs.closeSync(fd)
+ }
+}
+
+const WriteEntryTar = warner(class WriteEntryTar extends MiniPass {
+ constructor (readEntry, opt) {
+ opt = opt || {}
+ super(opt)
+ this.preservePaths = !!opt.preservePaths
+ this.portable = !!opt.portable
+ this.strict = !!opt.strict
+ this.noPax = !!opt.noPax
+ this.noMtime = !!opt.noMtime
+
+ this.readEntry = readEntry
+ this.type = readEntry.type
+ if (this.type === 'Directory' && this.portable)
+ this.noMtime = true
+
+ this.path = readEntry.path
+ this.mode = this[MODE](readEntry.mode)
+ this.uid = this.portable ? null : readEntry.uid
+ this.gid = this.portable ? null : readEntry.gid
+ this.uname = this.portable ? null : readEntry.uname
+ this.gname = this.portable ? null : readEntry.gname
+ this.size = readEntry.size
+ this.mtime = this.noMtime ? null : opt.mtime || readEntry.mtime
+ this.atime = this.portable ? null : readEntry.atime
+ this.ctime = this.portable ? null : readEntry.ctime
+ this.linkpath = readEntry.linkpath
+
+ if (typeof opt.onwarn === 'function')
+ this.on('warn', opt.onwarn)
+
+ if (path.isAbsolute(this.path) && !this.preservePaths) {
+ const parsed = path.parse(this.path)
+ this.warn(
+ 'stripping ' + parsed.root + ' from absolute path',
+ this.path
+ )
+ this.path = this.path.substr(parsed.root.length)
+ }
+
+ this.remain = readEntry.size
+ this.blockRemain = readEntry.startBlockSize
+
+ this.header = new Header({
+ path: this.path,
+ linkpath: this.linkpath,
+ // only the permissions and setuid/setgid/sticky bitflags
+ // not the higher-order bits that specify file type
+ mode: this.mode,
+ uid: this.portable ? null : this.uid,
+ gid: this.portable ? null : this.gid,
+ size: this.size,
+ mtime: this.noMtime ? null : this.mtime,
+ type: this.type,
+ uname: this.portable ? null : this.uname,
+ atime: this.portable ? null : this.atime,
+ ctime: this.portable ? null : this.ctime
+ })
+
+ if (this.header.encode() && !this.noPax)
+ super.write(new Pax({
+ atime: this.portable ? null : this.atime,
+ ctime: this.portable ? null : this.ctime,
+ gid: this.portable ? null : this.gid,
+ mtime: this.noMtime ? null : this.mtime,
+ path: this.path,
+ linkpath: this.linkpath,
+ size: this.size,
+ uid: this.portable ? null : this.uid,
+ uname: this.portable ? null : this.uname,
+ dev: this.portable ? null : this.readEntry.dev,
+ ino: this.portable ? null : this.readEntry.ino,
+ nlink: this.portable ? null : this.readEntry.nlink
+ }).encode())
+
+ super.write(this.header.block)
+ readEntry.pipe(this)
+ }
+
+ [MODE] (mode) {
+ return modeFix(mode, this.type === 'Directory')
+ }
+
+ write (data) {
+ const writeLen = data.length
+ if (writeLen > this.blockRemain)
+ throw new Error('writing more to entry than is appropriate')
+ this.blockRemain -= writeLen
+ return super.write(data)
+ }
+
+ end () {
+ if (this.blockRemain)
+ this.write(Buffer.alloc(this.blockRemain))
+ return super.end()
+ }
+})
+
+WriteEntry.Sync = WriteEntrySync
+WriteEntry.Tar = WriteEntryTar
+
+const getType = stat =>
+ stat.isFile() ? 'File'
+ : stat.isDirectory() ? 'Directory'
+ : stat.isSymbolicLink() ? 'SymbolicLink'
+ : 'Unsupported'
+
+module.exports = WriteEntry
diff --git a/node_modules/pacote/node_modules/tar/package.json b/node_modules/pacote/node_modules/tar/package.json
new file mode 100644
index 000000000..a06234e72
--- /dev/null
+++ b/node_modules/pacote/node_modules/tar/package.json
@@ -0,0 +1,82 @@
+{
+ "_from": "tar@^4.4.10",
+ "_id": "tar@4.4.13",
+ "_inBundle": false,
+ "_integrity": "sha512-w2VwSrBoHa5BsSyH+KxEqeQBAllHhccyMFVHtGtdMpF4W7IRWfZjFiQceJPChOeTsSDVUpER2T8FA93pr0L+QA==",
+ "_location": "/pacote/tar",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "tar@^4.4.10",
+ "name": "tar",
+ "escapedName": "tar",
+ "rawSpec": "^4.4.10",
+ "saveSpec": null,
+ "fetchSpec": "^4.4.10"
+ },
+ "_requiredBy": [
+ "/pacote"
+ ],
+ "_resolved": "https://registry.npmjs.org/tar/-/tar-4.4.13.tgz",
+ "_shasum": "43b364bc52888d555298637b10d60790254ab525",
+ "_spec": "tar@^4.4.10",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/pacote",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/npm/node-tar/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {
+ "chownr": "^1.1.1",
+ "fs-minipass": "^1.2.5",
+ "minipass": "^2.8.6",
+ "minizlib": "^1.2.1",
+ "mkdirp": "^0.5.0",
+ "safe-buffer": "^5.1.2",
+ "yallist": "^3.0.3"
+ },
+ "deprecated": false,
+ "description": "tar for node",
+ "devDependencies": {
+ "chmodr": "^1.2.0",
+ "end-of-stream": "^1.4.1",
+ "events-to-array": "^1.1.2",
+ "mutate-fs": "^2.1.1",
+ "rimraf": "^2.6.3",
+ "tap": "^14.6.5",
+ "tar-fs": "^1.16.3",
+ "tar-stream": "^1.6.2"
+ },
+ "engines": {
+ "node": ">=4.5"
+ },
+ "files": [
+ "index.js",
+ "lib/"
+ ],
+ "homepage": "https://github.com/npm/node-tar#readme",
+ "license": "ISC",
+ "name": "tar",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/npm/node-tar.git"
+ },
+ "scripts": {
+ "bench": "for i in benchmarks/*/*.js; do echo $i; for j in {1..5}; do node $i || break; done; done",
+ "genparse": "node scripts/generate-parse-fixtures.js",
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap"
+ },
+ "tap": {
+ "coverage-map": "map.js",
+ "check-coverage": true
+ },
+ "version": "4.4.13"
+}
diff --git a/node_modules/tar/CHANGELOG.md b/node_modules/tar/CHANGELOG.md
new file mode 100644
index 000000000..14b5c4ee8
--- /dev/null
+++ b/node_modules/tar/CHANGELOG.md
@@ -0,0 +1,66 @@
+# Changelog
+
+## 6.0
+
+- Drop support for node 6 and 8
+- fix symlinks and hardlinks on windows being packed with `\`-style path
+ targets
+
+## 5.0
+
+- Address unpack race conditions using path reservations
+- Change large-numbers errors from TypeError to Error
+- Add `TAR_*` error codes
+- do not treat ignored entries as an invalid archive
+- drop support for node v4
+- unpack: conditionally use a file mapping to write files on Windows
+- Set more portable 'mode' value in portable mode
+- Set `portable` gzip option in portable mode
+
+## 4.4
+
+- Add 'mtime' option to tar creation to force mtime
+- unpack: only reuse file fs entries if nlink = 1
+- unpack: rename before unlinking files on Windows
+- Fix encoding/decoding of base-256 numbers
+- Use `stat` instead of `lstat` when checking CWD
+- Always provide a callback to fs.close()
+
+## 4.3
+
+- Add 'transform' unpack option
+
+## 4.2
+
+- Fail when zlib fails
+
+## 4.1
+
+- Add noMtime flag for tar creation
+
+## 4.0
+
+- unpack: raise error if cwd is missing or not a dir
+- pack: don't drop dots from dotfiles when prefixing
+
+## 3.1
+
+- Support `@file.tar` as an entry argument to copy entries from one tar
+ file to another.
+- Add `noPax` option
+- `noResume` option for tar.t
+- win32: convert `>|<?:` chars to windows-friendly form
+- Exclude mtime for dirs in portable mode
+
+## 3.0
+
+- Minipass-based implementation
+- Entirely new API surface, `tar.c()`, `tar.x()` etc., much closer to
+ system tar semantics
+- Massive performance improvement
+- Require node 4.x and higher
+
+## 0.x, 1.x, 2.x - 2011-2014
+
+- fstream-based implementation
+- slow and kinda bad, but better than npm shelling out to the system `tar`
diff --git a/node_modules/tar/README.md b/node_modules/tar/README.md
index 034e4865c..5e635e622 100644
--- a/node_modules/tar/README.md
+++ b/node_modules/tar/README.md
@@ -63,23 +63,93 @@ that all of the data is immediately available by calling
`stream.read()`. For writable streams, it will be acted upon as soon
as it is provided, but this can be at any time.
-### Warnings
-
-Some things cause tar to emit a warning, but should usually not cause
-the entire operation to fail. There are three ways to handle
-warnings:
-
-1. **Ignore them** (default) Invalid entries won't be put in the
- archive, and invalid entries won't be unpacked. This is usually
- fine, but can hide failures that you might care about.
-2. **Notice them** Add an `onwarn` function to the options, or listen
- to the `'warn'` event on any tar stream. The function will get
- called as `onwarn(message, data)`. Handle as appropriate.
-3. **Explode them.** Set `strict: true` in the options object, and
- `warn` messages will be emitted as `'error'` events instead. If
- there's no `error` handler, this causes the program to crash. If
- used with a promise-returning/callback-taking method, then it'll
- send the error to the promise/callback.
+### Warnings and Errors
+
+Tar emits warnings and errors for recoverable and unrecoverable situations,
+respectively. In many cases, a warning only affects a single entry in an
+archive, or is simply informing you that it's modifying an entry to comply
+with the settings provided.
+
+Unrecoverable warnings will always raise an error (ie, emit `'error'` on
+streaming actions, throw for non-streaming sync actions, reject the
+returned Promise for non-streaming async operations, or call a provided
+callback with an `Error` as the first argument). Recoverable errors will
+raise an error only if `strict: true` is set in the options.
+
+Respond to (recoverable) warnings by listening to the `warn` event.
+Handlers receive 3 arguments:
+
+- `code` String. One of the error codes below. This may not match
+ `data.code`, which preserves the original error code from fs and zlib.
+- `message` String. More details about the error.
+- `data` Metadata about the error. An `Error` object for errors raised by
+ fs and zlib. All fields are attached to errors raised by tar. Typically
+ contains the following fields, as relevant:
+ - `tarCode` The tar error code.
+ - `code` Either the tar error code, or the error code set by the
+ underlying system.
+ - `file` The archive file being read or written.
+ - `cwd` Working directory for creation and extraction operations.
+ - `entry` The entry object (if it could be created) for `TAR_ENTRY_INFO`,
+ `TAR_ENTRY_INVALID`, and `TAR_ENTRY_ERROR` warnings.
+ - `header` The header object (if it could be created, and the entry could
+ not be created) for `TAR_ENTRY_INFO` and `TAR_ENTRY_INVALID` warnings.
+ - `recoverable` Boolean. If `false`, then the warning will emit an
+ `error`, even in non-strict mode.
+
+#### Error Codes
+
+* `TAR_ENTRY_INFO` An informative error indicating that an entry is being
+ modified, but otherwise processed normally. For example, removing `/` or
+ `C:\` from absolute paths if `preservePaths` is not set.
+
+* `TAR_ENTRY_INVALID` An indication that a given entry is not a valid tar
+ archive entry, and will be skipped. This occurs when:
+ - a checksum fails,
+ - a `linkpath` is missing for a link type, or
+ - a `linkpath` is provided for a non-link type.
+
+ If every entry in a parsed archive raises a `TAR_ENTRY_INVALID` error,
+ then the archive is presumed to be unrecoverably broken, and
+ `TAR_BAD_ARCHIVE` will be raised.
+
+* `TAR_ENTRY_ERROR` The entry appears to be a valid tar archive entry, but
+ encountered an error which prevented it from being unpacked. This occurs
+ when:
+ - an unrecoverable fs error happens during unpacking,
+ - an entry has `..` in the path and `preservePaths` is not set, or
+ - an entry is extracting through a symbolic link, when `preservePaths` is
+ not set.
+
+* `TAR_ENTRY_UNSUPPORTED` An indication that a given entry is
+ a valid archive entry, but of a type that is unsupported, and so will be
+ skipped in archive creation or extraction.
+
+* `TAR_ABORT` When parsing gzip-encoded archives, the parser will
+ abort the parse process and raise a warning for any zlib errors encountered.
+ Aborts are considered unrecoverable for both parsing and unpacking.
+
+* `TAR_BAD_ARCHIVE` The archive file is totally hosed. This can happen for
+ a number of reasons, and always occurs at the end of a parse or extract:
+
+ - An entry body was truncated before seeing the full number of bytes.
+ - The archive contained only invalid entries, indicating that it is
+ likely not an archive, or at least, not an archive this library can
+ parse.
+
+ `TAR_BAD_ARCHIVE` is considered informative for parse operations, but
+ unrecoverable for extraction. Note that, if encountered at the end of an
+ extraction, tar WILL still have extracted as much as it could from the
+ archive, so there may be some garbage files to clean up.
+
+Errors that occur deeper in the system (ie, either the filesystem or zlib)
+will have their error codes left intact, and a `tarCode` matching one of
+the above will be added to the warning metadata or the raised error object.
+
+Errors generated by tar will have one of the above codes set as the
+`error.code` field as well, but since errors originating in zlib or fs will
+have their original codes, it's better to read `error.tarCode` if you wish
+to see how tar is handling the issue.
### Examples
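The three-argument `onwarn` signature documented in the section above benefits from a concrete illustration. A minimal sketch of wiring it up during extraction, assuming a local `archive.tgz` (the filename is illustrative):

```js
// Listening for recoverable warnings with the new (code, message, data)
// signature. data.tarCode always carries the tar-level code; data.code
// may instead preserve the original fs/zlib code.
const tar = require('tar')

tar.x({
  file: 'archive.tgz',
  onwarn: (code, message, data) => {
    console.error(`[${code}] ${message}`, { tarCode: data.tarCode })
  },
}).then(() => console.log('extraction complete'))
```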
@@ -201,8 +271,8 @@ The following options are supported:
and a file is not provided, then the resulting stream will already
have the data ready to `read` or `emit('data')` as soon as you
request it.
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
- `strict` Treat warnings as crash-worthy errors. Default false.
- `cwd` The current working directory for creating the archive.
Defaults to `process.cwd()`. [Alias: `C`]
@@ -214,8 +284,9 @@ The following options are supported:
or `false` to omit it.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary other
- time-based operations.
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths. [Alias: `P`]
- `mode` The mode to set on the created file archive
@@ -297,8 +368,8 @@ The following options are supported:
Pathnames with fewer elements will be silently skipped. Note that
the pathname is edited after applying the filter, but before
security checks. [Alias: `strip-components`, `stripComponents`]
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
- `preserveOwner` If true, tar will set the `uid` and `gid` of
extracted entries to the `uid` and `gid` fields in the archive.
This defaults to true when run as root, and false otherwise. If
@@ -401,8 +472,8 @@ The following options are supported:
filename. [Alias: `f`]
- `sync` Act synchronously. If this is set, then any provided file
will be fully written after the call to `tar.c`.
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
- `strict` Treat warnings as crash-worthy errors. Default false.
- `cwd` The current working directory for adding entries to the
archive. Defaults to `process.cwd()`. [Alias: `C`]
@@ -414,8 +485,9 @@ The following options are supported:
or `false` to omit it.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary other
- time-based operations.
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths. [Alias: `P`]
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
@@ -452,8 +524,8 @@ The following options are supported:
filename. [Alias: `f`]
- `sync` Act synchronously. If this is set, then any provided file
will be fully written after the call to `tar.c`.
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
- `strict` Treat warnings as crash-worthy errors. Default false.
- `cwd` The current working directory for adding entries to the
archive. Defaults to `process.cwd()`. [Alias: `C`]
@@ -465,8 +537,9 @@ The following options are supported:
or `false` to omit it.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary other
- time-based operations.
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths. [Alias: `P`]
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
@@ -499,8 +572,8 @@ Has all the standard readable stream interface stuff. `'data'` and
The following options are supported:
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
- `strict` Treat warnings as crash-worthy errors. Default false.
- `cwd` The current working directory for creating the archive.
Defaults to `process.cwd()`.
@@ -512,8 +585,9 @@ The following options are supported:
or `false` to omit it.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary other
- time-based operations.
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths.
- `linkCache` A Map object containing the device and inode value for
@@ -595,8 +669,8 @@ Most unpack errors will cause a `warn` event to be emitted. If the
Pathnames with fewer elements will be silently skipped. Note that
the pathname is edited after applying the filter, but before
security checks.
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
- `umask` Filter the modes of entries like `process.umask()`.
- `dmode` Default mode for directories
- `fmode` Default mode for files
@@ -634,8 +708,8 @@ Most unpack errors will cause a `warn` event to be emitted. If the
- `strict` Treat warnings as crash-worthy errors. Default false.
- `onentry` A function that gets called with `(entry)` for each entry
that passes the filter.
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
### class tar.Unpack.Sync
@@ -674,13 +748,13 @@ The following options are supported:
archive, or `false` to skip it.
- `onentry` A function that gets called with `(entry)` for each entry
that passes the filter.
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
-#### abort(message, error)
+#### abort(error)
Stop all parsing activities. This is called when there are zlib
-errors. It also emits a warning with the message and error provided.
+errors. It also emits an unrecoverable warning with the error provided.
### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass)
@@ -725,8 +799,9 @@ It has the following fields:
object.
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary other
- time-based operations.
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
- `myuid` If supported, the uid of the user running the current
process.
- `myuser` The `env.USER` string if set, or `''`. Set as the entry
@@ -764,8 +839,9 @@ The following options are supported:
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary other
- time-based operations.
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
- `maxReadSize` The maximum buffer size for `fs.read()` operations.
Defaults to 1 MB.
- `linkCache` A Map object containing the device and inode value for
@@ -781,8 +857,8 @@ The following options are supported:
- `strict` Treat warnings as crash-worthy errors. Default false.
- `win32` True if on a windows platform. Causes behavior where paths
replace `\` with `/`.
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
- `noMtime` Set to true to omit writing `mtime` values for entries.
Note that this prevents using other mtime-based features like
`tar.update` or the `keepNewer` option with the resulting tar archive.
@@ -813,13 +889,14 @@ The following options are supported:
- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
`uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
- that `mtime` is still included, because this is necessary other
- time-based operations.
+ that `mtime` is still included, because this is necessary for other
+ time-based operations. Additionally, `mode` is set to a "reasonable
+ default" for most unix systems, based on a `umask` value of `0o22`.
- `preservePaths` Allow absolute paths. By default, `/` is stripped
from absolute paths.
- `strict` Treat warnings as crash-worthy errors. Default false.
-- `onwarn` A function that will get called with `(message, data)` for
- any warnings encountered.
+- `onwarn` A function that will get called with `(code, message, data)` for
+ any warnings encountered. (See "Warnings and Errors")
- `noMtime` Set to true to omit writing `mtime` values for entries.
Note that this prevents using other mtime-based features like
`tar.update` or the `keepNewer` option with the resulting tar archive.
diff --git a/node_modules/tar/lib/get-write-flag.js b/node_modules/tar/lib/get-write-flag.js
new file mode 100644
index 000000000..e86959996
--- /dev/null
+++ b/node_modules/tar/lib/get-write-flag.js
@@ -0,0 +1,20 @@
+// Get the appropriate flag to use for creating files
+// We use fmap on Windows platforms for files less than
+// 512kb. This is a fairly low limit, but avoids making
+// things slower in some cases. Since most of what this
+// library is used for is extracting tarballs of many
+// relatively small files in npm packages and the like,
+// it can be a big boost on Windows platforms.
+// Only supported in Node v12.9.0 and above.
+const platform = process.env.__FAKE_PLATFORM__ || process.platform
+const isWindows = platform === 'win32'
+const fs = global.__FAKE_TESTING_FS__ || require('fs')
+
+/* istanbul ignore next */
+const { O_CREAT, O_TRUNC, O_WRONLY, UV_FS_O_FILEMAP = 0 } = fs.constants
+
+const fMapEnabled = isWindows && !!UV_FS_O_FILEMAP
+const fMapLimit = 512 * 1024
+const fMapFlag = UV_FS_O_FILEMAP | O_TRUNC | O_CREAT | O_WRONLY
+module.exports = !fMapEnabled ? () => 'w'
+ : size => size < fMapLimit ? fMapFlag : 'w'
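A brief sketch of how this helper is consumed (the `writeExtractedFile` wrapper is hypothetical; the real call sites are in `lib/unpack.js` further down):

```js
// Pick open(2) flags per file size: on Windows with Node >= 12.9.0,
// files under 512kb get the UV_FS_O_FILEMAP fast path; everywhere
// else the helper just returns 'w'.
const fs = require('fs')
const getFlag = require('./get-write-flag.js')

const writeExtractedFile = (path, size, mode, cb) =>
  fs.open(path, getFlag(size), mode, cb)
```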
diff --git a/node_modules/tar/lib/header.js b/node_modules/tar/lib/header.js
index d29c3b990..5d88f6cf8 100644
--- a/node_modules/tar/lib/header.js
+++ b/node_modules/tar/lib/header.js
@@ -4,7 +4,6 @@
// the data could not be faithfully encoded in a simple header.
// (Also, check header.needPax to see if it needs a pax header.)
-const Buffer = require('./buffer.js')
const types = require('./types.js')
const pathModule = require('path').posix
const large = require('./large-numbers.js')
diff --git a/node_modules/tar/lib/large-numbers.js b/node_modules/tar/lib/large-numbers.js
index 3e5c99255..ad30bc350 100644
--- a/node_modules/tar/lib/large-numbers.js
+++ b/node_modules/tar/lib/large-numbers.js
@@ -6,7 +6,7 @@ const encode = exports.encode = (num, buf) => {
if (!Number.isSafeInteger(num))
// The number is so large that javascript cannot represent it with integer
// precision.
- throw TypeError('cannot encode number outside of javascript safe integer range')
+ throw Error('cannot encode number outside of javascript safe integer range')
else if (num < 0)
encodeNegative(num, buf)
else
@@ -50,12 +50,12 @@ const parse = exports.parse = (buf) => {
else if (pre === 0xff)
value = twos(buf)
else
- throw TypeError('invalid base256 encoding')
+ throw Error('invalid base256 encoding')
if (!Number.isSafeInteger(value))
// The number is so large that javascript cannot represent it with integer
// precision.
- throw TypeError('parsed number outside of javascript safe integer range')
+ throw Error('parsed number outside of javascript safe integer range')
return value
}
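The switch from `TypeError` to `Error` is a behavioral change for callers that branch on the error class. A small sketch of the new behavior (values here are illustrative):

```js
// Out-of-range base-256 values now throw a plain Error, so any
// `er instanceof TypeError` checks written against tar 4.x no longer match.
const large = require('./large-numbers.js')

try {
  large.encode(Number.MAX_SAFE_INTEGER + 1, Buffer.alloc(12))
} catch (er) {
  console.log(er instanceof TypeError) // false as of this change
  console.log(er instanceof Error)     // true
}
```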
diff --git a/node_modules/tar/lib/list.js b/node_modules/tar/lib/list.js
index 250ebe001..9da3f812c 100644
--- a/node_modules/tar/lib/list.js
+++ b/node_modules/tar/lib/list.js
@@ -1,7 +1,5 @@
'use strict'
-const Buffer = require('./buffer.js')
-
// XXX: This shares a lot in common with extract.js
// maybe some DRY opportunity here?
diff --git a/node_modules/tar/lib/mkdir.js b/node_modules/tar/lib/mkdir.js
index c6a154c24..381d0e1b3 100644
--- a/node_modules/tar/lib/mkdir.js
+++ b/node_modules/tar/lib/mkdir.js
@@ -76,7 +76,7 @@ const mkdir = module.exports = (dir, opt, cb) => {
})
if (preserve)
- return mkdirp(dir, mode, done)
+ return mkdirp(dir, {mode}).then(made => done(null, made), done)
const sub = path.relative(cwd, dir)
const parts = sub.split(/\/|\\/)
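This hunk adapts to mkdirp@1's promise-based API. The pattern, isolated as a sketch (`mkdirCb` is a hypothetical name for illustration):

```js
// mkdirp@1 returns a Promise resolving to the first directory it
// created (or undefined if nothing was made), so the old
// (er, made) callback contract is rebuilt with .then().
const mkdirp = require('mkdirp')

const mkdirCb = (dir, mode, done) =>
  mkdirp(dir, { mode }).then(made => done(null, made), done)
```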
diff --git a/node_modules/tar/lib/mode-fix.js b/node_modules/tar/lib/mode-fix.js
index 3363a3b15..c3758741c 100644
--- a/node_modules/tar/lib/mode-fix.js
+++ b/node_modules/tar/lib/mode-fix.js
@@ -1,6 +1,16 @@
'use strict'
-module.exports = (mode, isDir) => {
+module.exports = (mode, isDir, portable) => {
mode &= 0o7777
+
+ // in portable mode, use the minimum reasonable umask
+ // if this system creates files with 0o664 by default
+ // (as some linux distros do), then we'll write the
+ // archive with 0o644 instead. Also, don't ever create
+ // a file that is not readable/writable by the owner.
+ if (portable) {
+ mode = (mode | 0o600) &~0o22
+ }
+
// if dirs are readable, then they should be listable
if (isDir) {
if (mode & 0o400)
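Worked examples of the portable-mode normalization may help: `(mode | 0o600) & ~0o22` forces owner read/write on and strips the group/other write bits. A quick sketch:

```js
// Portable-mode mask arithmetic from the hunk above.
const fix = mode => (mode | 0o600) & ~0o22

console.log(fix(0o664).toString(8)) // '644' — a 0o664 default becomes 0o644
console.log(fix(0o777).toString(8)) // '755'
console.log(fix(0o400).toString(8)) // '600' — owner write is forced on
```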
diff --git a/node_modules/tar/lib/pack.js b/node_modules/tar/lib/pack.js
index 857cea910..0fca4ae25 100644
--- a/node_modules/tar/lib/pack.js
+++ b/node_modules/tar/lib/pack.js
@@ -1,7 +1,5 @@
'use strict'
-const Buffer = require('./buffer.js')
-
// A readable tar stream creator
// Technically, this is a transform stream that you write paths into,
// and tar format comes out of.
@@ -62,6 +60,7 @@ const Pack = warner(class Pack extends MiniPass {
super(opt)
opt = opt || Object.create(null)
this.opt = opt
+ this.file = opt.file || ''
this.cwd = opt.cwd || process.cwd()
this.maxReadSize = opt.maxReadSize
this.preservePaths = !!opt.preservePaths
@@ -76,10 +75,13 @@ const Pack = warner(class Pack extends MiniPass {
if (typeof opt.onwarn === 'function')
this.on('warn', opt.onwarn)
+ this.portable = !!opt.portable
this.zip = null
if (opt.gzip) {
if (typeof opt.gzip !== 'object')
opt.gzip = {}
+ if (this.portable)
+ opt.gzip.portable = true
this.zip = new zlib.Gzip(opt.gzip)
this.zip.on('data', chunk => super.write(chunk))
this.zip.on('end', _ => super.end())
@@ -88,7 +90,6 @@ const Pack = warner(class Pack extends MiniPass {
} else
this.on('drain', this[ONDRAIN])
- this.portable = !!opt.portable
this.noDirRecurse = !!opt.noDirRecurse
this.follow = !!opt.follow
this.noMtime = !!opt.noMtime
@@ -285,9 +286,7 @@ const Pack = warner(class Pack extends MiniPass {
[ENTRYOPT] (job) {
return {
- onwarn: (msg, data) => {
- this.warn(msg, data)
- },
+ onwarn: (code, msg, data) => this.warn(code, msg, data),
noPax: this.noPax,
cwd: this.cwd,
absolute: job.absolute,
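Moving the `this.portable` assignment ahead of the gzip setup is what allows `opt.gzip.portable = true` to take effect; minizlib's `portable` gzip option is understood to suppress the host- and time-specific gzip header fields. A sketch of the intended payoff, assuming an unchanged hypothetical input directory `dir`:

```js
// Two portable packs of identical input should produce identical
// bytes, since the gzip header no longer embeds a timestamp.
const tar = require('tar')

const pack = () =>
  tar.c({ gzip: true, portable: true, sync: true }, ['dir']).read()

console.log(pack().equals(pack())) // expected: true
```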
diff --git a/node_modules/tar/lib/parse.js b/node_modules/tar/lib/parse.js
index 43d4383dd..d9a49ad1f 100644
--- a/node_modules/tar/lib/parse.js
+++ b/node_modules/tar/lib/parse.js
@@ -29,7 +29,6 @@ const maxMetaEntrySize = 1024 * 1024
const Entry = require('./read-entry.js')
const Pax = require('./pax.js')
const zlib = require('minizlib')
-const Buffer = require('./buffer.js')
const gzipHeader = Buffer.from([0x1f, 0x8b])
const STATE = Symbol('state')
@@ -58,6 +57,9 @@ const MAYBEEND = Symbol('maybeEnd')
const WRITING = Symbol('writing')
const ABORTED = Symbol('aborted')
const DONE = Symbol('onDone')
+const SAW_VALID_ENTRY = Symbol('sawValidEntry')
+const SAW_NULL_BLOCK = Symbol('sawNullBlock')
+const SAW_EOF = Symbol('sawEOF')
const noop = _ => true
@@ -66,6 +68,21 @@ module.exports = warner(class Parser extends EE {
opt = opt || {}
super(opt)
+ this.file = opt.file || ''
+
+ // set to boolean false when an entry starts. 1024 bytes of \0
+ // is technically a valid tarball, albeit a boring one.
+ this[SAW_VALID_ENTRY] = null
+
+ // these BADARCHIVE errors can't be detected early. listen on DONE.
+ this.on(DONE, _ => {
+ if (this[STATE] === 'begin' || this[SAW_VALID_ENTRY] === false) {
+ // either less than 1 block of data, or all entries were invalid.
+ // Either way, probably not even a tarball.
+ this.warn('TAR_BAD_ARCHIVE', 'Unrecognized archive format')
+ }
+ })
+
if (opt.ondone)
this.on(DONE, opt.ondone)
else
@@ -95,6 +112,8 @@ module.exports = warner(class Parser extends EE {
this[ENDED] = false
this[UNZIP] = null
this[ABORTED] = false
+ this[SAW_NULL_BLOCK] = false
+ this[SAW_EOF] = false
if (typeof opt.onwarn === 'function')
this.on('warn', opt.onwarn)
if (typeof opt.onentry === 'function')
@@ -102,58 +121,90 @@ module.exports = warner(class Parser extends EE {
}
[CONSUMEHEADER] (chunk, position) {
+ if (this[SAW_VALID_ENTRY] === null)
+ this[SAW_VALID_ENTRY] = false
let header
try {
header = new Header(chunk, position, this[EX], this[GEX])
} catch (er) {
- return this.warn('invalid entry', er)
+ return this.warn('TAR_ENTRY_INVALID', er)
}
- if (header.nullBlock)
- this[EMIT]('nullBlock')
- else if (!header.cksumValid)
- this.warn('invalid entry', header)
- else if (!header.path)
- this.warn('invalid: path is required', header)
- else {
- const type = header.type
- if (/^(Symbolic)?Link$/.test(type) && !header.linkpath)
- this.warn('invalid: linkpath required', header)
- else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath)
- this.warn('invalid: linkpath forbidden', header)
+ if (header.nullBlock) {
+ if (this[SAW_NULL_BLOCK]) {
+ this[SAW_EOF] = true
+ // ending an archive with no entries. pointless, but legal.
+ if (this[STATE] === 'begin')
+ this[STATE] = 'header'
+ this[EMIT]('eof')
+ } else {
+ this[SAW_NULL_BLOCK] = true
+ this[EMIT]('nullBlock')
+ }
+ } else {
+ this[SAW_NULL_BLOCK] = false
+ if (!header.cksumValid)
+ this.warn('TAR_ENTRY_INVALID', 'checksum failure', {header})
+ else if (!header.path)
+ this.warn('TAR_ENTRY_INVALID', 'path is required', {header})
else {
- const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])
-
- if (entry.meta) {
- if (entry.size > this.maxMetaEntrySize) {
- entry.ignore = true
- this[EMIT]('ignoredEntry', entry)
- this[STATE] = 'ignore'
- } else if (entry.size > 0) {
- this[META] = ''
- entry.on('data', c => this[META] += c)
- this[STATE] = 'meta'
+ const type = header.type
+ if (/^(Symbolic)?Link$/.test(type) && !header.linkpath)
+ this.warn('TAR_ENTRY_INVALID', 'linkpath required', {header})
+ else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath)
+ this.warn('TAR_ENTRY_INVALID', 'linkpath forbidden', {header})
+ else {
+ const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])
+
+ // we do this for meta & ignored entries as well, because they
+ // are still valid tar, or else we wouldn't know to ignore them
+ if (!this[SAW_VALID_ENTRY]) {
+ if (entry.remain) {
+ // this might be the one!
+ const onend = () => {
+ if (!entry.invalid)
+ this[SAW_VALID_ENTRY] = true
+ }
+ entry.on('end', onend)
+ } else {
+ this[SAW_VALID_ENTRY] = true
+ }
}
- } else {
- this[EX] = null
- entry.ignore = entry.ignore || !this.filter(entry.path, entry)
- if (entry.ignore) {
- this[EMIT]('ignoredEntry', entry)
- this[STATE] = entry.remain ? 'ignore' : 'begin'
+ if (entry.meta) {
+ if (entry.size > this.maxMetaEntrySize) {
+ entry.ignore = true
+ this[EMIT]('ignoredEntry', entry)
+ this[STATE] = 'ignore'
+ entry.resume()
+ } else if (entry.size > 0) {
+ this[META] = ''
+ entry.on('data', c => this[META] += c)
+ this[STATE] = 'meta'
+ }
} else {
- if (entry.remain)
- this[STATE] = 'body'
- else {
- this[STATE] = 'begin'
- entry.end()
+ this[EX] = null
+ entry.ignore = entry.ignore || !this.filter(entry.path, entry)
+
+ if (entry.ignore) {
+ // probably valid, just not something we care about
+ this[EMIT]('ignoredEntry', entry)
+ this[STATE] = entry.remain ? 'ignore' : 'header'
+ entry.resume()
+ } else {
+ if (entry.remain)
+ this[STATE] = 'body'
+ else {
+ this[STATE] = 'header'
+ entry.end()
+ }
+
+ if (!this[READENTRY]) {
+ this[QUEUE].push(entry)
+ this[NEXTENTRY]()
+ } else
+ this[QUEUE].push(entry)
}
-
- if (!this[READENTRY]) {
- this[QUEUE].push(entry)
- this[NEXTENTRY]()
- } else
- this[QUEUE].push(entry)
}
}
}
@@ -211,7 +262,7 @@ module.exports = warner(class Parser extends EE {
entry.write(c)
if (!entry.blockRemain) {
- this[STATE] = 'begin'
+ this[STATE] = 'header'
this[WRITEENTRY] = null
entry.end()
}
@@ -265,11 +316,11 @@ module.exports = warner(class Parser extends EE {
}
}
- abort (msg, error) {
+ abort (error) {
this[ABORTED] = true
- this.warn(msg, error)
this.emit('abort', error)
- this.emit('error', error)
+ // always throws, even in non-strict mode
+ this.warn('TAR_ABORT', error, { recoverable: false })
}
write (chunk) {
@@ -295,8 +346,7 @@ module.exports = warner(class Parser extends EE {
this[ENDED] = false
this[UNZIP] = new zlib.Unzip()
this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk))
- this[UNZIP].on('error', er =>
- this.abort(er.message, er))
+ this[UNZIP].on('error', er => this.abort(er))
this[UNZIP].on('end', _ => {
this[ENDED] = true
this[CONSUMECHUNK]()
@@ -341,9 +391,10 @@ module.exports = warner(class Parser extends EE {
this[EMITTEDEND] = true
const entry = this[WRITEENTRY]
if (entry && entry.blockRemain) {
+ // truncated, likely a damaged file
const have = this[BUFFER] ? this[BUFFER].length : 0
- this.warn('Truncated input (needed ' + entry.blockRemain +
- ' more bytes, only ' + have + ' available)', entry)
+ this.warn('TAR_BAD_ARCHIVE', `Truncated input (needed ${
+ entry.blockRemain} more bytes, only ${have} available)`, {entry})
if (this[BUFFER])
entry.write(this[BUFFER])
entry.end()
@@ -353,11 +404,11 @@ module.exports = warner(class Parser extends EE {
}
[CONSUMECHUNK] (chunk) {
- if (this[CONSUMING]) {
+ if (this[CONSUMING])
this[BUFFERCONCAT](chunk)
- } else if (!chunk && !this[BUFFER]) {
+ else if (!chunk && !this[BUFFER])
this[MAYBEEND]()
- } else {
+ else {
this[CONSUMING] = true
if (this[BUFFER]) {
this[BUFFERCONCAT](chunk)
@@ -368,7 +419,10 @@ module.exports = warner(class Parser extends EE {
this[CONSUMECHUNKSUB](chunk)
}
- while (this[BUFFER] && this[BUFFER].length >= 512 && !this[ABORTED]) {
+ while (this[BUFFER] &&
+ this[BUFFER].length >= 512 &&
+ !this[ABORTED] &&
+ !this[SAW_EOF]) {
const c = this[BUFFER]
this[BUFFER] = null
this[CONSUMECHUNKSUB](c)
@@ -385,9 +439,10 @@ module.exports = warner(class Parser extends EE {
// the buffer. Advance the position and put any remainder in the buffer.
let position = 0
let length = chunk.length
- while (position + 512 <= length && !this[ABORTED]) {
+ while (position + 512 <= length && !this[ABORTED] && !this[SAW_EOF]) {
switch (this[STATE]) {
case 'begin':
+ case 'header':
this[CONSUMEHEADER](chunk, position)
position += 512
break
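The `SAW_VALID_ENTRY` bookkeeping added here is what backs the `TAR_BAD_ARCHIVE` code described in the README: if the parse ends having seen no valid entry, the input is presumed not to be a tarball at all. A sketch of the observable behavior (the relative require path is contextual):

```js
// 1024 zero bytes are a legal, empty tarball: no warning at all.
// Random non-tar bytes would instead produce TAR_ENTRY_INVALID warnings
// per block, then TAR_BAD_ARCHIVE 'Unrecognized archive format' at DONE.
const Parser = require('./parse.js')

const p = new Parser({
  onwarn: (code, message) => console.error(code, message),
})
p.end(Buffer.alloc(1024))
```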
diff --git a/node_modules/tar/lib/path-reservations.js b/node_modules/tar/lib/path-reservations.js
new file mode 100644
index 000000000..3cf0c2c12
--- /dev/null
+++ b/node_modules/tar/lib/path-reservations.js
@@ -0,0 +1,125 @@
+// A path exclusive reservation system
+// reserve([list, of, paths], fn)
+// When the fn is first in line for all its paths, it
+// is called with a cb that clears the reservation.
+//
+// Used by async unpack to avoid clobbering paths in use,
+// while still allowing maximal safe parallelization.
+
+const assert = require('assert')
+
+module.exports = () => {
+ // path => [function or Set]
+ // A Set object means a directory reservation
+ // A fn is a direct reservation on that path
+ const queues = new Map()
+
+ // fn => {paths:[path,...], dirs:[path, ...]}
+ const reservations = new Map()
+
+ // return a set of parent dirs for a given path
+ const { join } = require('path')
+ const getDirs = path =>
+ join(path).split(/[\\\/]/).slice(0, -1).reduce((set, path) =>
+ set.length ? set.concat(join(set[set.length-1], path)) : [path], [])
+
+ // functions currently running
+ const running = new Set()
+
+ // return the queues for each path the function cares about
+ // fn => {paths, dirs}
+ const getQueues = fn => {
+ const res = reservations.get(fn)
+ /* istanbul ignore if - unpossible */
+ if (!res)
+ throw new Error('function does not have any path reservations')
+ return {
+ paths: res.paths.map(path => queues.get(path)),
+ dirs: [...res.dirs].map(path => queues.get(path)),
+ }
+ }
+
+ // check if fn is first in line for all its paths, and is
+ // included in the first set for all its dir queues
+ const check = fn => {
+ const {paths, dirs} = getQueues(fn)
+ return paths.every(q => q[0] === fn) &&
+ dirs.every(q => q[0] instanceof Set && q[0].has(fn))
+ }
+
+ // run the function if it's first in line and not already running
+ const run = fn => {
+ if (running.has(fn) || !check(fn))
+ return false
+ running.add(fn)
+ fn(() => clear(fn))
+ return true
+ }
+
+ const clear = fn => {
+ if (!running.has(fn))
+ return false
+
+ const { paths, dirs } = reservations.get(fn)
+ const next = new Set()
+
+ paths.forEach(path => {
+ const q = queues.get(path)
+ assert.equal(q[0], fn)
+ if (q.length === 1)
+ queues.delete(path)
+ else {
+ q.shift()
+ if (typeof q[0] === 'function')
+ next.add(q[0])
+ else
+ q[0].forEach(fn => next.add(fn))
+ }
+ })
+
+ dirs.forEach(dir => {
+ const q = queues.get(dir)
+ assert(q[0] instanceof Set)
+ if (q[0].size === 1 && q.length === 1) {
+ queues.delete(dir)
+ } else if (q[0].size === 1) {
+ q.shift()
+
+ // must be a function or else the Set would've been reused
+ next.add(q[0])
+ } else
+ q[0].delete(fn)
+ })
+ running.delete(fn)
+
+ next.forEach(fn => run(fn))
+ return true
+ }
+
+ const reserve = (paths, fn) => {
+ const dirs = new Set(
+ paths.map(path => getDirs(path)).reduce((a, b) => a.concat(b))
+ )
+ reservations.set(fn, {dirs, paths})
+ paths.forEach(path => {
+ const q = queues.get(path)
+ if (!q)
+ queues.set(path, [fn])
+ else
+ q.push(fn)
+ })
+ dirs.forEach(dir => {
+ const q = queues.get(dir)
+ if (!q)
+ queues.set(dir, [new Set([fn])])
+ else if (q[q.length-1] instanceof Set)
+ q[q.length-1].add(fn)
+ else
+ q.push(new Set([fn]))
+ })
+
+ return run(fn)
+ }
+
+ return { check, reserve }
+}
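Usage of the reservation factory, sketched with hypothetical paths: a claim runs only once it is first in line for every path (and parent directory) it touches, and the callback handed to it releases the hold.

```js
const pathReservations = require('./path-reservations.js')
const { reserve } = pathReservations()

reserve(['out/a.txt'], done => {
  console.log('first claim on out/a.txt runs immediately')
  setTimeout(done, 100) // later claims on out/a.txt wait until done()
})

reserve(['out/a.txt', 'out/b.txt'], done => {
  console.log('runs only after the first reservation clears')
  done()
})
```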
diff --git a/node_modules/tar/lib/pax.js b/node_modules/tar/lib/pax.js
index 9d7e4aba5..214a459f3 100644
--- a/node_modules/tar/lib/pax.js
+++ b/node_modules/tar/lib/pax.js
@@ -1,5 +1,4 @@
'use strict'
-const Buffer = require('./buffer.js')
const Header = require('./header.js')
const path = require('path')
diff --git a/node_modules/tar/lib/replace.js b/node_modules/tar/lib/replace.js
index 571cee94a..44126d1f8 100644
--- a/node_modules/tar/lib/replace.js
+++ b/node_modules/tar/lib/replace.js
@@ -1,5 +1,4 @@
'use strict'
-const Buffer = require('./buffer.js')
// tar -r
const hlo = require('./high-level-opt.js')
diff --git a/node_modules/tar/lib/unpack.js b/node_modules/tar/lib/unpack.js
index fc765096e..af0e0ffa0 100644
--- a/node_modules/tar/lib/unpack.js
+++ b/node_modules/tar/lib/unpack.js
@@ -1,5 +1,11 @@
'use strict'
+// the PEND/UNPEND stuff tracks whether we're ready to emit end/close yet.
+// but the path reservations are required to avoid race conditions where
+// parallelized unpack ops may mess with one another, due to dependencies
+// (like a Link depending on its target) or destructive operations (like
+// clobbering an fs object to create one of a different type.)
+
const assert = require('assert')
const EE = require('events').EventEmitter
const Parser = require('./parse.js')
@@ -9,9 +15,11 @@ const path = require('path')
const mkdir = require('./mkdir.js')
const mkdirSync = mkdir.sync
const wc = require('./winchars.js')
+const pathReservations = require('./path-reservations.js')
const ONENTRY = Symbol('onEntry')
const CHECKFS = Symbol('checkFs')
+const CHECKFS2 = Symbol('checkFs2')
const ISREUSABLE = Symbol('isReusable')
const MAKEFS = Symbol('makeFs')
const FILE = Symbol('file')
@@ -34,6 +42,12 @@ const DOCHOWN = Symbol('doChown')
const UID = Symbol('uid')
const GID = Symbol('gid')
const crypto = require('crypto')
+const getFlag = require('./get-write-flag.js')
+
+/* istanbul ignore next */
+const neverCalled = () => {
+ throw new Error('sync function called cb somehow?!?')
+}
// Unlinks on Windows are not atomic.
//
@@ -91,6 +105,8 @@ class Unpack extends Parser {
super(opt)
+ this.reservations = pathReservations()
+
this.transform = typeof opt.transform === 'function' ? opt.transform : null
this.writable = true
@@ -163,6 +179,15 @@ class Unpack extends Parser {
this.on('entry', entry => this[ONENTRY](entry))
}
+ // a bad or damaged archive is a warning for Parser, but an error
+ // when extracting. Mark those errors as unrecoverable, because
+ // the Unpack contract cannot be met.
+ warn (code, msg, data = {}) {
+ if (code === 'TAR_BAD_ARCHIVE' || code === 'TAR_ABORT')
+ data.recoverable = false
+ return super.warn(code, msg, data)
+ }
+
[MAYBECLOSE] () {
if (this[ENDED] && this[PENDING] === 0) {
this.emit('prefinish')
@@ -189,7 +214,10 @@ class Unpack extends Parser {
if (!this.preservePaths) {
const p = entry.path
if (p.match(/(^|\/|\\)\.\.(\\|\/|$)/)) {
- this.warn('path contains \'..\'', p)
+ this.warn('TAR_ENTRY_ERROR', `path contains '..'`, {
+ entry,
+ path: p,
+ })
return false
}
@@ -197,8 +225,12 @@ class Unpack extends Parser {
// so we only need to test this one to get both
if (path.win32.isAbsolute(p)) {
const parsed = path.win32.parse(p)
- this.warn('stripping ' + parsed.root + ' from absolute path', p)
entry.path = p.substr(parsed.root.length)
+ const r = parsed.root
+ this.warn('TAR_ENTRY_INFO', `stripping ${r} from absolute path`, {
+ entry,
+ path: p,
+ })
}
}
@@ -250,7 +282,7 @@ class Unpack extends Parser {
if (er.name === 'CwdError')
this.emit('error', er)
else {
- this.warn(er.message, er)
+ this.warn('TAR_ENTRY_ERROR', er, {entry})
this[UNPEND]()
entry.resume()
}
@@ -291,9 +323,10 @@ class Unpack extends Parser {
return uint32(this.gid, entry.gid, this.processGid)
}
- [FILE] (entry) {
+ [FILE] (entry, fullyDone) {
const mode = entry.mode & 0o7777 || this.fmode
const stream = new fsm.WriteStream(entry.absolute, {
+ flags: getFlag(entry.size),
mode: mode,
autoClose: false
})
@@ -304,8 +337,12 @@ class Unpack extends Parser {
if (er)
return this[ONERROR](er, entry)
- if (--actions === 0)
- fs.close(stream.fd, _ => this[UNPEND]())
+ if (--actions === 0) {
+ fs.close(stream.fd, er => {
+ fullyDone()
+ er ? this[ONERROR](er, entry) : this[UNPEND]()
+ })
+ }
}
stream.on('finish', _ => {
@@ -344,15 +381,18 @@ class Unpack extends Parser {
tx.pipe(stream)
}
- [DIRECTORY] (entry) {
+ [DIRECTORY] (entry, fullyDone) {
const mode = entry.mode & 0o7777 || this.dmode
this[MKDIR](entry.absolute, mode, er => {
- if (er)
+ if (er) {
+ fullyDone()
return this[ONERROR](er, entry)
+ }
let actions = 1
const done = _ => {
if (--actions === 0) {
+ fullyDone()
this[UNPEND]()
entry.resume()
}
@@ -373,16 +413,18 @@ class Unpack extends Parser {
}
[UNSUPPORTED] (entry) {
- this.warn('unsupported entry type: ' + entry.type, entry)
+ entry.unsupported = true
+ this.warn('TAR_ENTRY_UNSUPPORTED',
+ `unsupported entry type: ${entry.type}`, {entry})
entry.resume()
}
- [SYMLINK] (entry) {
- this[LINK](entry, entry.linkpath, 'symlink')
+ [SYMLINK] (entry, done) {
+ this[LINK](entry, entry.linkpath, 'symlink', done)
}
- [HARDLINK] (entry) {
- this[LINK](entry, path.resolve(this.cwd, entry.linkpath), 'link')
+ [HARDLINK] (entry, done) {
+ this[LINK](entry, path.resolve(this.cwd, entry.linkpath), 'link', done)
}
[PEND] () {
@@ -413,29 +455,40 @@ class Unpack extends Parser {
// check if a thing is there, and if so, try to clobber it
[CHECKFS] (entry) {
this[PEND]()
+ const paths = [entry.path]
+ if (entry.linkpath)
+ paths.push(entry.linkpath)
+ this.reservations.reserve(paths, done => this[CHECKFS2](entry, done))
+ }
+ [CHECKFS2] (entry, done) {
this[MKDIR](path.dirname(entry.absolute), this.dmode, er => {
- if (er)
+ if (er) {
+ done()
return this[ONERROR](er, entry)
+ }
fs.lstat(entry.absolute, (er, st) => {
- if (st && (this.keep || this.newer && st.mtime > entry.mtime))
+ if (st && (this.keep || this.newer && st.mtime > entry.mtime)) {
this[SKIP](entry)
- else if (er || this[ISREUSABLE](entry, st))
- this[MAKEFS](null, entry)
+ done()
+ } else if (er || this[ISREUSABLE](entry, st)) {
+ this[MAKEFS](null, entry, done)
+ }
else if (st.isDirectory()) {
if (entry.type === 'Directory') {
if (!entry.mode || (st.mode & 0o7777) === entry.mode)
- this[MAKEFS](null, entry)
+ this[MAKEFS](null, entry, done)
else
- fs.chmod(entry.absolute, entry.mode, er => this[MAKEFS](er, entry))
+ fs.chmod(entry.absolute, entry.mode,
+ er => this[MAKEFS](er, entry, done))
} else
- fs.rmdir(entry.absolute, er => this[MAKEFS](er, entry))
+ fs.rmdir(entry.absolute, er => this[MAKEFS](er, entry, done))
} else
- unlinkFile(entry.absolute, er => this[MAKEFS](er, entry))
+ unlinkFile(entry.absolute, er => this[MAKEFS](er, entry, done))
})
})
}
- [MAKEFS] (er, entry) {
+ [MAKEFS] (er, entry, done) {
if (er)
return this[ONERROR](er, entry)
@@ -443,25 +496,26 @@ class Unpack extends Parser {
case 'File':
case 'OldFile':
case 'ContiguousFile':
- return this[FILE](entry)
+ return this[FILE](entry, done)
case 'Link':
- return this[HARDLINK](entry)
+ return this[HARDLINK](entry, done)
case 'SymbolicLink':
- return this[SYMLINK](entry)
+ return this[SYMLINK](entry, done)
case 'Directory':
case 'GNUDumpDir':
- return this[DIRECTORY](entry)
+ return this[DIRECTORY](entry, done)
}
}
- [LINK] (entry, linkpath, link) {
+ [LINK] (entry, linkpath, link, done) {
// XXX: get the type ('file' or 'dir') for windows
fs[link](linkpath, entry.absolute, er => {
if (er)
return this[ONERROR](er, entry)
+ done()
this[UNPEND]()
entry.resume()
})
@@ -474,7 +528,7 @@ class UnpackSync extends Unpack {
}
[CHECKFS] (entry) {
- const er = this[MKDIR](path.dirname(entry.absolute), this.dmode)
+ const er = this[MKDIR](path.dirname(entry.absolute), this.dmode, neverCalled)
if (er)
return this[ONERROR](er, entry)
try {
@@ -482,7 +536,7 @@ class UnpackSync extends Unpack {
if (this.keep || this.newer && st.mtime > entry.mtime)
return this[SKIP](entry)
else if (this[ISREUSABLE](entry, st))
- return this[MAKEFS](null, entry)
+ return this[MAKEFS](null, entry, neverCalled)
else {
try {
if (st.isDirectory()) {
@@ -493,29 +547,34 @@ class UnpackSync extends Unpack {
fs.rmdirSync(entry.absolute)
} else
unlinkFileSync(entry.absolute)
- return this[MAKEFS](null, entry)
+ return this[MAKEFS](null, entry, neverCalled)
} catch (er) {
return this[ONERROR](er, entry)
}
}
} catch (er) {
- return this[MAKEFS](null, entry)
+ return this[MAKEFS](null, entry, neverCalled)
}
}
- [FILE] (entry) {
+ [FILE] (entry, _) {
const mode = entry.mode & 0o7777 || this.fmode
const oner = er => {
- try { fs.closeSync(fd) } catch (_) {}
- if (er)
- this[ONERROR](er, entry)
+ let closeError
+ try {
+ fs.closeSync(fd)
+ } catch (e) {
+ closeError = e
+ }
+ if (er || closeError)
+ this[ONERROR](er || closeError, entry)
}
let stream
let fd
try {
- fd = fs.openSync(entry.absolute, 'w', mode)
+ fd = fs.openSync(entry.absolute, getFlag(entry.size), mode)
} catch (er) {
return oner(er)
}
@@ -570,7 +629,7 @@ class UnpackSync extends Unpack {
})
}
- [DIRECTORY] (entry) {
+ [DIRECTORY] (entry, _) {
const mode = entry.mode & 0o7777 || this.dmode
const er = this[MKDIR](entry.absolute, mode)
if (er)
@@ -607,7 +666,7 @@ class UnpackSync extends Unpack {
}
}
- [LINK] (entry, linkpath, link) {
+ [LINK] (entry, linkpath, link, _) {
try {
fs[link + 'Sync'](linkpath, entry.absolute)
entry.resume()
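The `warn` override near the top of this file is the piece that makes a damaged archive fatal for extraction even in non-strict mode. A sketch of what that looks like to a caller (`trunc.tar` stands in for a hypothetical truncated tarball):

```js
// TAR_BAD_ARCHIVE is marked recoverable: false during unpack, so the
// returned promise rejects rather than merely emitting a warning.
const tar = require('tar')

tar.x({ file: 'trunc.tar', cwd: 'out' })
  .catch(er => console.error(er.tarCode)) // 'TAR_BAD_ARCHIVE'
```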
diff --git a/node_modules/tar/lib/warn-mixin.js b/node_modules/tar/lib/warn-mixin.js
index 94a4b9b99..11eb52cc6 100644
--- a/node_modules/tar/lib/warn-mixin.js
+++ b/node_modules/tar/lib/warn-mixin.js
@@ -1,14 +1,21 @@
'use strict'
module.exports = Base => class extends Base {
- warn (msg, data) {
- if (!this.strict)
- this.emit('warn', msg, data)
- else if (data instanceof Error)
- this.emit('error', data)
- else {
- const er = new Error(msg)
- er.data = data
- this.emit('error', er)
- }
+ warn (code, message, data = {}) {
+ if (this.file)
+ data.file = this.file
+ if (this.cwd)
+ data.cwd = this.cwd
+ data.code = message instanceof Error && message.code || code
+ data.tarCode = code
+ if (!this.strict && data.recoverable !== false) {
+ if (message instanceof Error) {
+ data = Object.assign(message, data)
+ message = message.message
+ }
+ this.emit('warn', data.tarCode, message, data)
+ } else if (message instanceof Error) {
+ this.emit('error', Object.assign(message, data))
+ } else
+ this.emit('error', Object.assign(new Error(`${code}: ${message}`), data))
}
}
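The mixin's dispatch rules, demonstrated against a bare emitter (a sketch; tar's own classes set `file`, `cwd`, and `strict` from their options):

```js
// non-strict + recoverable      -> 'warn' (code, message, data)
// strict, or recoverable: false -> 'error' with code/tarCode attached
const EE = require('events')
const warner = require('./warn-mixin.js')

const w = new (warner(class extends EE {}))()
w.on('warn', (code, msg, data) => console.log('warn:', code, data.tarCode))
w.on('error', er => console.log('error:', er.code, er.tarCode))

w.warn('TAR_ENTRY_INFO', 'modified an entry')                        // warn
w.warn('TAR_ABORT', new Error('zlib error'), { recoverable: false }) // error
```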
diff --git a/node_modules/tar/lib/write-entry.js b/node_modules/tar/lib/write-entry.js
index 0c019006f..0e33cb59d 100644
--- a/node_modules/tar/lib/write-entry.js
+++ b/node_modules/tar/lib/write-entry.js
@@ -1,5 +1,4 @@
'use strict'
-const Buffer = require('./buffer.js')
const MiniPass = require('minipass')
const Pax = require('./pax.js')
const Header = require('./header.js')
@@ -54,12 +53,13 @@ const WriteEntry = warner(class WriteEntry extends MiniPass {
if (typeof opt.onwarn === 'function')
this.on('warn', opt.onwarn)
+ let pathWarn = false
if (!this.preservePaths && path.win32.isAbsolute(p)) {
// absolutes on posix are also absolutes on win32
// so we only need to test this one to get both
const parsed = path.win32.parse(p)
- this.warn('stripping ' + parsed.root + ' from absolute path', p)
this.path = p.substr(parsed.root.length)
+ pathWarn = parsed.root
}
this.win32 = !!opt.win32 || process.platform === 'win32'
@@ -73,6 +73,13 @@ const WriteEntry = warner(class WriteEntry extends MiniPass {
if (this.path === '')
this.path = './'
+ if (pathWarn) {
+ this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
+ entry: this,
+ path: pathWarn + this.path,
+ })
+ }
+
if (this.statCache.has(this.absolute))
this[ONLSTAT](this.statCache.get(this.absolute))
else
@@ -108,7 +115,7 @@ const WriteEntry = warner(class WriteEntry extends MiniPass {
}
[MODE] (mode) {
- return modeFix(mode, this.type === 'Directory')
+ return modeFix(mode, this.type === 'Directory', this.portable)
}
[HEADER] () {
@@ -167,14 +174,14 @@ const WriteEntry = warner(class WriteEntry extends MiniPass {
}
[ONREADLINK] (linkpath) {
- this.linkpath = linkpath
+ this.linkpath = linkpath.replace(/\\/g, '/')
this[HEADER]()
this.end()
}
[HARDLINK] (linkpath) {
this.type = 'Link'
- this.linkpath = path.relative(this.cwd, linkpath)
+ this.linkpath = path.relative(this.cwd, linkpath).replace(/\\/g, '/')
this.stat.size = 0
this[HEADER]()
this.end()
@@ -215,8 +222,11 @@ const WriteEntry = warner(class WriteEntry extends MiniPass {
[READ] (fd, buf, offset, length, pos, remain, blockRemain) {
fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
- if (er)
- return this[CLOSE](fd, _ => this.emit('error', er))
+ if (er) {
+ // ignoring the error from close(2) is a bad practice, but at
+ // this point we already have an error, don't need another one
+ return this[CLOSE](fd, () => this.emit('error', er))
+ }
this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
})
}
@@ -231,8 +241,7 @@ const WriteEntry = warner(class WriteEntry extends MiniPass {
er.path = this.absolute
er.syscall = 'read'
er.code = 'EOF'
- this[CLOSE](fd, _ => _)
- return this.emit('error', er)
+ return this[CLOSE](fd, () => this.emit('error', er))
}
if (bytesRead > remain) {
@@ -240,8 +249,7 @@ const WriteEntry = warner(class WriteEntry extends MiniPass {
er.path = this.absolute
er.syscall = 'read'
er.code = 'EOF'
- this[CLOSE](fd, _ => _)
- return this.emit('error', er)
+ return this[CLOSE](fd, () => this.emit('error', er))
}
// null out the rest of the buffer, if we could fit the block padding
@@ -265,9 +273,7 @@ const WriteEntry = warner(class WriteEntry extends MiniPass {
if (!remain) {
if (blockRemain)
this.write(Buffer.alloc(blockRemain))
- this.end()
- this[CLOSE](fd, _ => _)
- return
+ return this[CLOSE](fd, er => er ? this.emit('error', er) : this.end())
}
if (offset >= length) {
@@ -303,13 +309,16 @@ class WriteEntrySync extends WriteEntry {
this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
threw = false
} finally {
+ // ignoring the error from close(2) is a bad practice, but at
+ // this point we already have an error, don't need another one
if (threw)
- try { this[CLOSE](fd) } catch (er) {}
+ try { this[CLOSE](fd, () => {}) } catch (er) {}
}
}
- [CLOSE] (fd) {
+ [CLOSE] (fd, cb) {
fs.closeSync(fd)
+ cb()
}
}
@@ -343,12 +352,10 @@ const WriteEntryTar = warner(class WriteEntryTar extends MiniPass {
if (typeof opt.onwarn === 'function')
this.on('warn', opt.onwarn)
+ let pathWarn = false
if (path.isAbsolute(this.path) && !this.preservePaths) {
const parsed = path.parse(this.path)
- this.warn(
- 'stripping ' + parsed.root + ' from absolute path',
- this.path
- )
+ pathWarn = parsed.root
this.path = this.path.substr(parsed.root.length)
}
@@ -371,6 +378,13 @@ const WriteEntryTar = warner(class WriteEntryTar extends MiniPass {
ctime: this.portable ? null : this.ctime
})
+ if (pathWarn) {
+ this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
+ entry: this,
+ path: pathWarn + this.path,
+ })
+ }
+
if (this.header.encode() && !this.noPax)
super.write(new Pax({
atime: this.portable ? null : this.atime,
@@ -392,7 +406,7 @@ const WriteEntryTar = warner(class WriteEntryTar extends MiniPass {
}
[MODE] (mode) {
- return modeFix(mode, this.type === 'Directory')
+ return modeFix(mode, this.type === 'Directory', this.portable)
}
write (data) {
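The two `linkpath` hunks above implement the 6.0 changelog entry about `\`-style link targets on Windows. The normalization, isolated (the function name is illustrative):

```js
// Link targets recorded in the archive always use '/' separators,
// even when readlink() on Windows reports backslashes.
const normalizeLinkpath = lp => lp.replace(/\\/g, '/')

console.log(normalizeLinkpath('..\\shared\\lib.js')) // '../shared/lib.js'
```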
diff --git a/node_modules/tar/node_modules/.bin/mkdirp b/node_modules/tar/node_modules/.bin/mkdirp
new file mode 120000
index 000000000..017896ceb
--- /dev/null
+++ b/node_modules/tar/node_modules/.bin/mkdirp
@@ -0,0 +1 @@
+../mkdirp/bin/cmd.js \ No newline at end of file
diff --git a/node_modules/tar/node_modules/minipass/README.md b/node_modules/tar/node_modules/minipass/README.md
index c989beea0..32ace2fb9 100644
--- a/node_modules/tar/node_modules/minipass/README.md
+++ b/node_modules/tar/node_modules/minipass/README.md
@@ -7,32 +7,32 @@ stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)
fast](https://docs.google.com/spreadsheets/d/1oObKSrVwLX_7Ut4Z6g3fZW-AX1j1-k6w-cDsrkaSbHM/edit#gid=0)
for objects, strings, and buffers.
-Supports pipe()ing (including multi-pipe() and backpressure
-transmission), buffering data until either a `data` event handler or
-`pipe()` is added (so you don't lose the first chunk), and most other
-cases where PassThrough is a good idea.
+Supports pipe()ing (including multi-pipe() and backpressure transmission),
+buffering data until either a `data` event handler or `pipe()` is added (so
+you don't lose the first chunk), and most other cases where PassThrough is
+a good idea.
-There is a `read()` method, but it's much more efficient to consume
-data from this stream via `'data'` events or by calling `pipe()` into
-some other stream. Calling `read()` requires the buffer to be
-flattened in some cases, which requires copying memory.
+There is a `read()` method, but it's much more efficient to consume data
+from this stream via `'data'` events or by calling `pipe()` into some other
+stream. Calling `read()` requires the buffer to be flattened in some
+cases, which requires copying memory.
-There is also no `unpipe()` method. Once you start piping, there is
-no stopping it!
+There is also no `unpipe()` method. Once you start piping, there is no
+stopping it!
-If you set `objectMode: true` in the options, then whatever is written
-will be emitted. Otherwise, it'll do a minimal amount of Buffer
-copying to ensure proper Streams semantics when `read(n)` is called.
+If you set `objectMode: true` in the options, then whatever is written will
+be emitted. Otherwise, it'll do a minimal amount of Buffer copying to
+ensure proper Streams semantics when `read(n)` is called.
`objectMode` can also be set by doing `stream.objectMode = true`, or by
writing any non-string/non-buffer data. `objectMode` cannot be set to
false once it is set.
-This is not a `through` or `through2` stream. It doesn't transform
-the data, it just passes it right through. If you want to transform
-the data, extend the class, and override the `write()` method. Once
-you're done transforming the data however you want, call
-`super.write()` with the transform output.
+This is not a `through` or `through2` stream. It doesn't transform the
+data, it just passes it right through. If you want to transform the data,
+extend the class, and override the `write()` method. Once you're done
+transforming the data however you want, call `super.write()` with the
+transform output.
For some examples of streams that extend Minipass in various ways, check
out:
@@ -46,6 +46,7 @@ out:
- [tap](http://npm.im/tap)
- [tap-parser](http://npm.im/tap)
- [treport](http://npm.im/tap)
+- [minipass-fetch](http://npm.im/minipass-fetch)
## Differences from Node.js Streams
@@ -252,7 +253,8 @@ src.pipe(tee)
## USAGE
-It's a stream! Use it like a stream and it'll most likely do what you want.
+It's a stream! Use it like a stream and it'll most likely do what you
+want.
```js
const Minipass = require('minipass')
@@ -280,31 +282,30 @@ streams.
* `write(chunk, [encoding], [callback])` - Put data in. (Note that, in the
base Minipass class, the same data will come out.) Returns `false` if
- the stream will buffer the next write, or true if it's still in
- "flowing" mode.
+ the stream will buffer the next write, or true if it's still in "flowing"
+ mode.
* `end([chunk, [encoding]], [callback])` - Signal that you have no more
data to write. This will queue an `end` event to be fired when all the
data has been consumed.
-* `setEncoding(encoding)` - Set the encoding for data coming of the
- stream. This can only be done once.
+* `setEncoding(encoding)` - Set the encoding for data coming of the stream.
+ This can only be done once.
* `pause()` - No more data for a while, please. This also prevents `end`
from being emitted for empty streams until the stream is resumed.
-* `resume()` - Resume the stream. If there's data in the buffer, it is
- all discarded. Any buffered events are immediately emitted.
+* `resume()` - Resume the stream. If there's data in the buffer, it is all
+ discarded. Any buffered events are immediately emitted.
* `pipe(dest)` - Send all output to the stream provided. There is no way
to unpipe. When data is emitted, it is immediately written to any and
all pipe destinations.
-* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters.
- Some events are given special treatment, however. (See below under
- "events".)
+* `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are EventEmitters. Some
+ events are given special treatment, however. (See below under "events".)
* `promise()` - Returns a Promise that resolves when the stream emits
`end`, or rejects if the stream emits `error`.
* `collect()` - Return a Promise that resolves on `end` with an array
- containing each chunk of data that was emitted, or rejects if the
- stream emits `error`. Note that this consumes the stream data.
-* `concat()` - Same as `collect()`, but concatenates the data into a
- single Buffer object. Will reject the returned promise if the stream is
- in objectMode, or if it goes into objectMode by the end of the data.
+ containing each chunk of data that was emitted, or rejects if the stream
+ emits `error`. Note that this consumes the stream data.
+* `concat()` - Same as `collect()`, but concatenates the data into a single
+ Buffer object. Will reject the returned promise if the stream is in
+ objectMode, or if it goes into objectMode by the end of the data.
* `read(n)` - Consume `n` bytes of data out of the buffer. If `n` is not
provided, then consume all of it. If `n` bytes are not available, then
it returns null. **Note** consuming streams in this way is less
@@ -421,8 +422,8 @@ mp.concat().then(onebigchunk => {
### iteration
-You can iterate over streams synchronously or asynchronously in
-platforms that support it.
+You can iterate over streams synchronously or asynchronously in platforms
+that support it.
Synchronous iteration will end when the currently available data is
consumed, even if the `end` event has not been reached. In string and
@@ -430,9 +431,8 @@ buffer mode, the data is concatenated, so unless multiple writes are
occurring in the same tick as the `read()`, sync iteration loops will
generally only have a single iteration.
-To consume chunks in this way exactly as they have been written, with
-no flattening, create the stream with the `{ objectMode: true }`
-option.
+To consume chunks in this way exactly as they have been written, with no
+flattening, create the stream with the `{ objectMode: true }` option.
```js
const mp = new Minipass({ objectMode: true })
diff --git a/node_modules/tar/node_modules/minipass/index.js b/node_modules/tar/node_modules/minipass/index.js
index c072352d4..55ea0f3dd 100644
--- a/node_modules/tar/node_modules/minipass/index.js
+++ b/node_modules/tar/node_modules/minipass/index.js
@@ -1,5 +1,6 @@
'use strict'
const EE = require('events')
+const Stream = require('stream')
const Yallist = require('yallist')
const SD = require('string_decoder').StringDecoder
@@ -29,12 +30,6 @@ const ASYNCITERATOR = doIter && Symbol.asyncIterator
const ITERATOR = doIter && Symbol.iterator
|| Symbol('iterator not implemented')
-// Buffer in node 4.x < 4.5.0 doesn't have working Buffer.from
-// or Buffer.alloc, and Buffer in node 10 deprecated the ctor.
-// .M, this is fine .\^/M..
-const B = Buffer.alloc ? Buffer
- : /* istanbul ignore next */ require('safe-buffer').Buffer
-
// events that mean 'the stream is over'
// these are treated specially, and re-emitted
// if they are listened for after emitting.
@@ -49,9 +44,9 @@ const isArrayBuffer = b => b instanceof ArrayBuffer ||
b.constructor.name === 'ArrayBuffer' &&
b.byteLength >= 0
-const isArrayBufferView = b => !B.isBuffer(b) && ArrayBuffer.isView(b)
+const isArrayBufferView = b => !Buffer.isBuffer(b) && ArrayBuffer.isView(b)
-module.exports = class Minipass extends EE {
+module.exports = class Minipass extends Stream {
constructor (options) {
super()
this[FLOWING] = false
@@ -126,11 +121,11 @@ module.exports = class Minipass extends EE {
// at some point in the future, we may want to do the opposite!
// leave strings and buffers as-is
// anything else switches us into object mode
- if (!this[OBJECTMODE] && !B.isBuffer(chunk)) {
+ if (!this[OBJECTMODE] && !Buffer.isBuffer(chunk)) {
if (isArrayBufferView(chunk))
- chunk = B.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
+ chunk = Buffer.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
else if (isArrayBuffer(chunk))
- chunk = B.from(chunk)
+ chunk = Buffer.from(chunk)
else if (typeof chunk !== 'string')
// use the setter so we throw if we have encoding set
this.objectMode = true
@@ -152,10 +147,10 @@ module.exports = class Minipass extends EE {
if (typeof chunk === 'string' && !this[OBJECTMODE] &&
// unless it is a string already ready for us to use
!(encoding === this[ENCODING] && !this[DECODER].lastNeed)) {
- chunk = B.from(chunk, encoding)
+ chunk = Buffer.from(chunk, encoding)
}
- if (B.isBuffer(chunk) && this[ENCODING])
+ if (Buffer.isBuffer(chunk) && this[ENCODING])
chunk = this[DECODER].write(chunk)
try {
@@ -188,7 +183,7 @@ module.exports = class Minipass extends EE {
])
else
this.buffer = new Yallist([
- B.concat(Array.from(this.buffer), this[BUFFERLENGTH])
+ Buffer.concat(Array.from(this.buffer), this[BUFFERLENGTH])
])
}
@@ -423,12 +418,17 @@ module.exports = class Minipass extends EE {
// const all = await stream.collect()
collect () {
const buf = []
- buf.dataLength = 0
+ if (!this[OBJECTMODE])
+ buf.dataLength = 0
+ // set the promise first, in case an error is raised
+ // by triggering the flow here.
+ const p = this.promise()
this.on('data', c => {
buf.push(c)
- buf.dataLength += c.length
+ if (!this[OBJECTMODE])
+ buf.dataLength += c.length
})
- return this.promise().then(() => buf)
+ return p.then(() => buf)
}
// const data = await stream.concat()
@@ -438,7 +438,7 @@ module.exports = class Minipass extends EE {
: this.collect().then(buf =>
this[OBJECTMODE]
? Promise.reject(new Error('cannot concat in objectMode'))
- : this[ENCODING] ? buf.join('') : B.concat(buf, buf.dataLength))
+ : this[ENCODING] ? buf.join('') : Buffer.concat(buf, buf.dataLength))
}
// stream.promise().then(() => done, er => emitted error)
@@ -529,9 +529,10 @@ module.exports = class Minipass extends EE {
}
static isStream (s) {
- return !!s && (s instanceof Minipass || s instanceof EE && (
- typeof s.pipe === 'function' || // readable
- (typeof s.write === 'function' && typeof s.end === 'function') // writable
- ))
+ return !!s && (s instanceof Minipass || s instanceof Stream ||
+ s instanceof EE && (
+ typeof s.pipe === 'function' || // readable
+ (typeof s.write === 'function' && typeof s.end === 'function') // writable
+ ))
}
}
diff --git a/node_modules/tar/node_modules/minipass/package.json b/node_modules/tar/node_modules/minipass/package.json
index aeb390253..568682bc2 100644
--- a/node_modules/tar/node_modules/minipass/package.json
+++ b/node_modules/tar/node_modules/minipass/package.json
@@ -1,27 +1,27 @@
{
- "_from": "minipass@^2.8.6",
- "_id": "minipass@2.9.0",
+ "_from": "minipass@^3.0.0",
+ "_id": "minipass@3.1.1",
"_inBundle": false,
- "_integrity": "sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==",
+ "_integrity": "sha512-UFqVihv6PQgwj8/yTGvl9kPz7xIAY+R5z6XYjRInD3Gk3qx6QGSD6zEcpeG4Dy/lQnv1J6zv8ejV90hyYIKf3w==",
"_location": "/tar/minipass",
"_phantomChildren": {},
"_requested": {
"type": "range",
"registry": true,
- "raw": "minipass@^2.8.6",
+ "raw": "minipass@^3.0.0",
"name": "minipass",
"escapedName": "minipass",
- "rawSpec": "^2.8.6",
+ "rawSpec": "^3.0.0",
"saveSpec": null,
- "fetchSpec": "^2.8.6"
+ "fetchSpec": "^3.0.0"
},
"_requiredBy": [
"/tar"
],
- "_resolved": "https://registry.npmjs.org/minipass/-/minipass-2.9.0.tgz",
- "_shasum": "e713762e7d3e32fed803115cf93e04bca9fcc9a6",
- "_spec": "minipass@^2.8.6",
- "_where": "/Users/mperrotte/npminc/cli/node_modules/tar",
+ "_resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.1.tgz",
+ "_shasum": "7607ce778472a185ad6d89082aa2070f79cedcd5",
+ "_spec": "minipass@^3.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/tar",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
@@ -32,8 +32,7 @@
},
"bundleDependencies": false,
"dependencies": {
- "safe-buffer": "^5.1.2",
- "yallist": "^3.0.0"
+ "yallist": "^4.0.0"
},
"deprecated": false,
"description": "minimal implementation of a PassThrough stream",
@@ -42,6 +41,9 @@
"tap": "^14.6.5",
"through2": "^2.0.3"
},
+ "engines": {
+ "node": ">=8"
+ },
"files": [
"index.js"
],
@@ -59,12 +61,12 @@
},
"scripts": {
"postpublish": "git push origin --follow-tags",
- "postversion": "npm publish",
+ "postversion": "npm publish --tag=next",
"preversion": "npm test",
"test": "tap"
},
"tap": {
"check-coverage": true
},
- "version": "2.9.0"
+ "version": "3.1.1"
}
diff --git a/node_modules/tar/node_modules/mkdirp/CHANGELOG.md b/node_modules/tar/node_modules/mkdirp/CHANGELOG.md
new file mode 100644
index 000000000..81458380b
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/CHANGELOG.md
@@ -0,0 +1,15 @@
+# Change Log
+
+## 1.0
+
+Full rewrite. Essentially a brand new module.
+
+- Return a promise instead of taking a callback.
+- Use native `fs.mkdir(path, { recursive: true })` when available.
+- Drop support for outdated Node.js versions. (Technically still works on
+ Node.js v8, but only 10 and above are officially supported.)
+
+## 0.x
+
+Original and most widely used recursive directory creation implementation
+in JavaScript, dating back to 2010.
diff --git a/node_modules/tar/node_modules/mkdirp/LICENSE b/node_modules/tar/node_modules/mkdirp/LICENSE
new file mode 100644
index 000000000..13fcd15f0
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/LICENSE
@@ -0,0 +1,21 @@
+Copyright James Halliday (mail@substack.net) and Isaac Z. Schlueter (i@izs.me)
+
+This project is free software released under the MIT license:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/node_modules/tar/node_modules/mkdirp/bin/cmd.js b/node_modules/tar/node_modules/mkdirp/bin/cmd.js
new file mode 100755
index 000000000..6e0aa8dc4
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/bin/cmd.js
@@ -0,0 +1,68 @@
+#!/usr/bin/env node
+
+const usage = () => `
+usage: mkdirp [DIR1,DIR2..] {OPTIONS}
+
+ Create each supplied directory including any necessary parent directories
+ that don't yet exist.
+
+ If the directory already exists, do nothing.
+
+OPTIONS are:
+
+ -m<mode> If a directory needs to be created, set the mode as an octal
+ --mode=<mode> permission string.
+
+ -v --version Print the mkdirp version number
+
+ -h --help Print this helpful banner
+
+ -p --print Print the first directories created for each path provided
+
+ --manual Use manual implementation, even if native is available
+`
+
+const dirs = []
+const opts = {}
+let print = false
+let dashdash = false
+let manual = false
+for (const arg of process.argv.slice(2)) {
+ if (dashdash)
+ dirs.push(arg)
+ else if (arg === '--')
+ dashdash = true
+ else if (arg === '--manual')
+ manual = true
+ else if (/^-h/.test(arg) || /^--help/.test(arg)) {
+ console.log(usage())
+ process.exit(0)
+ } else if (arg === '-v' || arg === '--version') {
+ console.log(require('../package.json').version)
+ process.exit(0)
+ } else if (arg === '-p' || arg === '--print') {
+ print = true
+ } else if (/^-m/.test(arg) || /^--mode=/.test(arg)) {
+ const mode = parseInt(arg.replace(/^(-m|--mode=)/, ''), 8)
+ if (isNaN(mode)) {
+ console.error(`invalid mode argument: ${arg}\nMust be an octal number.`)
+ process.exit(1)
+ }
+ opts.mode = mode
+ } else
+ dirs.push(arg)
+}
+
+const mkdirp = require('../')
+const impl = manual ? mkdirp.manual : mkdirp
+if (dirs.length === 0)
+ console.error(usage())
+
+Promise.all(dirs.map(dir => impl(dir, opts)))
+ .then(made => print ? made.forEach(m => m && console.log(m)) : null)
+ .catch(er => {
+ console.error(er.message)
+ if (er.code)
+ console.error(' code: ' + er.code)
+ process.exit(1)
+ })
diff --git a/node_modules/tar/node_modules/mkdirp/index.js b/node_modules/tar/node_modules/mkdirp/index.js
new file mode 100644
index 000000000..ad7a16c9f
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/index.js
@@ -0,0 +1,31 @@
+const optsArg = require('./lib/opts-arg.js')
+const pathArg = require('./lib/path-arg.js')
+
+const {mkdirpNative, mkdirpNativeSync} = require('./lib/mkdirp-native.js')
+const {mkdirpManual, mkdirpManualSync} = require('./lib/mkdirp-manual.js')
+const {useNative, useNativeSync} = require('./lib/use-native.js')
+
+
+const mkdirp = (path, opts) => {
+ path = pathArg(path)
+ opts = optsArg(opts)
+ return useNative(opts)
+ ? mkdirpNative(path, opts)
+ : mkdirpManual(path, opts)
+}
+
+const mkdirpSync = (path, opts) => {
+ path = pathArg(path)
+ opts = optsArg(opts)
+ return useNativeSync(opts)
+ ? mkdirpNativeSync(path, opts)
+ : mkdirpManualSync(path, opts)
+}
+
+mkdirp.sync = mkdirpSync
+mkdirp.native = (path, opts) => mkdirpNative(pathArg(path), optsArg(opts))
+mkdirp.manual = (path, opts) => mkdirpManual(pathArg(path), optsArg(opts))
+mkdirp.nativeSync = (path, opts) => mkdirpNativeSync(pathArg(path), optsArg(opts))
+mkdirp.manualSync = (path, opts) => mkdirpManualSync(pathArg(path), optsArg(opts))
+
+module.exports = mkdirp
diff --git a/node_modules/tar/node_modules/mkdirp/lib/find-made.js b/node_modules/tar/node_modules/mkdirp/lib/find-made.js
new file mode 100644
index 000000000..022e492c0
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/lib/find-made.js
@@ -0,0 +1,29 @@
+const {dirname} = require('path')
+
+const findMade = (opts, parent, path = undefined) => {
+ // we never want the 'made' return value to be a root directory
+ if (path === parent)
+ return Promise.resolve()
+
+ return opts.statAsync(parent).then(
+ st => st.isDirectory() ? path : undefined, // will fail later
+ er => er.code === 'ENOENT'
+ ? findMade(opts, dirname(parent), parent)
+ : undefined
+ )
+}
+
+const findMadeSync = (opts, parent, path = undefined) => {
+ if (path === parent)
+ return undefined
+
+ try {
+ return opts.statSync(parent).isDirectory() ? path : undefined
+ } catch (er) {
+ return er.code === 'ENOENT'
+ ? findMadeSync(opts, dirname(parent), parent)
+ : undefined
+ }
+}
+
+module.exports = {findMade, findMadeSync}
diff --git a/node_modules/tar/node_modules/mkdirp/lib/mkdirp-manual.js b/node_modules/tar/node_modules/mkdirp/lib/mkdirp-manual.js
new file mode 100644
index 000000000..2eb18cd64
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/lib/mkdirp-manual.js
@@ -0,0 +1,64 @@
+const {dirname} = require('path')
+
+const mkdirpManual = (path, opts, made) => {
+ opts.recursive = false
+ const parent = dirname(path)
+ if (parent === path) {
+ return opts.mkdirAsync(path, opts).catch(er => {
+ // swallowed by recursive implementation on posix systems
+ // any other error is a failure
+ if (er.code !== 'EISDIR')
+ throw er
+ })
+ }
+
+ return opts.mkdirAsync(path, opts).then(() => made || path, er => {
+ if (er.code === 'ENOENT')
+ return mkdirpManual(parent, opts)
+ .then(made => mkdirpManual(path, opts, made))
+ if (er.code !== 'EEXIST' && er.code !== 'EROFS')
+ throw er
+ return opts.statAsync(path).then(st => {
+ if (st.isDirectory())
+ return made
+ else
+ throw er
+ }, () => { throw er })
+ })
+}
+
+const mkdirpManualSync = (path, opts, made) => {
+ const parent = dirname(path)
+ opts.recursive = false
+
+ if (parent === path) {
+ try {
+ return opts.mkdirSync(path, opts)
+ } catch (er) {
+ // swallowed by recursive implementation on posix systems
+ // any other error is a failure
+ if (er.code !== 'EISDIR')
+ throw er
+ else
+ return
+ }
+ }
+
+ try {
+ opts.mkdirSync(path, opts)
+ return made || path
+ } catch (er) {
+ if (er.code === 'ENOENT')
+ return mkdirpManualSync(path, opts, mkdirpManualSync(parent, opts, made))
+ if (er.code !== 'EEXIST' && er.code !== 'EROFS')
+ throw er
+ try {
+ if (!opts.statSync(path).isDirectory())
+ throw er
+ } catch (_) {
+ throw er
+ }
+ }
+}
+
+module.exports = {mkdirpManual, mkdirpManualSync}
diff --git a/node_modules/tar/node_modules/mkdirp/lib/mkdirp-native.js b/node_modules/tar/node_modules/mkdirp/lib/mkdirp-native.js
new file mode 100644
index 000000000..c7a6b6980
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/lib/mkdirp-native.js
@@ -0,0 +1,39 @@
+const {dirname} = require('path')
+const {findMade, findMadeSync} = require('./find-made.js')
+const {mkdirpManual, mkdirpManualSync} = require('./mkdirp-manual.js')
+
+const mkdirpNative = (path, opts) => {
+ opts.recursive = true
+ const parent = dirname(path)
+ if (parent === path)
+ return opts.mkdirAsync(path, opts)
+
+ return findMade(opts, path).then(made =>
+ opts.mkdirAsync(path, opts).then(() => made)
+ .catch(er => {
+ if (er.code === 'ENOENT')
+ return mkdirpManual(path, opts)
+ else
+ throw er
+ }))
+}
+
+const mkdirpNativeSync = (path, opts) => {
+ opts.recursive = true
+ const parent = dirname(path)
+ if (parent === path)
+ return opts.mkdirSync(path, opts)
+
+ const made = findMadeSync(opts, path)
+ try {
+ opts.mkdirSync(path, opts)
+ return made
+ } catch (er) {
+ if (er.code === 'ENOENT')
+ return mkdirpManualSync(path, opts)
+ else
+ throw er
+ }
+}
+
+module.exports = {mkdirpNative, mkdirpNativeSync}
diff --git a/node_modules/tar/node_modules/mkdirp/lib/opts-arg.js b/node_modules/tar/node_modules/mkdirp/lib/opts-arg.js
new file mode 100644
index 000000000..488bd44c3
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/lib/opts-arg.js
@@ -0,0 +1,23 @@
+const { promisify } = require('util')
+const fs = require('fs')
+const optsArg = opts => {
+ if (!opts)
+ opts = { mode: 0o777 & (~process.umask()), fs }
+ else if (typeof opts === 'object')
+ opts = { mode: 0o777 & (~process.umask()), fs, ...opts }
+ else if (typeof opts === 'number')
+ opts = { mode: opts, fs }
+ else if (typeof opts === 'string')
+ opts = { mode: parseInt(opts, 8), fs }
+ else
+ throw new TypeError('invalid options argument')
+
+ opts.mkdir = opts.mkdir || opts.fs.mkdir || fs.mkdir
+ opts.mkdirAsync = promisify(opts.mkdir)
+ opts.stat = opts.stat || opts.fs.stat || fs.stat
+ opts.statAsync = promisify(opts.stat)
+ opts.statSync = opts.statSync || opts.fs.statSync || fs.statSync
+ opts.mkdirSync = opts.mkdirSync || opts.fs.mkdirSync || fs.mkdirSync
+ return opts
+}
+module.exports = optsArg
diff --git a/node_modules/tar/node_modules/mkdirp/lib/path-arg.js b/node_modules/tar/node_modules/mkdirp/lib/path-arg.js
new file mode 100644
index 000000000..cc07de5a6
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/lib/path-arg.js
@@ -0,0 +1,29 @@
+const platform = process.env.__TESTING_MKDIRP_PLATFORM__ || process.platform
+const { resolve, parse } = require('path')
+const pathArg = path => {
+ if (/\0/.test(path)) {
+ // simulate same failure that node raises
+ throw Object.assign(
+ new TypeError('path must be a string without null bytes'),
+ {
+ path,
+ code: 'ERR_INVALID_ARG_VALUE',
+ }
+ )
+ }
+
+ path = resolve(path)
+ if (platform === 'win32') {
+ const badWinChars = /[*|"<>?:]/
+ const {root} = parse(path)
+ if (badWinChars.test(path.substr(root.length))) {
+ throw Object.assign(new Error('Illegal characters in path.'), {
+ path,
+ code: 'EINVAL',
+ })
+ }
+ }
+
+ return path
+}
+module.exports = pathArg
diff --git a/node_modules/tar/node_modules/mkdirp/lib/use-native.js b/node_modules/tar/node_modules/mkdirp/lib/use-native.js
new file mode 100644
index 000000000..079361de1
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/lib/use-native.js
@@ -0,0 +1,10 @@
+const fs = require('fs')
+
+const version = process.env.__TESTING_MKDIRP_NODE_VERSION__ || process.version
+const versArr = version.replace(/^v/, '').split('.')
+const hasNative = +versArr[0] > 10 || +versArr[0] === 10 && +versArr[1] >= 12
+
+const useNative = !hasNative ? () => false : opts => opts.mkdir === fs.mkdir
+const useNativeSync = !hasNative ? () => false : opts => opts.mkdirSync === fs.mkdirSync
+
+module.exports = {useNative, useNativeSync}
diff --git a/node_modules/tar/node_modules/mkdirp/package.json b/node_modules/tar/node_modules/mkdirp/package.json
new file mode 100644
index 000000000..f6c5a817f
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/package.json
@@ -0,0 +1,75 @@
+{
+ "_from": "mkdirp@^1.0.3",
+ "_id": "mkdirp@1.0.3",
+ "_inBundle": false,
+ "_integrity": "sha512-6uCP4Qc0sWsgMLy1EOqqS/3rjDHOEnsStVr/4vtAIK2Y5i2kA7lFFejYrpIyiN9w0pYf4ckeCYT9f1r1P9KX5g==",
+ "_location": "/tar/mkdirp",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "mkdirp@^1.0.3",
+ "name": "mkdirp",
+ "escapedName": "mkdirp",
+ "rawSpec": "^1.0.3",
+ "saveSpec": null,
+ "fetchSpec": "^1.0.3"
+ },
+ "_requiredBy": [
+ "/tar"
+ ],
+ "_resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.3.tgz",
+ "_shasum": "4cf2e30ad45959dddea53ad97d518b6c8205e1ea",
+ "_spec": "mkdirp@^1.0.3",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/tar",
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/node-mkdirp/issues"
+ },
+ "bundleDependencies": false,
+ "deprecated": false,
+ "description": "Recursively mkdir, like `mkdir -p`",
+ "devDependencies": {
+ "require-inject": "^1.4.4",
+ "tap": "^14.10.6"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "files": [
+ "bin",
+ "lib",
+ "index.js"
+ ],
+ "homepage": "https://github.com/isaacs/node-mkdirp#readme",
+ "keywords": [
+ "mkdir",
+ "directory",
+ "make dir",
+ "make",
+ "dir",
+ "recursive",
+ "native"
+ ],
+ "license": "MIT",
+ "main": "index.js",
+ "name": "mkdirp",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/node-mkdirp.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --follow-tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "snap": "tap",
+ "test": "tap"
+ },
+ "tap": {
+ "check-coverage": true,
+ "coverage-map": "map.js"
+ },
+ "version": "1.0.3"
+}
diff --git a/node_modules/tar/node_modules/mkdirp/readme.markdown b/node_modules/tar/node_modules/mkdirp/readme.markdown
new file mode 100644
index 000000000..827de5905
--- /dev/null
+++ b/node_modules/tar/node_modules/mkdirp/readme.markdown
@@ -0,0 +1,266 @@
+# mkdirp
+
+Like `mkdir -p`, but in Node.js!
+
+Now with a modern API and no\* bugs!
+
+<small>\* may contain some bugs</small>
+
+# example
+
+## pow.js
+
+```js
+const mkdirp = require('mkdirp')
+
+// return value is a Promise resolving to the first directory created
+mkdirp('/tmp/foo/bar/baz').then(made =>
+ console.log(`made directories, starting with ${made}`))
+```
+
+Output (where `/tmp/foo` already exists)
+
+```
+made directories, starting with /tmp/foo/bar
+```
+
+Or, if you don't have time to wait around for promises:
+
+```js
+const mkdirp = require('mkdirp')
+
+// return value is the first directory created
+const made = mkdirp.sync('/tmp/foo/bar/baz')
+console.log(`made directories, starting with ${made}`)
+```
+
+And now /tmp/foo/bar/baz exists, huzzah!
+
+# methods
+
+```js
+const mkdirp = require('mkdirp')
+```
+
+## mkdirp(dir, [opts]) -> Promise<String | undefined>
+
+Create a new directory and any necessary subdirectories at `dir` with octal
+permission string `opts.mode`. If `opts` is a string or number, it will be
+treated as the `opts.mode`.
+
+If `opts.mode` isn't specified, it defaults to `0o777 &
+(~process.umask())`.
+
+Promise resolves to first directory `made` that had to be created, or
+`undefined` if everything already exists. Promise rejects if any errors
+are encountered. Note that, in the case of promise rejection, some
+directories _may_ have been created, as recursive directory creation is not
+an atomic operation.
+
+You can optionally pass in an alternate `fs` implementation by passing in
+`opts.fs`. Your implementation should have `opts.fs.mkdir(path, opts, cb)`
+and `opts.fs.stat(path, cb)`.
+
+You can also override just one or the other of `mkdir` and `stat` by
+passing in `opts.stat` or `opts.mkdir`, or providing an `fs` option that
+only overrides one of these.
+
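+A minimal sketch of the `opts.mode` forms described above (all three calls
+are equivalent):
+
+```js
+const mkdirp = require('mkdirp')
+
+// object form
+mkdirp('/tmp/foo/bar/baz', { mode: 0o700 }).then(made => console.log(made))
+
+// a bare number or octal string is treated as opts.mode
+mkdirp('/tmp/foo/bar/baz', 0o700)
+mkdirp('/tmp/foo/bar/baz', '0700')
+```
+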
+## mkdirp.sync(dir, [opts]) -> String | undefined
+
+Synchronously create a new directory and any necessary subdirectories at
+`dir` with octal permission string `opts.mode`. If `opts` is a string or
+number, it will be treated as the `opts.mode`.
+
+If `opts.mode` isn't specified, it defaults to `0o777 &
+(~process.umask())`.
+
+Returns the first directory that had to be created, or undefined if
+everything already exists.
+
+You can optionally pass in an alternate `fs` implementation by passing in
+`opts.fs`. Your implementation should have `opts.fs.mkdirSync(path, mode)`
+and `opts.fs.statSync(path)`.
+
+You can also override just one or the other of `mkdirSync` and `statSync`
+by passing in `opts.statSync` or `opts.mkdirSync`, or providing an `fs`
+option that only overrides one of these.
+
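+For example (a sketch; `myFs` is a stand-in for any object with compatible
+`mkdirSync` and `statSync` methods):
+
+```js
+const mkdirp = require('mkdirp')
+const myFs = require('fs') // swap in your own fs-compatible object here
+
+const made = mkdirp.sync('/tmp/foo/bar/baz', { fs: myFs, mode: 0o755 })
+console.log(made) // first directory created, or undefined
+```
+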
+## mkdirp.manual, mkdirp.manualSync
+
+Use the manual implementation (not the native one). This is the default
+when the native implementation is not available or the stat/mkdir
+implementation is overridden.
+
+## mkdirp.native, mkdirp.nativeSync
+
+Use the native implementation (not the manual one). This is the default
+when the native implementation is available and stat/mkdir are not
+overridden.
+
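+A short sketch of forcing one implementation or the other, regardless of
+the default selection described above:
+
+```js
+const mkdirp = require('mkdirp')
+
+mkdirp.native('/tmp/foo/bar/baz').then(made => console.log(made))
+mkdirp.manual('/tmp/foo/bar/baz').then(made => console.log(made))
+
+// sync variants
+mkdirp.nativeSync('/tmp/foo/bar/baz')
+mkdirp.manualSync('/tmp/foo/bar/baz')
+```
+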
+# implementation
+
+On Node.js v10.12.0 and above, use the native `fs.mkdir(p,
+{recursive:true})` option, unless `fs.mkdir`/`fs.mkdirSync` has been
+overridden by an option.
+
+## native implementation
+
+- If the path is a root directory, then pass it to the underlying
+ implementation and return the result/error. (In this case, it'll either
+ succeed or fail, but we aren't actually creating any dirs.)
+- Walk up the path statting each directory, to find the first path that
+ will be created, `made`.
+- Call `fs.mkdir(path, { recursive: true })` (or `fs.mkdirSync`)
+- If error, raise it to the caller.
+- Return `made`.
+
+## manual implementation
+
+- Call underlying `fs.mkdir` implementation, with `recursive: false`
+- If error:
+ - If path is a root directory, raise to the caller and do not handle it
+ - If ENOENT, mkdirp parent dir, store result as `made`
+ - stat(path)
+ - If error, raise original `mkdir` error
+ - If directory, return `made`
+ - Else, raise original `mkdir` error
+- else
+ - return `undefined` if a root dir, or `made` if set, or `path`
+
+## windows vs unix caveat
+
+On Windows file systems, attempts to create a root directory (i.e., a drive
+letter or root UNC path) will fail. If the root directory exists, then it
+will fail with `EPERM`. If the root directory does not exist, then it will
+fail with `ENOENT`.
+
+On posix file systems, attempts to create a root directory (in recursive
+mode) will succeed silently, as it is treated like just another directory
+that already exists. (In non-recursive mode, of course, it fails with
+`EEXIST`.)
+
+In order to preserve this system-specific behavior (and because it's not as
+if we can create the parent of a root directory anyway), attempts to create
+a root directory are passed directly to the `fs` implementation, and any
+errors encountered are not handled.
+
+## native error caveat
+
+The native implementation (as of at least Node.js v13.4.0) does not provide
+appropriate errors in some cases (see
+[nodejs/node#31481](https://github.com/nodejs/node/issues/31481) and
+[nodejs/node#28015](https://github.com/nodejs/node/issues/28015)).
+
+In order to work around this issue, the native implementation will fall
+back to the manual implementation if an `ENOENT` error is encountered.
+
+# choosing a recursive mkdir implementation
+
+There are a few to choose from! Use the one that suits your needs best :D
+
+## use `fs.mkdir(path, {recursive: true}, cb)` if:
+
+- You wish to optimize performance even at the expense of other factors.
+- You don't need to know the first dir created.
+- You are ok with getting `ENOENT` as the error when some other problem is
+ the actual cause.
+- You can limit your platforms to Node.js v10.12 and above.
+- You're ok with using callbacks instead of promises.
+- You don't need/want a CLI.
+- You don't need to override the `fs` methods in use.
+
+## use this module (mkdirp 1.x) if:
+
+- You need to know the first directory that was created.
+- You wish to use the native implementation if available, but fall back
+ when it's not.
+- You prefer promise-returning APIs to callback-taking APIs.
+- You want more useful error messages than the native recursive mkdir
+ provides (at least as of Node.js v13.4), and are ok with re-trying on
+ `ENOENT` to achieve this.
+- You need (or at least, are ok with) a CLI.
+- You need to override the `fs` methods in use.
+
+## use [`make-dir`](http://npm.im/make-dir) if:
+
+- You do not need to know the first dir created (and wish to save a few
+ `stat` calls when using the native implementation for this reason).
+- You wish to use the native implementation if available, but fall back
+ when it's not.
+- You prefer promise-returning APIs to callback-taking APIs.
+- You are ok with occasionally getting `ENOENT` errors for failures that
+ are actually related to something other than a missing file system entry.
+- You don't need/want a CLI.
+- You need to override the `fs` methods in use.
+
+## use mkdirp 0.x if:
+
+- You need to know the first directory that was created.
+- You need (or at least, are ok with) a CLI.
+- You need to override the `fs` methods in use.
+- You're ok with using callbacks instead of promises.
+- You are not running on Windows, where the root-level ENOENT errors can
+ lead to infinite regress.
+- You think vinyl just sounds warmer and richer for some weird reason.
+- You are supporting truly ancient Node.js versions, before even the advent
+ of a `Promise` language primitive. (Please don't. You deserve better.)
+
+# cli
+
+This package also ships with a `mkdirp` command.
+
+```
+$ mkdirp -h
+
+usage: mkdirp [DIR1,DIR2..] {OPTIONS}
+
+ Create each supplied directory including any necessary parent directories
+ that don't yet exist.
+
+ If the directory already exists, do nothing.
+
+OPTIONS are:
+
+ -m<mode> If a directory needs to be created, set the mode as an octal
+ --mode=<mode> permission string.
+
+ -v --version Print the mkdirp version number
+
+ -h --help Print this helpful banner
+
+ -p --print Print the first directories created for each path provided
+
+ --manual Use manual implementation, even if native is available
+```
+
+# install
+
+With [npm](http://npmjs.org) do:
+
+```
+npm install mkdirp
+```
+
+to get the library locally, or
+
+```
+npm install -g mkdirp
+```
+
+to get the command everywhere, or
+
+```
+npx mkdirp ...
+```
+
+to run the command without installing it globally.
+
+# platform support
+
+This module works on Node.js v8, but only v10 and above are officially
+supported, as Node.js v8 reached its LTS end of life on 2020-01-01, which
+is in the past as of this writing.
+
+# license
+
+MIT
diff --git a/node_modules/tar/node_modules/yallist/LICENSE b/node_modules/tar/node_modules/yallist/LICENSE
new file mode 100644
index 000000000..19129e315
--- /dev/null
+++ b/node_modules/tar/node_modules/yallist/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/node_modules/tar/node_modules/yallist/README.md b/node_modules/tar/node_modules/yallist/README.md
new file mode 100644
index 000000000..f58610186
--- /dev/null
+++ b/node_modules/tar/node_modules/yallist/README.md
@@ -0,0 +1,204 @@
+# yallist
+
+Yet Another Linked List
+
+There are many doubly-linked list implementations like it, but this
+one is mine.
+
+For when an array would be too big, and a Map can't be iterated in
+reverse order.
+
+[![Build Status](https://travis-ci.org/isaacs/yallist.svg?branch=master)](https://travis-ci.org/isaacs/yallist) [![Coverage Status](https://coveralls.io/repos/isaacs/yallist/badge.svg?service=github)](https://coveralls.io/github/isaacs/yallist)
+
+## basic usage
+
+```javascript
+var yallist = require('yallist')
+var myList = yallist.create([1, 2, 3])
+myList.push('foo')
+myList.unshift('bar')
+// of course pop() and shift() are there, too
+console.log(myList.toArray()) // ['bar', 1, 2, 3, 'foo']
+myList.forEach(function (k) {
+ // walk the list head to tail
+})
+myList.forEachReverse(function (k, index, list) {
+ // walk the list tail to head
+})
+var myDoubledList = myList.map(function (k) {
+ return k + k
+})
+// now myDoubledList contains ['barbar', 2, 4, 6, 'foofoo']
+// mapReverse is also a thing
+var myDoubledListReverse = myList.mapReverse(function (k) {
+ return k + k
+}) // ['foofoo', 6, 4, 2, 'barbar']
+
+var reduced = myList.reduce(function (set, entry) {
+ set += entry
+ return set
+}, 'start')
+console.log(reduced) // 'startfoo123bar'
+```
+
+## api
+
+The whole API is considered "public".
+
+Functions with the same name as an Array method work more or less the
+same way.
+
+There are reverse versions of most things because that's the point.
+
+### Yallist
+
+Default export, the class that holds and manages a list.
+
+Call it with either a forEach-able (like an array) or a set of
+arguments, to initialize the list.
+
+The Array-ish methods all act like you'd expect. No magic length,
+though, so if you change that it won't automatically prune or add
+empty spots.
+
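+A small sketch of the two call forms described above, which produce the
+same three-item list:
+
+```javascript
+var Yallist = require('yallist')
+var fromIterable = new Yallist([1, 2, 3])
+var fromArguments = new Yallist(1, 2, 3)
+```
+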
+### Yallist.create(..)
+
+Alias for Yallist function. Some people like factories.
+
+#### yallist.head
+
+The first node in the list
+
+#### yallist.tail
+
+The last node in the list
+
+#### yallist.length
+
+The number of nodes in the list. (Change this at your peril. It is
+not magic like Array length.)
+
+#### yallist.forEach(fn, [thisp])
+
+Call a function on each item in the list.
+
+#### yallist.forEachReverse(fn, [thisp])
+
+Call a function on each item in the list, in reverse order.
+
+#### yallist.get(n)
+
+Get the data at position `n` in the list. If you use this a lot, you're
+probably better off just using an Array.
+
+#### yallist.getReverse(n)
+
+Get the data at position `n`, counting from the tail.
+
+#### yallist.map(fn, thisp)
+
+Create a new Yallist with the result of calling the function on each
+item.
+
+#### yallist.mapReverse(fn, thisp)
+
+Same as `map`, but in reverse.
+
+#### yallist.pop()
+
+Get the data from the list tail, and remove the tail from the list.
+
+#### yallist.push(item, ...)
+
+Insert one or more items at the tail of the list.
+
+#### yallist.reduce(fn, initialValue)
+
+Like Array.reduce.
+
+#### yallist.reduceReverse
+
+Like Array.reduce, but in reverse.
+
+#### yallist.reverse
+
+Reverse the list in place.
+
+#### yallist.shift()
+
+Get the data from the list head, and remove the head from the list.
+
+#### yallist.slice([from], [to])
+
+Just like Array.slice, but returns a new Yallist.
+
+#### yallist.sliceReverse([from], [to])
+
+Just like yallist.slice, but the result is returned in reverse.
+
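+A quick sketch of the difference between the two:
+
+```javascript
+var list = require('yallist').create([1, 2, 3, 4])
+list.slice(1, 3).toArray() // [2, 3]
+list.sliceReverse(1, 3).toArray() // [3, 2]
+```
+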
+#### yallist.toArray()
+
+Create an array representation of the list.
+
+#### yallist.toArrayReverse()
+
+Create a reversed array representation of the list.
+
+#### yallist.unshift(item, ...)
+
+Insert one or more items at the head of the list.
+
+#### yallist.unshiftNode(node)
+
+Move a Node object to the front of the list. (That is, pull it out of
+wherever it lives, and make it the new head.)
+
+If the node belongs to a different list, then that list will remove it
+first.
+
+#### yallist.pushNode(node)
+
+Move a Node object to the end of the list. (That is, pull it out of
+wherever it lives, and make it the new tail.)
+
+If the node belongs to a list already, then that list will remove it
+first.
+
+#### yallist.removeNode(node)
+
+Remove a node from the list, preserving referential integrity of head
+and tail and other nodes.
+
+Will throw an error if you try to have a list remove a node that
+doesn't belong to it.
+
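+A small sketch of these node-level operations:
+
+```javascript
+var yallist = require('yallist')
+var list = yallist.create([1, 2, 3])
+var node = list.head.next // the node holding 2
+
+list.removeNode(node) // list is now [1, 3]
+
+var other = yallist.create(['a'])
+other.pushNode(node) // the node moves here; other is now ['a', 2]
+```
+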
+### Yallist.Node
+
+The class that holds the data and links each entry into the list.
+
+Call with `var n = new Node(value, previousNode, nextNode, list)`
+
+Note that if you do direct operations on Nodes themselves, it's very
+easy to get into weird states where the list is broken. Be careful :)
+
+#### node.next
+
+The next node in the list.
+
+#### node.prev
+
+The previous node in the list.
+
+#### node.value
+
+The data the node contains.
+
+#### node.list
+
+The list to which this node belongs. (Null if it does not belong to
+any list.)
diff --git a/node_modules/tar/node_modules/yallist/iterator.js b/node_modules/tar/node_modules/yallist/iterator.js
new file mode 100644
index 000000000..d41c97a19
--- /dev/null
+++ b/node_modules/tar/node_modules/yallist/iterator.js
@@ -0,0 +1,8 @@
+'use strict'
+module.exports = function (Yallist) {
+ Yallist.prototype[Symbol.iterator] = function* () {
+ for (let walker = this.head; walker; walker = walker.next) {
+ yield walker.value
+ }
+ }
+}
diff --git a/node_modules/tar/node_modules/yallist/package.json b/node_modules/tar/node_modules/yallist/package.json
new file mode 100644
index 000000000..83e3c6b56
--- /dev/null
+++ b/node_modules/tar/node_modules/yallist/package.json
@@ -0,0 +1,63 @@
+{
+ "_from": "yallist@^4.0.0",
+ "_id": "yallist@4.0.0",
+ "_inBundle": false,
+ "_integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "_location": "/tar/yallist",
+ "_phantomChildren": {},
+ "_requested": {
+ "type": "range",
+ "registry": true,
+ "raw": "yallist@^4.0.0",
+ "name": "yallist",
+ "escapedName": "yallist",
+ "rawSpec": "^4.0.0",
+ "saveSpec": null,
+ "fetchSpec": "^4.0.0"
+ },
+ "_requiredBy": [
+ "/tar",
+ "/tar/minipass"
+ ],
+ "_resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "_shasum": "9bb92790d9c0effec63be73519e11a35019a3a72",
+ "_spec": "yallist@^4.0.0",
+ "_where": "/Users/claudiahdz/npm/cli/node_modules/tar",
+ "author": {
+ "name": "Isaac Z. Schlueter",
+ "email": "i@izs.me",
+ "url": "http://blog.izs.me/"
+ },
+ "bugs": {
+ "url": "https://github.com/isaacs/yallist/issues"
+ },
+ "bundleDependencies": false,
+ "dependencies": {},
+ "deprecated": false,
+ "description": "Yet Another Linked List",
+ "devDependencies": {
+ "tap": "^12.1.0"
+ },
+ "directories": {
+ "test": "test"
+ },
+ "files": [
+ "yallist.js",
+ "iterator.js"
+ ],
+ "homepage": "https://github.com/isaacs/yallist#readme",
+ "license": "ISC",
+ "main": "yallist.js",
+ "name": "yallist",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/isaacs/yallist.git"
+ },
+ "scripts": {
+ "postpublish": "git push origin --all; git push origin --tags",
+ "postversion": "npm publish",
+ "preversion": "npm test",
+ "test": "tap test/*.js --100"
+ },
+ "version": "4.0.0"
+}
diff --git a/node_modules/tar/node_modules/yallist/yallist.js b/node_modules/tar/node_modules/yallist/yallist.js
new file mode 100644
index 000000000..4e83ab1c5
--- /dev/null
+++ b/node_modules/tar/node_modules/yallist/yallist.js
@@ -0,0 +1,426 @@
+'use strict'
+module.exports = Yallist
+
+Yallist.Node = Node
+Yallist.create = Yallist
+
+function Yallist (list) {
+ var self = this
+ if (!(self instanceof Yallist)) {
+ self = new Yallist()
+ }
+
+ self.tail = null
+ self.head = null
+ self.length = 0
+
+ if (list && typeof list.forEach === 'function') {
+ list.forEach(function (item) {
+ self.push(item)
+ })
+ } else if (arguments.length > 0) {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ self.push(arguments[i])
+ }
+ }
+
+ return self
+}
+
+Yallist.prototype.removeNode = function (node) {
+ if (node.list !== this) {
+ throw new Error('removing node which does not belong to this list')
+ }
+
+ var next = node.next
+ var prev = node.prev
+
+ if (next) {
+ next.prev = prev
+ }
+
+ if (prev) {
+ prev.next = next
+ }
+
+ if (node === this.head) {
+ this.head = next
+ }
+ if (node === this.tail) {
+ this.tail = prev
+ }
+
+ node.list.length--
+ node.next = null
+ node.prev = null
+ node.list = null
+
+ return next
+}
+
+Yallist.prototype.unshiftNode = function (node) {
+ if (node === this.head) {
+ return
+ }
+
+ if (node.list) {
+ node.list.removeNode(node)
+ }
+
+ var head = this.head
+ node.list = this
+ node.next = head
+ if (head) {
+ head.prev = node
+ }
+
+ this.head = node
+ if (!this.tail) {
+ this.tail = node
+ }
+ this.length++
+}
+
+Yallist.prototype.pushNode = function (node) {
+ if (node === this.tail) {
+ return
+ }
+
+ if (node.list) {
+ node.list.removeNode(node)
+ }
+
+ var tail = this.tail
+ node.list = this
+ node.prev = tail
+ if (tail) {
+ tail.next = node
+ }
+
+ this.tail = node
+ if (!this.head) {
+ this.head = node
+ }
+ this.length++
+}
+
+Yallist.prototype.push = function () {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ push(this, arguments[i])
+ }
+ return this.length
+}
+
+Yallist.prototype.unshift = function () {
+ for (var i = 0, l = arguments.length; i < l; i++) {
+ unshift(this, arguments[i])
+ }
+ return this.length
+}
+
+Yallist.prototype.pop = function () {
+ if (!this.tail) {
+ return undefined
+ }
+
+ var res = this.tail.value
+ this.tail = this.tail.prev
+ if (this.tail) {
+ this.tail.next = null
+ } else {
+ this.head = null
+ }
+ this.length--
+ return res
+}
+
+Yallist.prototype.shift = function () {
+ if (!this.head) {
+ return undefined
+ }
+
+ var res = this.head.value
+ this.head = this.head.next
+ if (this.head) {
+ this.head.prev = null
+ } else {
+ this.tail = null
+ }
+ this.length--
+ return res
+}
+
+Yallist.prototype.forEach = function (fn, thisp) {
+ thisp = thisp || this
+ for (var walker = this.head, i = 0; walker !== null; i++) {
+ fn.call(thisp, walker.value, i, this)
+ walker = walker.next
+ }
+}
+
+Yallist.prototype.forEachReverse = function (fn, thisp) {
+ thisp = thisp || this
+ for (var walker = this.tail, i = this.length - 1; walker !== null; i--) {
+ fn.call(thisp, walker.value, i, this)
+ walker = walker.prev
+ }
+}
+
+Yallist.prototype.get = function (n) {
+ for (var i = 0, walker = this.head; walker !== null && i < n; i++) {
+ // abort out of the list early if we hit a cycle
+ walker = walker.next
+ }
+ if (i === n && walker !== null) {
+ return walker.value
+ }
+}
+
+Yallist.prototype.getReverse = function (n) {
+ for (var i = 0, walker = this.tail; walker !== null && i < n; i++) {
+ // abort out of the list early if we hit a cycle
+ walker = walker.prev
+ }
+ if (i === n && walker !== null) {
+ return walker.value
+ }
+}
+
+Yallist.prototype.map = function (fn, thisp) {
+ thisp = thisp || this
+ var res = new Yallist()
+ for (var walker = this.head; walker !== null;) {
+ res.push(fn.call(thisp, walker.value, this))
+ walker = walker.next
+ }
+ return res
+}
+
+Yallist.prototype.mapReverse = function (fn, thisp) {
+ thisp = thisp || this
+ var res = new Yallist()
+ for (var walker = this.tail; walker !== null;) {
+ res.push(fn.call(thisp, walker.value, this))
+ walker = walker.prev
+ }
+ return res
+}
+
+Yallist.prototype.reduce = function (fn, initial) {
+ var acc
+ var walker = this.head
+ if (arguments.length > 1) {
+ acc = initial
+ } else if (this.head) {
+ walker = this.head.next
+ acc = this.head.value
+ } else {
+ throw new TypeError('Reduce of empty list with no initial value')
+ }
+
+ for (var i = 0; walker !== null; i++) {
+ acc = fn(acc, walker.value, i)
+ walker = walker.next
+ }
+
+ return acc
+}
+
+Yallist.prototype.reduceReverse = function (fn, initial) {
+ var acc
+ var walker = this.tail
+ if (arguments.length > 1) {
+ acc = initial
+ } else if (this.tail) {
+ walker = this.tail.prev
+ acc = this.tail.value
+ } else {
+ throw new TypeError('Reduce of empty list with no initial value')
+ }
+
+ for (var i = this.length - 1; walker !== null; i--) {
+ acc = fn(acc, walker.value, i)
+ walker = walker.prev
+ }
+
+ return acc
+}
+
+Yallist.prototype.toArray = function () {
+ var arr = new Array(this.length)
+ for (var i = 0, walker = this.head; walker !== null; i++) {
+ arr[i] = walker.value
+ walker = walker.next
+ }
+ return arr
+}
+
+Yallist.prototype.toArrayReverse = function () {
+ var arr = new Array(this.length)
+ for (var i = 0, walker = this.tail; walker !== null; i++) {
+ arr[i] = walker.value
+ walker = walker.prev
+ }
+ return arr
+}
+
+Yallist.prototype.slice = function (from, to) {
+ to = to || this.length
+ if (to < 0) {
+ to += this.length
+ }
+ from = from || 0
+ if (from < 0) {
+ from += this.length
+ }
+ var ret = new Yallist()
+ if (to < from || to < 0) {
+ return ret
+ }
+ if (from < 0) {
+ from = 0
+ }
+ if (to > this.length) {
+ to = this.length
+ }
+ for (var i = 0, walker = this.head; walker !== null && i < from; i++) {
+ walker = walker.next
+ }
+ for (; walker !== null && i < to; i++, walker = walker.next) {
+ ret.push(walker.value)
+ }
+ return ret
+}
+
+Yallist.prototype.sliceReverse = function (from, to) {
+ to = to || this.length
+ if (to < 0) {
+ to += this.length
+ }
+ from = from || 0
+ if (from < 0) {
+ from += this.length
+ }
+ var ret = new Yallist()
+ if (to < from || to < 0) {
+ return ret
+ }
+ if (from < 0) {
+ from = 0
+ }
+ if (to > this.length) {
+ to = this.length
+ }
+ for (var i = this.length, walker = this.tail; walker !== null && i > to; i--) {
+ walker = walker.prev
+ }
+ for (; walker !== null && i > from; i--, walker = walker.prev) {
+ ret.push(walker.value)
+ }
+ return ret
+}
+
+Yallist.prototype.splice = function (start, deleteCount, ...nodes) {
+ if (start > this.length) {
+ start = this.length - 1
+ }
+ if (start < 0) {
+    start = this.length + start
+ }
+
+ for (var i = 0, walker = this.head; walker !== null && i < start; i++) {
+ walker = walker.next
+ }
+
+ var ret = []
+ for (var i = 0; walker && i < deleteCount; i++) {
+ ret.push(walker.value)
+ walker = this.removeNode(walker)
+ }
+ if (walker === null) {
+ walker = this.tail
+ }
+
+ if (walker !== this.head && walker !== this.tail) {
+ walker = walker.prev
+ }
+
+ for (var i = 0; i < nodes.length; i++) {
+ walker = insert(this, walker, nodes[i])
+ }
+  return ret
+}
+
+Yallist.prototype.reverse = function () {
+ var head = this.head
+ var tail = this.tail
+ for (var walker = head; walker !== null; walker = walker.prev) {
+ var p = walker.prev
+ walker.prev = walker.next
+ walker.next = p
+ }
+ this.head = tail
+ this.tail = head
+ return this
+}
+
+function insert (self, node, value) {
+ var inserted = node === self.head ?
+ new Node(value, null, node, self) :
+ new Node(value, node, node.next, self)
+
+ if (inserted.next === null) {
+ self.tail = inserted
+ }
+ if (inserted.prev === null) {
+ self.head = inserted
+ }
+
+ self.length++
+
+ return inserted
+}
+
+function push (self, item) {
+ self.tail = new Node(item, self.tail, null, self)
+ if (!self.head) {
+ self.head = self.tail
+ }
+ self.length++
+}
+
+function unshift (self, item) {
+ self.head = new Node(item, null, self.head, self)
+ if (!self.tail) {
+ self.tail = self.head
+ }
+ self.length++
+}
+
+function Node (value, prev, next, list) {
+ if (!(this instanceof Node)) {
+ return new Node(value, prev, next, list)
+ }
+
+ this.list = list
+ this.value = value
+
+ if (prev) {
+ prev.next = this
+ this.prev = prev
+ } else {
+ this.prev = null
+ }
+
+ if (next) {
+ next.prev = this
+ this.next = next
+ } else {
+ this.next = null
+ }
+}
+
+try {
+ // add if support for Symbol.iterator is present
+ require('./iterator.js')(Yallist)
+} catch (er) {}
diff --git a/node_modules/tar/package.json b/node_modules/tar/package.json
index b12db0f3e..347a0799a 100644
--- a/node_modules/tar/package.json
+++ b/node_modules/tar/package.json
@@ -1,33 +1,28 @@
{
- "_from": "tar@4.4.13",
- "_id": "tar@4.4.13",
+ "_from": "tar@6.0.1",
+ "_id": "tar@6.0.1",
"_inBundle": false,
- "_integrity": "sha512-w2VwSrBoHa5BsSyH+KxEqeQBAllHhccyMFVHtGtdMpF4W7IRWfZjFiQceJPChOeTsSDVUpER2T8FA93pr0L+QA==",
+ "_integrity": "sha512-bKhKrrz2FJJj5s7wynxy/fyxpE0CmCjmOQ1KV4KkgXFWOgoIT/NbTMnB1n+LFNrNk0SSBVGGxcK5AGsyC+pW5Q==",
"_location": "/tar",
- "_phantomChildren": {
- "safe-buffer": "5.1.2",
- "yallist": "3.0.3"
- },
+ "_phantomChildren": {},
"_requested": {
"type": "version",
"registry": true,
- "raw": "tar@4.4.13",
+ "raw": "tar@6.0.1",
"name": "tar",
"escapedName": "tar",
- "rawSpec": "4.4.13",
+ "rawSpec": "6.0.1",
"saveSpec": null,
- "fetchSpec": "4.4.13"
+ "fetchSpec": "6.0.1"
},
"_requiredBy": [
"#USER",
- "/",
- "/node-gyp",
- "/pacote"
+ "/"
],
- "_resolved": "https://registry.npmjs.org/tar/-/tar-4.4.13.tgz",
- "_shasum": "43b364bc52888d555298637b10d60790254ab525",
- "_spec": "tar@4.4.13",
- "_where": "/Users/mperrotte/npminc/cli",
+ "_resolved": "https://registry.npmjs.org/tar/-/tar-6.0.1.tgz",
+ "_shasum": "7b3bd6c313cb6e0153770108f8d70ac298607efa",
+ "_spec": "tar@6.0.1",
+ "_where": "/Users/claudiahdz/npm/cli",
"author": {
"name": "Isaac Z. Schlueter",
"email": "i@izs.me",
@@ -38,32 +33,31 @@
},
"bundleDependencies": false,
"dependencies": {
- "chownr": "^1.1.1",
- "fs-minipass": "^1.2.5",
- "minipass": "^2.8.6",
- "minizlib": "^1.2.1",
- "mkdirp": "^0.5.0",
- "safe-buffer": "^5.1.2",
- "yallist": "^3.0.3"
+ "chownr": "^1.1.3",
+ "fs-minipass": "^2.0.0",
+ "minipass": "^3.0.0",
+ "minizlib": "^2.1.0",
+ "mkdirp": "^1.0.3",
+ "yallist": "^4.0.0"
},
"deprecated": false,
"description": "tar for node",
"devDependencies": {
"chmodr": "^1.2.0",
- "end-of-stream": "^1.4.1",
+ "end-of-stream": "^1.4.3",
"events-to-array": "^1.1.2",
"mutate-fs": "^2.1.1",
- "rimraf": "^2.6.3",
- "tap": "^14.6.5",
+ "rimraf": "^2.7.1",
+ "tap": "^14.9.2",
"tar-fs": "^1.16.3",
"tar-stream": "^1.6.2"
},
"engines": {
- "node": ">=4.5"
+ "node": ">= 10"
},
"files": [
"index.js",
- "lib/"
+ "lib/*.js"
],
"homepage": "https://github.com/npm/node-tar#readme",
"license": "ISC",
@@ -84,5 +78,5 @@
"coverage-map": "map.js",
"check-coverage": true
},
- "version": "4.4.13"
+ "version": "6.0.1"
}